diff --git a/.asf.yaml b/.asf.yaml deleted file mode 100644 index ac29efed9ff..00000000000 --- a/.asf.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -notifications: - commits: commits@cassandra.apache.org - issues: commits@cassandra.apache.org - pullrequests: pr@cassandra.apache.org - jira_options: link worklog - -github: - description: "Java Driver for Apache Cassandra®" - homepage: https://cassandra.apache.org/ - enabled_merge_buttons: - squash: false - merge: false - rebase: true - features: - wiki: false - issues: false - projects: false - autolink_jira: - - CASSANDRA - - CASSJAVA diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 07449882cc0..00000000000 --- a/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -.settings -.DS_Store -.documenter_local_last_run - -/.idea -*.iml -.classpath -.project - -.java-version -.flattened-pom.xml - -.documenter_local_last_run -/docs -target/ -dependency-reduced-pom.xml diff --git a/.snyk b/.snyk deleted file mode 100644 index a081b17225c..00000000000 --- a/.snyk +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities. -version: v1.22.2 -# ignores vulnerabilities until expiry date; change duration by modifying expiry date -ignore: - SNYK-JAVA-ORGGRAALVMSDK-2767964: - - '*': - reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year - expires: 2024-01-10T00:00:00.000Z - created: 2023-06-21T00:00:00.000Z - SNYK-JAVA-ORGGRAALVMSDK-2769618: - - '*': - reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year - expires: 2024-01-10T00:00:00.000Z - created: 2023-06-21T00:00:00.000Z - SNYK-JAVA-ORGGRAALVMSDK-5457933: - - '*': - reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year - expires: 2024-01-10T00:00:00.000Z - created: 2023-06-21T00:00:00.000Z diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 53857383cf2..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,534 +0,0 @@ - - -# Contributing guidelines - -## Code formatting - -### Java - -We follow the [Google Java Style Guide](https://google.github.io/styleguide/javaguide.html). See -https://github.com/google/google-java-format for IDE plugins. The rules are not configurable. 
- -The build will fail if the code is not formatted. To format all files from the command line, run: - -``` -mvn fmt:format -``` - -Some aspects are not covered by the formatter: braces must be used with `if`, `else`, `for`, `do` -and `while` statements, even when the body is empty or contains only a single statement. - -### XML - -The build will fail if XML files are not formatted correctly. Run the following command before you -commit: - -```java -mvn xml-format:xml-format -``` - -The formatter does not enforce a maximum line length, but please try to keep it below 100 characters -to keep files readable across all mediums (IDE, terminal, Github...). - -### Other text files (markdown, etc) - -Similarly, enforce a right margin of 100 characters in those files. Editors and IDEs generally have -a way to configure this (for IDEA, install the "Wrap to column" plugin). - -## Coding style -- production code - -Do not use static imports. They make things harder to understand when you look at the code -someplace where you don't have IDE support, like Github's code view. - -Avoid abbreviations in class and variable names. A good rule of thumb is that you should only use -them if you would also do so verbally, for example "id" and "config" are probably reasonable. -Single-letter variables are permissible if the variable scope is only a few lines, or for commonly -understood cases (like `i` for a loop index). - -Keep source files short. Short files are easy to understand and test. The average should probably -be around 200-300 lines. - -### Javadoc - -All types in "API" packages must be documented. For "internal" packages, documentation is optional, -but in no way discouraged: it's generally a good idea to have a class-level comment that explains -where the component fits in the architecture, and anything else that you feel is important. - -You don't need to document every parameter or return type, or even every method. 
Don't document -something if it is completely obvious, we don't want to end up with this: - -```java -/** - * Returns the name. - * - * @return the name - */ -String getName(); -``` - -On the other hand, there is often something useful to say about a method, so most should have at -least a one-line comment. Use common sense. - -Driver users coding in their IDE should find the right documentation at the right time. Try to -think of how they will come into contact with the class. For example, if a type is constructed with -a builder, each builder method should probably explain what the default is when you don't call it. - -Avoid using too many links, they can make comments harder to read, especially in the IDE. Link to a -type the first time it's mentioned, then use a text description ("this registry"...) or an `@code` -block. Don't link to a class in its own documentation. Don't link to types that appear right below -in the documented item's signature. - -```java -/** -* @return this {@link Builder} <-- completely unnecessary -*/ -Builder withLimit(int limit) { -``` - -### Logs - -We use SLF4J; loggers are declared like this: - -```java -private static final Logger LOG = LoggerFactory.getLogger(TheEnclosingClass.class); -``` - -Logs are intended for two personae: - -* Ops who manage the application in production. -* Developers (maybe you) who debug a particular issue. - -The first 3 log levels are for ops: - -* `ERROR`: something that renders the driver -- or a part of it -- completely unusable. An action is - required to fix it: bouncing the client, applying a patch, etc. -* `WARN`: something that the driver can recover from automatically, but indicates a configuration or - programming error that should be addressed. For example: the driver connected successfully, but - one of the contact points in the configuration was malformed; the same prepared statement is being - prepared multiple time by the application code. 
-* `INFO`: something that is part of the normal operation of the driver, but might be useful to know - for an operator. For example: the driver has initialized successfully and is ready to process - queries; an optional dependency was detected in the classpath and activated an enhanced feature. - -Do not log errors that are rethrown to the client (such as the error that you're going to complete a -request with). This is annoying for ops because they see a lot of stack traces that require no -actual action on their part, because they're already handled by application code. - -Similarly, do not log stack traces for non-critical errors. If you still want the option to get the -trace for debugging, see the `Loggers.warnWithException` utility. - -The last 2 levels are for developers, to help follow what the driver is doing from a "black box" -perspective (think about debugging an issue remotely, and all you have are the logs). - -* `TRACE`: anything that happens **for every user request**. Not only request handling, but all - related components (e.g. timestamp generators, policies, etc). -* `DEBUG`: everything else. For example, node state changes, control connection activity, etc. - -Note that `DEBUG` and `TRACE` can coexist within the same component, for example the LBP -initializing is a one-time event, but returning a query plan is a per-request event. - -Logs statements start with a prefix that identifies its origin, for example: - -* for components that are unique to the cluster instance, just the cluster name: `[c0]`. -* for sessions, the cluster name + a generated unique identifier: `[c0|s0]`. -* for channel pools, the session identifier + the address of the node: `[c0|s0|/127.0.0.2:9042]`. -* for channels, the identifier of the owner (session or control connection) + the Netty identifier, - which indicates the local and remote ports: - `[c0|s0|id: 0xf9ef0b15, L:/127.0.0.1:51482 - R:/127.0.0.1:9042]`. 
-* for request handlers, the session identifier, a unique identifier, and the index of the - speculative execution: `[c0|s0|1077199500|0]`. - -Tests run with the configuration defined in `src/test/resources/logback-test.xml`. The default level -for driver classes is `WARN`, but you can override it with a system property: `-DdriverLevel=DEBUG`. -A nice setup is to use `DEBUG` when you run from your IDE, and keep the default for the command -line. - -When you add or review new code, take a moment to run the tests in `DEBUG` mode and check if the -output looks good. - -### Don't abuse the stream API - -The `java.util.stream` API is often used (abused?) as a "functional API for collections": - -```java -List sizes = words.stream().map(String::length).collect(Collectors.toList()); -``` - -The perceived advantages of this approach over traditional for-loops are debatable: - -* readability: this is highly subjective. But consider the following: - * everyone can read for-loops, whether they are familiar with the Stream API or not. The opposite - is not true. - * the stream API does not spell out all the details: what kind of list does `Collectors.toList()` - return? Is it pre-sized? Mutable? Thread-safe? - * the stream API looks pretty on simple examples, but things can get ugly fast. Try rewriting - `NetworkTopologyReplicationStrategy` with streams. -* concision: this is irrelevant. When we look at code we care about maintainability, not how many - keystrokes the author saved. The for-loop version of the above example is just 5 lines long, and - your brain doesn't take longer to parse it. - -The bottom line: don't try to "be functional" at all cost. Plain old for-loops are often just as -simple. - -### Never assume a specific format for `toString()` - -Only use `toString()` for debug logs or exception messages, and always assume that its format is -unspecified and can change at any time. 
- -If you need a specific string representation for a class, make it a dedicated method with a -documented format, for example `toCqlLiteral`. Otherwise it's too easy to lose track of the intended -usage and break things: for example, someone modifies your `toString()` method to make their logs -prettier, but unintentionally breaks the script export feature that expected it to produce CQL -literals. - -`toString()` can delegate to `toCqlLiteral()` if that is appropriate for logs. - - -### Concurrency annotations - -We use the [JCIP annotations](http://jcip.net/annotations/doc/index.html) to document thread-safety -policies. - -Add them for all new code, with the exception of: - -* enums and interfaces; -* utility classes (only static methods); -* test code. - -Make sure you import the types from `net.jcip`, there are homonyms in the classpath. - - -### Nullability annotations - -We use the [Spotbugs annotations](https://spotbugs.github.io) to document nullability of parameters, -method return types and class members. - -Please annotate any new class or interface with the appropriate annotations: `@NonNull`, `@Nullable`. Make sure you import -the types from `edu.umd.cs.findbugs.annotations`, there are homonyms in the classpath. - - -## Coding style -- test code - -Static imports are permitted in a couple of places: -* All AssertJ methods, e.g.: - ```java - assertThat(node.getDatacenter()).isNotNull(); - fail("Expecting IllegalStateException to be thrown"); - ``` -* All Mockito methods, e.g.: - ```java - when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(codec); - verify(codec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - ``` -* All Awaitility methods, e.g.: - ```java - await().until(() -> somethingBecomesTrue()); - ``` - -Test methods names use lower snake case, generally start with `should`, and clearly indicate the -purpose of the test, for example: `should_fail_if_key_already_exists`. 
If you have trouble coming -up with a simple name, it might be a sign that your test does too much, and should be split. - -We use AssertJ (`assertThat`) for assertions. Don't use JUnit assertions (`assertEquals`, -`assertNull`, etc). - -Don't try to generify at all cost: a bit of duplication is acceptable, if that helps keep the tests -simple to understand (a newcomer should be able to understand how to fix a failing test without -having to read too much code). - -Test classes can be a bit longer, since they often enumerate similar test cases. You can also -factor some common code in a parent abstract class named with "XxxTestBase", and then split -different families of tests into separate child classes. For example, `CqlRequestHandlerTestBase`, -`CqlRequestHandlerRetryTest`, `CqlRequestHandlerSpeculativeExecutionTest`... - -### Unit tests - -They live in the same module as the code they are testing. They should be fast and not start any -external process. They usually target one specific component and mock the rest of the driver -context. - -### Integration tests - -They live in the `integration-tests` module, and exercise the whole driver stack against an external -process, which can be either one of: -* [Simulacron](https://github.com/datastax/simulacron): simulates Cassandra nodes on loopback - addresses; your test must "prime" data, i.e. tell the nodes what results to return for - pre-determined queries. - - For an example of a Simulacron-based test, see `NodeTargetingIT`. -* [CCM](https://github.com/pcmanus/ccm): launches actual Cassandra nodes locally. The `ccm` - executable must be in the path. - - You can pass a `-Dccm.version` system property to the build to target a particular Cassandra - version (it defaults to 3.11.0). `-Dccm.directory` allows you to point to a local installation - -- this can be a checkout of the Cassandra codebase, as long as it's built. See `CcmBridge` in - the driver codebase for more details. 
- - For an example of a CCM-based test, see `PlainTextAuthProviderIT`. - -#### Categories - -Integration tests are divided into three categories: - -##### Parallelizable tests - -These tests can be run in parallel, to speed up the build. They either use: -* dedicated Simulacron instances. These are lightweight, and Simulacron will manage the ports to - make sure that there are no collisions. -* a shared, one-node CCM cluster. Each test works in its own keyspace. - -The build runs them with a configurable degree of parallelism (currently 8). The shared CCM cluster -is initialized the first time it's used, and stopped before moving on to serial tests. Note that we -run with `parallel=classes`, which means methods within the same class never run concurrent to each -other. - -To make an integration test parallelizable, annotate it with `@Category(ParallelizableTests.class)`. -If you use CCM, it **must** be with `CcmRule`. - -For an example of a Simulacron-based parallelizable test, see `NodeTargetingIT`. For a CCM-based -test, see `DirectCompressionIT`. - -##### Serial tests - -These tests cannot run in parallel, in general because they require CCM clusters of different sizes, -or with a specific configuration (we never run more than one CCM cluster simultaneously: it would be -too resource-intensive, and too complicated to manage all the ports). - -The build runs them one by one, after the parallelizable tests. - -To make an integration test serial, do not annotate it with `@Category`. The CCM rule **must** be -`CustomCcmRule`. - -For an example, see `DefaultLoadBalancingPolicyIT`. - -Note: if multiple serial tests have a common "base" class, do not pull up `CustomCcmRule`, each -child class must have its own instance. Otherwise they share the same CCM instance, and the first -one destroys it on teardown. See `TokenITBase` for how to organize code in those cases. 
- -##### Isolated tests - -Not only can those tests not run in parallel, they also require specific environment tweaks, -typically system properties that need to be set before initialization. - -The build runs them one by one, *each in its own JVM fork*, after the serial tests. - -To isolate an integration test, annotate it with `@Category(IsolatedTests.class)`. The CCM rule -**must** be `CustomCcmRule`. - -For an example, see `HeapCompressionIT`. - -#### About test rules - -Do not mix `CcmRule` and `SimulacronRule` in the same test. It makes things harder to follow, and -can be inefficient (if the `SimulacronRule` is method-level, it will create a Simulacron cluster for -every test method, even those that only need CCM). - -##### Class-level rules - -Rules annotated with `@ClassRule` wrap the whole test class, and are reused across methods. Try to -use this as much as possible, as it's more efficient. The fields need to be static; also make them -final and use constant naming conventions, like `CCM_RULE`. - -When you use a server rule (`CcmRule` or `SimulacronRule`) and a `SessionRule` at the same level, -wrap them into a rule chain to ensure proper initialization order: - -```java -private static final CcmRule CCM_RULE = CcmRule.getInstance(); -private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - -@ClassRule -public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); -``` - -##### Method-level rules - -Rules annotated with `@Rule` wrap each test method. Use lower-camel case for field names: - -```java -private CcmRule ccmRule = CcmRule.getInstance(); -private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - -@ClassRule -public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); -``` - -Only use this for: - -* CCM tests that use `@CassandraRequirement` or `@DseRequirement` restrictions at the method level - (ex: `BatchStatementIT`). 
-* tests where you *really* need to restart from a clean state for every method. - -##### Mixed - -It's also possible to use a `@ClassRule` for CCM / Simulacron, and a `@Rule` for the session rule. -In that case, you don't need to use a rule chain. - -## Running the tests - -### Unit tests - - mvn clean test - -This currently takes about 30 seconds. The goal is to keep it within a couple of minutes (it runs -for each commit if you enable the pre-commit hook -- see below). - -### Integration tests - - mvn clean verify - -This currently takes about 9 minutes. We don't have a hard limit, but ideally it should stay within -30 minutes to 1 hour. - -You can skip test categories individually with `-DskipParallelizableITs`, `-DskipSerialITs` and -`-DskipIsolatedITs` (`-DskipITs` still works to skip them all at once). - -### Configuring MacOS for Simulacron - -Simulacron (used in integration tests) relies on loopback aliases to simulate multiple nodes. On -Linux or Windows, you shouldn't have anything to do. On MacOS, run this script: - -``` -#!/bin/bash -for sub in {0..4}; do - echo "Opening for 127.0.$sub" - for i in {0..255}; do sudo ifconfig lo0 alias 127.0.$sub.$i up; done -done -``` - -Note that this is known to cause temporary increased CPU usage in OS X initially while mDNSResponder -acclimates itself to the presence of added IP addresses. This lasts several minutes. Also, this does -not survive reboots. - - -## License headers - -The build will fail if some license headers are missing. To update all files from the command line, -run: - -``` -mvn license:format -``` - -## Pre-commit hook (highly recommended) - -Ensure `pre-commit.sh` is executable, then run: - -``` -ln -s ../../pre-commit.sh .git/hooks/pre-commit -``` - -This will only allow commits if the tests pass. It is also a good reminder to keep the test suite -short. - -Note: the tests run on the current state of the working directory. 
I tried to add a `git stash` in -the script to only test what's actually being committed, but I couldn't get it to run reliably -(it's still in there but commented). Keep this in mind when you commit, and don't forget to re-add -the changes if the first attempt failed and you fixed the tests. - -## Speeding up the build for local tests - -If you need to install something in your local repository quickly, you can use the `fast` profile to -skip all "non-essential" checks (licenses, formatting, tests, etc): - -``` -mvn clean install -Pfast -``` - -You can speed things up even more by targeting specific modules with the `-pl` option: - -``` -mvn clean install -Pfast -pl core,query-builder,mapper-runtime,mapper-processor,bom -``` - -Please run the normal build at least once before you push your changes. - -## Commits - -Keep your changes **focused**. Each commit should have a single, clear purpose expressed in its -message. - -Resist the urge to "fix" cosmetic issues (add/remove blank lines, move methods, etc.) in existing -code. This adds cognitive load for reviewers, who have to figure out which changes are relevant to -the actual issue. If you see legitimate issues, like typos, address them in a separate commit (it's -fine to group multiple typo fixes in a single commit). - -Isolate trivial refactorings into separate commits. For example, a method rename that affects dozens -of call sites can be reviewed in a few seconds, but if it's part of a larger diff it gets mixed up -with more complex changes (that might affect the same lines), and reviewers have to check every -line. - -Commit message subjects start with a capital letter, use the imperative form and do **not** end -with a period: - -* correct: "Add test for CQL request handler" -* incorrect: "~~Added test for CQL request handler~~" -* incorrect: "~~New test for CQL request handler~~" - -Avoid catch-all messages like "Minor cleanup", "Various fixes", etc. 
They don't provide any useful -information to reviewers, and might be a sign that your commit contains unrelated changes. - -We don't enforce a particular subject line length limit, but try to keep it short. - -You can add more details after the subject line, separated by a blank line. The following pattern -(inspired by [Netty](http://netty.io/wiki/writing-a-commit-message.html)) is not mandatory, but -welcome for complex changes: - -``` -One line description of your change - -Motivation: - -Explain here the context, and why you're making that change. -What is the problem you're trying to solve. - -Modifications: - -Describe the modifications you've done. - -Result: - -After your change, what will change. -``` - -## Pull requests - -Like commits, pull requests should be focused on a single, clearly stated goal. - -Don't base a pull request onto another one, it's too complicated to follow two branches that evolve -at the same time. If a ticket depends on another, wait for the first one to be merged. - -If you have to address feedback, avoid rewriting the history (e.g. squashing or amending commits): -this makes the reviewers' job harder, because they have to re-read the full diff and figure out -where your new changes are. Instead, push a new commit on top of the existing history; it will be -squashed later when the PR gets merged. If the history is complex, it's a good idea to indicate in -the message where the changes should be squashed: - -``` -* 20c88f4 - Address feedback (to squash with "Add metadata parsing logic") (36 minutes ago) -* 7044739 - Fix various typos in Javadocs (2 days ago) -* 574dd08 - Add metadata parsing logic (2 days ago) -``` - -(Note that the message refers to the other commit's subject line, not the SHA-1. This way it's still -relevant if there are intermediary rebases.) - -If you need new stuff from the base branch, it's fine to rebase and force-push, as long as you don't -rewrite the history. 
Just give a heads up to the reviewers beforehand. Don't push a merge commit to -a pull request. diff --git a/Jenkinsfile-asf b/Jenkinsfile-asf deleted file mode 100644 index 4b5041903c1..00000000000 --- a/Jenkinsfile-asf +++ /dev/null @@ -1,81 +0,0 @@ -#!groovy - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -pipeline { - agent { - label 'cassandra-small' - } - - triggers { - // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) - cron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? 
'@weekly' : '') - } - - stages { - stage('Matrix') { - matrix { - axes { - axis { - name 'TEST_JAVA_VERSION' - values 'openjdk@1.8.0-292', 'openjdk@1.11.0-9', 'openjdk@1.17.0', 'openjdk@1.21.0' - } - axis { - name 'SERVER_VERSION' - values '3.11', - '4.0', - '4.1', - '5.0' - } - } - stages { - stage('Tests') { - agent { - label 'cassandra-medium' - } - steps { - script { - executeTests() - junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true - junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true - } - } - } - } - } - } - } -} - -def executeTests() { - def testJavaMajorVersion = (TEST_JAVA_VERSION =~ /@(?:1\.)?(\d+)/)[0][1] - sh """ - container_id=\$(docker run -td -e TEST_JAVA_VERSION=${TEST_JAVA_VERSION} -e SERVER_VERSION=${SERVER_VERSION} -e TEST_JAVA_MAJOR_VERSION=${testJavaMajorVersion} -v \$(pwd):/home/docker/cassandra-java-driver apache.jfrog.io/cassan-docker/apache/cassandra-java-driver-testing-ubuntu2204 'sleep 2h') - docker exec --user root \$container_id bash -c \"sudo bash /home/docker/cassandra-java-driver/ci/create-user.sh docker \$(id -u) \$(id -g) /home/docker/cassandra-java-driver\" - docker exec --user docker \$container_id './cassandra-java-driver/ci/run-tests.sh' - ( nohup docker stop \$container_id >/dev/null 2>/dev/null & ) - """ -} - -// branch pattern for cron -// should match 3.x, 4.x, 4.5.x, etc -def branchPatternCron() { - ~'((\\d+(\\.[\\dx]+)+))' -} diff --git a/Jenkinsfile-datastax b/Jenkinsfile-datastax deleted file mode 100644 index 602f33101ca..00000000000 --- a/Jenkinsfile-datastax +++ /dev/null @@ -1,639 +0,0 @@ -#!groovy -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -def initializeEnvironment() { - env.DRIVER_DISPLAY_NAME = 'Java Driver for Apache CassandraⓇ' - env.DRIVER_METRIC_TYPE = 'oss' - - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" - - env.MAVEN_HOME = "${env.HOME}/.mvn/apache-maven-3.8.8" - env.PATH = "${env.MAVEN_HOME}/bin:${env.PATH}" - - /* - * As of JAVA-3042 JAVA_HOME is always set to JDK8 and this is currently necessary for mvn compile and DSE Search/Graph. - * To facilitate testing with JDK11/17 we feed the appropriate JAVA_HOME into the maven build via commandline. - * - * Maven command-line flags: - * - -DtestJavaHome=/path/to/java/home: overrides JAVA_HOME for surefire/failsafe tests, defaults to environment JAVA_HOME. - * - -Ptest-jdk-N: enables profile for running tests with a specific JDK version (substitute N for 8/11/17). - * - * Note test-jdk-N is also automatically loaded based off JAVA_HOME SDK version so testing with an older SDK is not supported. - * - * Environment variables: - * - JAVA_HOME: Path to JDK used for mvn (all steps except surefire/failsafe), Cassandra, DSE. 
- * - JAVA8_HOME: Path to JDK8 used for Cassandra/DSE if ccm determines JAVA_HOME is not compatible with the chosen backend. - * - TEST_JAVA_HOME: PATH to JDK used for surefire/failsafe testing. - * - TEST_JAVA_VERSION: TEST_JAVA_HOME SDK version number [8/11/17], used to configure test-jdk-N profile in maven (see above) - */ - - env.JAVA_HOME = sh(label: 'Get JAVA_HOME',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba which ${JABBA_VERSION}''', returnStdout: true).trim() - env.JAVA8_HOME = sh(label: 'Get JAVA8_HOME',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba which 1.8''', returnStdout: true).trim() - - sh label: 'Download Apache CassandraⓇ, DataStax Enterprise or DataStax HCD ',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba use 1.8 - . ${CCM_ENVIRONMENT_SHELL} ${SERVER_VERSION} - ''' - - if (env.SERVER_VERSION.split('-')[0] == 'dse') { - env.DSE_FIXED_VERSION = env.SERVER_VERSION.split('-')[1] - sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le - cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF -CCM_CASSANDRA_VERSION=${DSE_FIXED_VERSION} # maintain for backwards compatibility -CCM_VERSION=${DSE_FIXED_VERSION} -CCM_SERVER_TYPE=dse -DSE_VERSION=${DSE_FIXED_VERSION} -CCM_BRANCH=${DSE_FIXED_VERSION} -DSE_BRANCH=${DSE_FIXED_VERSION} -ENVIRONMENT_EOF - ''' - } - - if (env.SERVER_VERSION.split('-')[0] == 'hcd') { - env.HCD_FIXED_VERSION = env.SERVER_VERSION.split('-')[1] - sh label: 'Update environment for DataStax HCD', script: '''#!/bin/bash -le - cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF -CCM_CASSANDRA_VERSION=${HCD_FIXED_VERSION} # maintain for backwards compatibility -CCM_VERSION=${HCD_FIXED_VERSION} -CCM_SERVER_TYPE=hcd -HCD_VERSION=${HCD_FIXED_VERSION} -CCM_BRANCH=${HCD_FIXED_VERSION} -HCD_BRANCH=${HCD_FIXED_VERSION} -ENVIRONMENT_EOF - ''' - } - - sh label: 'Display Java and environment information',script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - . ${JABBA_SHELL} - jabba use 1.8 - - java -version - mvn -v - printenv | sort - ''' -} - -def buildDriver(jabbaVersion) { - def buildDriverScript = '''#!/bin/bash -le - - . ${JABBA_SHELL} - jabba use '''+jabbaVersion+''' - - echo "Building with Java version '''+jabbaVersion+'''" - - mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true - ''' - sh label: 'Build driver', script: buildDriverScript -} - -def executeTests() { - def testJavaHome = sh(label: 'Get TEST_JAVA_HOME',script: '''#!/bin/bash -le - . ${JABBA_SHELL} - jabba which ${JABBA_VERSION}''', returnStdout: true).trim() - def testJavaVersion = (JABBA_VERSION =~ /.*\.(\d+)/)[0][1] - - def executeTestScript = '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - . ${JABBA_SHELL} - jabba use 1.8 - - if [ "${JABBA_VERSION}" != "1.8" ]; then - SKIP_JAVADOCS=true - else - SKIP_JAVADOCS=false - fi - - INTEGRATION_TESTS_FILTER_ARGUMENT="" - if [ ! 
-z "${INTEGRATION_TESTS_FILTER}" ]; then - INTEGRATION_TESTS_FILTER_ARGUMENT="-Dit.test=${INTEGRATION_TESTS_FILTER}" - fi - printenv | sort - - mvn -B -V ${INTEGRATION_TESTS_FILTER_ARGUMENT} -T 1 verify \ - -Ptest-jdk-'''+testJavaVersion+''' \ - -DtestJavaHome='''+testJavaHome+''' \ - -DfailIfNoTests=false \ - -Dmaven.test.failure.ignore=true \ - -Dmaven.javadoc.skip=${SKIP_JAVADOCS} \ - -Dccm.version=${CCM_CASSANDRA_VERSION} \ - -Dccm.distribution=${CCM_SERVER_TYPE:cassandra} \ - -Dproxy.path=${HOME}/proxy \ - ${SERIAL_ITS_ARGUMENT} \ - ${ISOLATED_ITS_ARGUMENT} \ - ${PARALLELIZABLE_ITS_ARGUMENT} - ''' - echo "Invoking Maven with parameters test-jdk-${testJavaVersion} and testJavaHome = ${testJavaHome}" - sh label: 'Execute tests', script: executeTestScript -} - -def executeCodeCoverage() { - jacoco( - execPattern: '**/target/jacoco.exec', - classPattern: '**/classes', - sourcePattern: '**/src/main/java' - ) -} - -def notifySlack(status = 'started') { - // Notify Slack channel for every build except adhoc executions - if (params.ADHOC_BUILD_TYPE != 'BUILD-AND-EXECUTE-TESTS') { - // Set the global pipeline scoped environment (this is above each matrix) - env.BUILD_STATED_SLACK_NOTIFIED = 'true' - - def buildType = 'Commit' - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" - } - - def color = 'good' // Green - if (status.equalsIgnoreCase('aborted')) { - color = '808080' // Grey - } else if (status.equalsIgnoreCase('unstable')) { - color = 'warning' // Orange - } else if (status.equalsIgnoreCase('failed')) { - color = 'danger' // Red - } - - def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] -<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" - if (!status.equalsIgnoreCase('Started')) { - message += """ -${status} after ${currentBuild.durationString - ' and counting'}""" - } - 
- slackSend color: "${color}", - channel: "#java-driver-dev-bots", - message: "${message}" - } -} - -def describePerCommitStage() { - script { - currentBuild.displayName = "Per-Commit build" - currentBuild.description = 'Per-Commit build and testing of development Apache CassandraⓇ and current DataStax Enterprise against Oracle JDK 8' - } -} - -def describeAdhocAndScheduledTestingStage() { - script { - if (params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION') { - // Ad-hoc build - currentBuild.displayName = "Adhoc testing" - currentBuild.description = "Testing ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} against JDK version ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION}" - } else { - // Scheduled build - currentBuild.displayName = "${params.CI_SCHEDULE.toLowerCase().replaceAll('_', ' ').capitalize()} schedule" - currentBuild.description = "Testing server versions [${params.CI_SCHEDULE_SERVER_VERSIONS}] against JDK version ${params.CI_SCHEDULE_JABBA_VERSION}" - } - } -} - -// branch pattern for cron -// should match 3.x, 4.x, 4.5.x, etc -def branchPatternCron() { - ~"((\\d+(\\.[\\dx]+)+))" -} - -pipeline { - agent none - - // Global pipeline timeout - options { - timeout(time: 10, unit: 'HOURS') - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform a adhoc build operation

- - - - - - - - - - - - - - - -
ChoiceDescription
BUILDPerforms a Per-Commit build
BUILD-AND-EXECUTE-TESTSPerforms a build and executes the integration and unit tests
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', - choices: ['4.0', // Previous Apache CassandraⓇ - '4.1', // Previous Apache CassandraⓇ - '5.0', // Current Apache CassandraⓇ - 'dse-4.8.16', // Previous EOSL DataStax Enterprise - 'dse-5.0.15', // Long Term Support DataStax Enterprise - 'dse-5.1.35', // Legacy DataStax Enterprise - 'dse-6.0.18', // Previous DataStax Enterprise - 'dse-6.7.17', // Previous DataStax Enterprise - 'dse-6.8.30', // Current DataStax Enterprise - 'dse-6.9.0', // Current DataStax Enterprise - 'hcd-1.0.0', // Current DataStax HCD - 'ALL'], - description: '''Apache Cassandra® and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS builds - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
4.0Apache Cassandra® v4.0.x
4.1Apache Cassandra® v4.1.x
5.0Apache Cassandra® v5.0.x
dse-4.8.16DataStax Enterprise v4.8.x (END OF SERVICE LIFE)
dse-5.0.15DataStax Enterprise v5.0.x (Long Term Support)
dse-5.1.35DataStax Enterprise v5.1.x
dse-6.0.18DataStax Enterprise v6.0.x
dse-6.7.17DataStax Enterprise v6.7.x
dse-6.8.30DataStax Enterprise v6.8.x
dse-6.9.0DataStax Enterprise v6.9.x
hcd-1.0.0DataStax HCD v1.0.x
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION', - choices: [ - '1.8', // Oracle JDK version 1.8 (current default) - 'openjdk@1.11', // OpenJDK version 11 - 'openjdk@1.17', // OpenJDK version 17 - 'openjdk@1.21' // OpenJDK version 21 - ], - description: '''JDK version to use for TESTING when running adhoc BUILD-AND-EXECUTE-TESTS builds. All builds will use JDK8 for building the driver - - - - - - - - - - - - - - - - - - - - - - - -
ChoiceDescription
1.8Oracle JDK version 1.8 (Used for compiling regardless of choice)
openjdk@1.11OpenJDK version 11
openjdk@1.17OpenJDK version 17
openjdk@1.21OpenJDK version 21
''') - booleanParam( - name: 'SKIP_SERIAL_ITS', - defaultValue: false, - description: 'Flag to determine if serial integration tests should be skipped') - booleanParam( - name: 'SKIP_ISOLATED_ITS', - defaultValue: false, - description: 'Flag to determine if isolated integration tests should be skipped') - booleanParam( - name: 'SKIP_PARALLELIZABLE_ITS', - defaultValue: false, - description: 'Flag to determine if parallel integration tests should be skipped') - string( - name: 'INTEGRATION_TESTS_FILTER', - defaultValue: '', - description: '''

Run only the tests whose name match patterns

- See Maven Failsafe Plugin for more information on filtering integration tests''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS', 'MONTHLY'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSIONS', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_JABBA_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing JDK version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) - parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ - # Every weekend (Saturday, Sunday) around 2:00 AM - H 2 * * 0 %CI_SCHEDULE=WEEKENDS;CI_SCHEDULE_SERVER_VERSIONS=4.0 4.1 5.0 dse-4.8.16 dse-5.0.15 dse-5.1.35 dse-6.0.18 dse-6.7.17;CI_SCHEDULE_JABBA_VERSION=1.8 - # Every weeknight (Monday - Friday) around 12:00 PM noon - H 12 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=4.1 5.0 dse-6.8.30 dse-6.9.0 hcd-1.0.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.11 - H 12 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=4.1 5.0 dse-6.8.30 dse-6.9.0 hcd-1.0.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.17 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/focal64/java-driver' - JABBA_SHELL = '/usr/lib/jabba/jabba.sh' - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - SERIAL_ITS_ARGUMENT = "-DskipSerialITs=${params.SKIP_SERIAL_ITS}" - ISOLATED_ITS_ARGUMENT = "-DskipIsolatedITs=${params.SKIP_ISOLATED_ITS}" - PARALLELIZABLE_ITS_ARGUMENT = "-DskipParallelizableITs=${params.SKIP_PARALLELIZABLE_ITS}" - INTEGRATION_TESTS_FILTER = "${params.INTEGRATION_TESTS_FILTER}" - } 
- - stages { - stage ('Per-Commit') { - options { - timeout(time: 2, unit: 'HOURS') - } - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } - expression { params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' } - expression { params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - - matrix { - axes { - axis { - name 'SERVER_VERSION' - values '4.0', // Previous Apache CassandraⓇ - '5.0', // Current Apache CassandraⓇ - 'dse-6.8.30', // Current DataStax Enterprise - 'dse-6.9.0', // Current DataStax Enterprise - 'hcd-1.0.0' // Current DataStax HCD - } - axis { - name 'JABBA_VERSION' - values '1.8', // jdk8 - 'openjdk@1.11', // jdk11 - 'openjdk@1.17', // jdk17 - 'openjdk@1.21' // jdk21 - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describePerCommitStage() - } - } - stage('Build-Driver') { - steps { - buildDriver('1.8') - } - } - stage('Execute-Tests') { - steps { - catchError { - // Use the matrix JDK for testing - executeTests() - } - } - post { - always { - /* - * Empty results are possible - * - * - Build failures during mvn verify may exist so report may not be available - */ - junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true - junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true - } - } - } - stage('Execute-Code-Coverage') { - // Ensure the code coverage is run only once per-commit - when { environment name: 'SERVER_VERSION', value: '4.0' } - steps { - executeCodeCoverage() - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - 
notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - stage('Adhoc-And-Scheduled-Testing') { - when { - beforeAgent true - allOf { - expression { (params.ADHOC_BUILD_TYPE == 'BUILD' && params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') || - params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - not { buildingTag() } - anyOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } - expression { params.CI_SCHEDULE_SERVER_VERSIONS != 'DO-NOT-CHANGE-THIS-SELECTION' } - } - } - } - } - - environment { - SERVER_VERSIONS = "${params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION : params.CI_SCHEDULE_SERVER_VERSIONS}" - JABBA_VERSION = "${params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION : params.CI_SCHEDULE_JABBA_VERSION}" - } - - matrix { - axes { - axis { - name 'SERVER_VERSION' - values '4.0', // Previous Apache CassandraⓇ - '4.1', // Previous Apache CassandraⓇ - '5.0', // Current Apache CassandraⓇ - 'dse-4.8.16', // Previous EOSL DataStax Enterprise - 'dse-5.0.15', // Last EOSL DataStax Enterprise - 'dse-5.1.35', // Legacy DataStax Enterprise - 'dse-6.0.18', // Previous DataStax Enterprise - 'dse-6.7.17', // Previous DataStax Enterprise - 'dse-6.8.30', // Current DataStax Enterprise - 'dse-6.9.0', // Current DataStax Enterprise - 'hcd-1.0.0' // Current DataStax HCD - } - } - when { - beforeAgent true - allOf { - expression { return env.SERVER_VERSIONS.split(' ').any { it =~ /(ALL|${env.SERVER_VERSION})/ } } - } - } - agent { - label "${env.OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - 
stage('Describe-Build') { - steps { - describeAdhocAndScheduledTestingStage() - } - } - stage('Build-Driver') { - steps { - buildDriver('1.8') - } - } - stage('Execute-Tests') { - steps { - catchError { - // Use the matrix JDK for testing - executeTests() - } - } - post { - always { - /* - * Empty results are possible - * - * - Build failures during mvn verify may exist so report may not be available - * - With boolean parameters to skip tests a failsafe report may not be available - */ - junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true - junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true - } - } - } - stage('Execute-Code-Coverage') { - // Ensure the code coverage is run only once per-commit - when { - allOf { - environment name: 'SERVER_VERSION', value: '4.0' - environment name: 'JABBA_VERSION', value: '1.8' - } - } - steps { - executeCodeCoverage() - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - } -} diff --git a/LICENSE b/LICENSE index a157e31d058..d6456956733 100644 --- a/LICENSE +++ b/LICENSE @@ -200,24 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - -Apache Cassandra Java Driver bundles code and files from the following projects: - -JNR project -Copyright (C) 2008-2010 Wayne Meissner -This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. -see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java - -Protocol Buffers -Copyright 2008 Google Inc. -This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). 
-see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java - -Guava -Copyright (C) 2007 The Guava Authors -This product includes software developed as part of the Guava project ( https://guava.dev ). -see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java - -Copyright (C) 2018 Christian Stein -This product includes software developed by Christian Stein -see ci/install-jdk.sh diff --git a/LICENSE_binary b/LICENSE_binary deleted file mode 100644 index b59c6ec22bb..00000000000 --- a/LICENSE_binary +++ /dev/null @@ -1,247 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -Apache Cassandra Java Driver bundles code and files from the following projects: - -JNR project -Copyright (C) 2008-2010 Wayne Meissner -This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. -see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java - -Protocol Buffers -Copyright 2008 Google Inc. -This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). -see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java - -Guava -Copyright (C) 2007 The Guava Authors -This product includes software developed as part of the Guava project ( https://guava.dev ). -see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java - -Copyright (C) 2018 Christian Stein -This product includes software developed by Christian Stein -see ci/install-jdk.sh - -This product bundles Java Native Runtime - POSIX 3.1.15, -which is available under the Eclipse Public License version 2.0. -see licenses/jnr-posix.txt - -This product bundles jnr-x86asm 1.0.2, -which is available under the MIT License. -see licenses/jnr-x86asm.txt - -This product bundles ASM 9.2: a very small and fast Java bytecode manipulation framework, -which is available under the 3-Clause BSD License. 
-see licenses/asm.txt - -This product bundles HdrHistogram 2.1.12: A High Dynamic Range (HDR) Histogram, -which is available under the 2-Clause BSD License. -see licenses/HdrHistogram.txt - -This product bundles The Simple Logging Facade for Java (SLF4J) API 1.7.26, -which is available under the MIT License. -see licenses/slf4j-api.txt - -This product bundles Reactive Streams 1.0.3, -which is available under the MIT License. -see licenses/reactive-streams.txt diff --git a/NOTICE.txt b/NOTICE.txt index 8e27ae3e52f..477f0645ed9 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,20 @@ Apache Cassandra Java Driver Copyright 2012- The Apache Software Foundation -This product includes software developed at The Apache Software +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). + +JNR project +Copyright (C) 2008-2010 Wayne Meissner +This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. +see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java + +Protocol Buffers +Copyright 2008 Google Inc. +This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). +see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java + +Guava +Copyright (C) 2007 The Guava Authors +This product includes software developed as part of the Guava project ( https://guava.dev ). +see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java \ No newline at end of file diff --git a/NOTICE_binary.txt b/NOTICE_binary.txt deleted file mode 100644 index f6f11c298f6..00000000000 --- a/NOTICE_binary.txt +++ /dev/null @@ -1,249 +0,0 @@ -Apache Cassandra Java Driver -Copyright 2012- The Apache Software Foundation - -This product includes software developed at The Apache Software -Foundation (http://www.apache.org/). 
- -This compiled product also includes Apache-licensed dependencies -that contain the following NOTICE information: - -================================================================== -io.netty:netty-handler NOTICE.txt -================================================================== -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * https://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * https://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/yawkat/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product optionally depends on 'zstd-jni', a zstd-jni Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.zstd-jni.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/luben/zstd-jni - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * https://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * https://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. - - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - -This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. -This private header is also used by Apple's open source - mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
- - * LICENSE: - * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) - * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h - -This product optionally depends on 'Brotli4j', Brotli compression and -decompression for Java., which can be obtained at: - - * LICENSE: - * license/LICENSE.brotli4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/hyperxpro/Brotli4j diff --git a/README.md b/README.md deleted file mode 100644 index d8ef01d0964..00000000000 --- a/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# Java Driver for Apache Cassandra® - -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/java-driver-core/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/java-driver-core) - -*If you're reading this on github.com, please note that this is the readme for the development -version and that some features described here might not yet have been released. You can find the -documentation for latest version through [DataStax Docs] or via the release tags, e.g. -[4.17.0](https://github.com/datastax/java-driver/tree/4.17.0).* - -A modern, feature-rich and highly tunable Java client library for [Apache Cassandra®] \(2.1+) and -[DataStax Enterprise] \(4.7+), and [DataStax Astra], using exclusively Cassandra's binary protocol -and Cassandra Query Language (CQL) v3. - -[DataStax Docs]: http://docs.datastax.com/en/developer/java-driver/ -[Apache Cassandra®]: http://cassandra.apache.org/ - -## Getting the driver - -The driver artifacts are published in Maven central, under the group id [org.apache.cassandra]; there -are multiple modules, all prefixed with `java-driver-`. 
- -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - - org.apache.cassandra - java-driver-query-builder - ${driver.version} - - - - org.apache.cassandra - java-driver-mapper-runtime - ${driver.version} - -``` - -Note that the query builder is now published as a separate artifact, you'll need to add the -dependency if you plan to use it. - -Refer to each module's manual for more details ([core](manual/core/), [query -builder](manual/query_builder/), [mapper](manual/mapper)). - -[org.apache.cassandra]: http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.apache.cassandra%22 - -## Compatibility - -The driver is compatible with Apache Cassandra® 2.1 and higher, DataStax Enterprise 4.7 and -higher, and DataStax Astra. - -It requires Java 8 or higher. - -Disclaimer: Some DataStax/DataStax Enterprise products might partially work on big-endian systems, -but DataStax does not officially support these systems. - -## Migrating from previous versions - -Java Driver 4 is **not binary compatible** with previous versions. However, most of the concepts -remain unchanged, and the new API will look very familiar to 2.x and 3.x users. - -See the [upgrade guide](upgrade_guide/) for details. - -## Useful links - -* [Manual](manual/) -* [API docs] -* Bug tracking: [JIRA] -* [Mailing list] -* [Changelog] -* [FAQ] - -[API docs]: https://docs.datastax.com/en/drivers/java/4.17 -[JIRA]: https://issues.apache.org/jira/issues/?jql=project%20%3D%20CASSJAVA%20ORDER%20BY%20key%20DESC -[Mailing list]: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user -[Changelog]: changelog/ -[FAQ]: faq/ - -## License - -© The Apache Software Foundation - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ----- - -Apache Cassandra, Apache, Tomcat, Lucene, Solr, Hadoop, Spark, TinkerPop, and Cassandra are -trademarks of the [Apache Software Foundation](http://www.apache.org/) or its subsidiaries in -Canada, the United States and/or other countries. - -Binary artifacts of this product bundle Java Native Runtime libraries, which is available under the Eclipse Public License version 2.0. diff --git a/bom/pom.xml b/bom/pom.xml deleted file mode 100644 index dd76153a9b1..00000000000 --- a/bom/pom.xml +++ /dev/null @@ -1,121 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-bom - pom - Apache Cassandra Java Driver - Bill Of Materials - - - - org.apache.cassandra - java-driver-core - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-core-shaded - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-mapper-processor - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-mapper-runtime - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-query-builder - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-guava-shaded - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-test-infra - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-metrics-micrometer - 4.19.3-SNAPSHOT - - - org.apache.cassandra - java-driver-metrics-microprofile - 4.19.3-SNAPSHOT - - - com.datastax.oss - native-protocol - 1.5.2 - - - - - - - org.codehaus.mojo - flatten-maven-plugin - - - flatten - process-resources - - flatten - - - - keep - expand - expand - expand - expand - expand - expand - expand - expand - expand - 
expand - expand - expand - remove - - true - - - - - - - diff --git a/changelog/README.md b/changelog/README.md deleted file mode 100644 index b01c3db3bf9..00000000000 --- a/changelog/README.md +++ /dev/null @@ -1,2413 +0,0 @@ - - -## Changelog - - - -### 4.19.2 - -- [bug] CASSJAVA-116: Retry or Speculative Execution with RequestIdGenerator throws "Duplicate Key" - -### 4.19.1 - -- [improvement] CASSJAVA-97: Let users inject an ID for each request and write to the custom payload -- [improvement] CASSJAVA-92: Add Local DC to driver connection info and provide visibility with nodetool clientstats -- [bug] PR 2025: Eliminate lock in ConcurrencyLimitingRequestThrottler -- [improvement] CASSJAVA-89: Fix deprecated table configs in Cassandra 5 -- [improvement] PR 2028: Remove unnecessary locking in DefaultNettyOptions -- [improvement] CASSJAVA-102: Fix revapi spurious complaints about optional dependencies -- [improvement] PR 2013: Add SubnetAddressTranslator -- [improvement] CASSJAVA-68: Improve DefaultCodecRegistry.CacheKey#hashCode() to eliminate Object[] allocation -- [improvement] PR 1989: Bump Jackson version to la(te)st 2.13.x, 2.13.5 -- [improvement] CASSJAVA-76: Make guava an optional dependency of java-driver-guava-shaded -- [bug] PR 2035: Prevent long overflow in SNI address resolution -- [improvement] CASSJAVA-77: 4.x: Upgrade Netty to 4.1.119 -- [improvement] CASSJAVA-40: Driver testing against Java 21 -- [improvement] CASSJAVA-90: Update native-protocol -- [improvement] CASSJAVA-80: Support configuration to disable DNS reverse-lookups for SAN validation - -### 4.19.0 - -- [bug] JAVA-3055: Prevent PreparedStatement cache to be polluted if a request is cancelled. 
-- [bug] JAVA-3168: Copy node info for contact points on initial node refresh only from first match by endpoint -- [improvement] JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0) -- [improvement] CASSJAVA-53: Update Guava version used in cassandra-java-driver -- [improvement] JAVA-3118: Add support for vector data type in Schema Builder, QueryBuilder -- [bug] CASSJAVA-55: Remove setting "Host" header for metadata requests -- [bug] JAVA-3057: Allow decoding a UDT that has more fields than expected -- [improvement] CASSJAVA-52: Bring java-driver-shaded-guava into the repo as a submodule -- [bug] CASSJAVA-2: TableMetadata#describe produces invalid CQL when a type of a column is a vector -- [bug] JAVA-3051: Memory leak in DefaultLoadBalancingPolicy measurement of response times -- [improvement] CASSJAVA-14: Query builder support for NOT CQL syntax -- [bug] CASSJAVA-12: DefaultSslEngineFactory missing null check on close -- [improvement] CASSJAVA-46: Expose table extensions via schema builders -- [bug] PR 1938: Fix uncaught exception during graceful channel shutdown after exceeding max orphan ids -- [improvement] PR 1607: Annotate BatchStatement, Statement, SimpleStatement methods with CheckReturnValue -- [improvement] CASSJAVA-41: Reduce lock held duration in ConcurrencyLimitingRequestThrottler -- [bug] JAVA-3149: Async Query Cancellation Not Propagated To RequestThrottler -- [bug] JAVA-3167: CompletableFutures.allSuccessful() may return never completed future -- [bug] PR 1620: Don't return empty routing key when partition key is unbound -- [improvement] PR 1623: Limit calls to Conversions.resolveExecutionProfile -- [improvement] CASSJAVA-29: Update target Cassandra versions for integration tests, support new 5.0.x - -### 4.18.1 - -- [improvement] JAVA-3142: Ability to specify ordering of remote local dc's via new configuration for graceful automatic failovers -- [bug] CASSANDRA-19457: Object reference 
in Micrometer metrics prevent GC from reclaiming Session instances -- [improvement] CASSANDRA-19468: Don't swallow exception during metadata refresh -- [bug] CASSANDRA-19333: Fix data corruption in VectorCodec when using heap buffers -- [improvement] CASSANDRA-19290: Replace uses of AttributeKey.newInstance -- [improvement] CASSANDRA-19352: Support native_transport_(address|port) + native_transport_port_ssl for DSE 6.8 (4.x edition) -- [improvement] CASSANDRA-19180: Support reloading keystore in cassandra-java-driver - -### 4.18.0 - -- [improvement] PR 1689: Add support for publishing percentile time series for the histogram metrics (nparaddi-walmart) -- [improvement] JAVA-3104: Do not eagerly pre-allocate array when deserializing CqlVector -- [improvement] JAVA-3111: upgrade jackson-databind to 2.13.4.2 to address gradle dependency issue -- [improvement] PR 1617: Improve ByteBufPrimitiveCodec readBytes (chibenwa) -- [improvement] JAVA-3095: Fix CREATE keyword in vector search example in upgrade guide -- [improvement] JAVA-3100: Update jackson-databind to 2.13.4.1 and jackson-jaxrs-json-provider to 2.13.4 to address recent CVEs -- [improvement] JAVA-3089: Forbid wildcard imports - -### 4.17.0 - -- [improvement] JAVA-3070: Make CqlVector and CqlDuration serializable -- [improvement] JAVA-3085: Initialize c.d.o.d.i.core.util.Dependency at Graal native image build-time -- [improvement] JAVA-3061: CqlVector API improvements, add support for accessing vectors directly as float arrays -- [improvement] JAVA-3042: Enable automated testing for Java17 -- [improvement] JAVA-3050: Upgrade Netty to 4.1.94 - -### 4.16.0 - -- [improvement] JAVA-3058: Clear prepared statement cache on UDT type change event -- [improvement] JAVA-3060: Add vector type, codec + support for parsing CQL type -- [improvement] DOC-2813: Add error handling guidance linking to a helpful blog post -- [improvement] JAVA-3045: Fix GraalVM native image support for GraalVM 22.2 - -### 4.15.0 - -- [improvement] 
JAVA-3041: Update Guava session sample code to use ProgrammaticArguments -- [improvement] JAVA-3022: Implement AddressTranslator for AWS PrivateLink -- [bug] JAVA-3021: Update table SchemaBuilder page to replace withPrimaryKey with withPartitionKey -- [bug] JAVA-3005: Node list refresh behavior in 4.x is different from 3.x -- [bug] JAVA-3002: spring-boot app keeps connecting to IP of replaced node -- [improvement] JAVA-3023 Upgrade Netty to 4.1.77 -- [improvement] JAVA-2995: CodecNotFoundException doesn't extend DriverException - -### 4.14.1 - -- [improvement] JAVA-3013: Upgrade dependencies to address CVEs and other security issues, 4.14.1 edition -- [improvement] JAVA-2977: Update Netty to resolve higher-priority CVEs -- [improvement] JAVA-3003: Update jnr-posix to address CVE-2014-4043 - -### 4.14.0 - -- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE -- [bug] JAVA-2987: BasicLoadBalancingPolicy remote computation assumes local DC is up and live -- [bug] JAVA-2992: Include options into DefaultTableMetadata equals and hash methods -- [improvement] JAVA-2982: Switch Esri geometry lib to an optional dependency -- [improvement] JAVA-2959: Don't throw NoNodeAvailableException when all connections busy - -### 4.13.0 - -- [improvement] JAVA-2940: Add GraalVM native image build configurations -- [improvement] JAVA-2953: Promote ProgrammaticPlainTextAuthProvider to the public API and add - credentials hot-reload -- [improvement] JAVA-2951: Accept multiple node state listeners, schema change listeners and request - trackers - -Merged from 4.12.x: - -- [bug] JAVA-2949: Provide mapper support for CompletionStage> -- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck - -### 4.12.1 - -Merged from 4.11.x: - -- [bug] JAVA-2949: Provide mapper support for CompletionStage> -- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck - -### 4.12.0 - -- [improvement] JAVA-2935: Make GetEntity and 
SetEntity methods resilient to incomplete data -- [improvement] JAVA-2944: Upgrade MicroProfile Metrics to 3.0 - -Merged from 4.11.x: - -- [bug] JAVA-2932: Make DefaultDriverConfigLoader.close() resilient to terminated executors -- [bug] JAVA-2945: Reinstate InternalDriverContext.getNodeFilter method -- [bug] JAVA-2947: Release buffer after decoding multi-slice frame -- [bug] JAVA-2946: Make MapperResultProducerService instances be located with user-provided class loader -- [bug] JAVA-2942: GraphStatement.setConsistencyLevel() is not effective -- [bug] JAVA-2941: Cannot add a single static column with the alter table API -- [bug] JAVA-2943: Prevent session leak with wrong keyspace name -- [bug] JAVA-2938: OverloadedException message is misleading - -### 4.11.3 - -- [bug] JAVA-2949: Provide mapper support for CompletionStage> -- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck - -### 4.11.2 - -- [bug] JAVA-2932: Make DefaultDriverConfigLoader.close() resilient to terminated executors -- [bug] JAVA-2945: Reinstate InternalDriverContext.getNodeFilter method -- [bug] JAVA-2947: Release buffer after decoding multi-slice frame -- [bug] JAVA-2946: Make MapperResultProducerService instances be located with user-provided class loader -- [bug] JAVA-2942: GraphStatement.setConsistencyLevel() is not effective -- [bug] JAVA-2941: Cannot add a single static column with the alter table API -- [bug] JAVA-2943: Prevent session leak with wrong keyspace name -- [bug] JAVA-2938: OverloadedException message is misleading - -### 4.11.1 - -- [bug] JAVA-2910: Add a configuration option to support strong values for prepared statements cache -- [bug] JAVA-2936: Support Protocol V6 -- [bug] JAVA-2934: Handle empty non-final pages in ReactiveResultSetSubscription - -### 4.11.0 - -- [improvement] JAVA-2930: Allow Micrometer to record histograms for timers -- [improvement] JAVA-2914: Transform node filter into a more flexible node distance evaluator -- [improvement] 
JAVA-2929: Revisit node-level metric eviction -- [new feature] JAVA-2830: Add mapper support for Java streams -- [bug] JAVA-2928: Generate counter increment/decrement constructs compatible with legacy C* - versions -- [new feature] JAVA-2872: Ability to customize metric names and tags -- [bug] JAVA-2925: Consider protocol version unsupported when server requires USE_BETA flag for it -- [improvement] JAVA-2704: Remove protocol v5 beta status, add v6-beta -- [improvement] JAVA-2916: Annotate generated classes with `@SuppressWarnings` -- [bug] JAVA-2927: Make Dropwizard truly optional -- [improvement] JAVA-2917: Include GraalVM substitutions for request processors and geo codecs -- [bug] JAVA-2918: Exclude invalid peers from schema agreement checks - -### 4.10.0 - -- [improvement] JAVA-2907: Switch Tinkerpop to an optional dependency -- [improvement] JAVA-2904: Upgrade Jackson to 2.12.0 and Tinkerpop to 3.4.9 -- [bug] JAVA-2911: Prevent control connection from scheduling too many reconnections -- [bug] JAVA-2902: Consider computed values when validating constructors for immutable entities -- [new feature] JAVA-2899: Re-introduce cross-DC failover in driver 4 -- [new feature] JAVA-2900: Re-introduce consistency downgrading retries -- [new feature] JAVA-2903: BlockHound integration -- [improvement] JAVA-2877: Allow skipping validation for individual mapped entities -- [improvement] JAVA-2871: Allow keyspace exclusions in the metadata, and exclude system keyspaces - by default -- [improvement] JAVA-2449: Use non-cryptographic random number generation in Uuids.random() -- [improvement] JAVA-2893: Allow duplicate keys in DefaultProgrammaticDriverConfigLoaderBuilder -- [documentation] JAVA-2894: Clarify usage of Statement.setQueryTimestamp -- [bug] JAVA-2889: Remove TypeSafe imports from DriverConfigLoader -- [bug] JAVA-2883: Use root locale explicitly when changing string case -- [bug] JAVA-2890: Fix off-by-one error in UdtCodec -- [improvement] JAVA-2905: Prevent new 
connections from using a protocol version higher than the negotiated one -- [bug] JAVA-2647: Handle token types in QueryBuilder.literal() -- [bug] JAVA-2887: Handle composite profiles with more than one key and/or backed by only one profile - -### 4.9.0 - -- [documentation] JAVA-2823: Make Astra more visible in the docs -- [documentation] JAVA-2869: Advise against using 4.5.x-4.6.0 in the upgrade guide -- [documentation] JAVA-2868: Cover reconnect-on-init in the manual -- [improvement] JAVA-2827: Exclude unused Tinkerpop transitive dependencies -- [improvement] JAVA-2827: Remove dependency to Tinkerpop gremlin-driver -- [task] JAVA-2859: Upgrade Tinkerpop to 3.4.8 -- [bug] JAVA-2726: Fix Tinkerpop incompatibility with JPMS -- [bug] JAVA-2842: Remove security vulnerabilities introduced by Tinkerpop -- [bug] JAVA-2867: Revisit compressor substitutions -- [improvement] JAVA-2870: Optimize memory usage of token map -- [improvement] JAVA-2855: Allow selection of the metrics framework via the config -- [improvement] JAVA-2864: Revisit mapper processor's messaging -- [new feature] JAVA-2816: Support immutability and fluent accessors in the mapper -- [new feature] JAVA-2721: Add counter support in the mapper -- [bug] JAVA-2863: Reintroduce mapper processor dependency to SLF4J - -### 4.8.0 - -- [improvement] JAVA-2811: Add aliases for driver 3 method names -- [new feature] JAVA-2808: Provide metrics bindings for Micrometer and MicroProfile -- [new feature] JAVA-2773: Support new protocol v5 message format -- [improvement] JAVA-2841: Raise timeouts during connection initialization -- [bug] JAVA-2331: Unregister old metrics when a node gets removed or changes RPC address -- [improvement] JAVA-2850: Ignore credentials in secure connect bundle [DataStax Astra] -- [improvement] JAVA-2813: Don't fail when secure bundle is specified together with other options -- [bug] JAVA-2800: Exclude SLF4J from mapper-processor dependencies -- [new feature] JAVA-2819: Add 
DriverConfigLoader.fromString -- [improvement] JAVA-2431: Set all occurrences when bound variables are used multiple times -- [improvement] JAVA-2829: Log protocol negotiation messages at DEBUG level -- [bug] JAVA-2846: Give system properties the highest precedence in DefaultDriverConfigLoader -- [new feature] JAVA-2691: Provide driver 4 support for extra codecs -- [improvement] Allow injection of CodecRegistry on session builder -- [improvement] JAVA-2828: Add safe paging state wrapper -- [bug] JAVA-2835: Correctly handle unresolved addresses in DefaultEndPoint.equals -- [bug] JAVA-2838: Avoid ConcurrentModificationException when closing connection -- [bug] JAVA-2837: make StringCodec strict about unicode in ascii - -### 4.7.2 - -- [bug] JAVA-2821: Can't connect to DataStax Astra using driver 4.7.x - -### 4.7.1 - -- [bug] JAVA-2818: Remove root path only after merging non-programmatic configs - -### 4.7.0 - -- [improvement] JAVA-2301: Introduce OSGi tests for the mapper -- [improvement] JAVA-2658: Refactor OSGi tests -- [bug] JAVA-2657: Add ability to specify the class loader to use for application-specific classpath resources -- [improvement] JAVA-2803: Add Graal substitutions for protocol compression -- [documentation] JAVA-2666: Document BOM and driver modules -- [documentation] JAVA-2613: Improve connection pooling documentation -- [new feature] JAVA-2793: Add composite config loader -- [new feature] JAVA-2792: Allow custom results in the mapper -- [improvement] JAVA-2663: Add Graal substitutions for native functions -- [improvement] JAVA-2747: Revisit semantics of Statement.setExecutionProfile/Name - -### 4.6.1 - -- [bug] JAVA-2676: Don't reschedule write coalescer after empty runs - -### 4.6.0 - -- [improvement] JAVA-2741: Make keyspace/table metadata impls serializable -- [bug] JAVA-2740: Extend peer validity check to include datacenter, rack and tokens -- [bug] JAVA-2744: Recompute token map when node is added -- [new feature] JAVA-2614: Provide a utility 
to emulate offset paging on the client side -- [new feature] JAVA-2718: Warn when the number of sessions exceeds a configurable threshold -- [improvement] JAVA-2664: Add a callback to inject the session in listeners -- [bug] JAVA-2698: TupleCodec and UdtCodec give wrong error message when parsing fails -- [improvement] JAVA-2435: Add automatic-module-names to the manifests -- [new feature] JAVA-2054: Add now_in_seconds to protocol v5 query messages -- [bug] JAVA-2711: Fix handling of UDT keys in the mapper -- [improvement] JAVA-2631: Add getIndex() shortcuts to TableMetadata -- [improvement] JAVA-2679: Add port information to QueryTrace and TraceEvent -- [improvement] JAVA-2184: Refactor DescribeIT to improve maintainability -- [new feature] JAVA-2600: Add map-backed config loader -- [new feature] JAVA-2105: Add support for transient replication -- [new feature] JAVA-2670: Provide base class for mapped custom codecs -- [new feature] JAVA-2633: Add execution profile argument to DAO mapper factory methods -- [improvement] JAVA-2667: Add ability to fail the build when integration tests fail -- [bug] JAVA-1861: Add Metadata.getClusterName() - -### 4.5.1 - -- [bug] JAVA-2673: Fix mapper generated code for UPDATE with TTL and IF condition - -### 4.5.0 - -- [bug] JAVA-2654: Make AdminRequestHandler handle integer serialization -- [improvement] JAVA-2618: Improve error handling in request handlers -- [new feature] JAVA-2064: Add support for DSE 6.8 graph options in schema builder -- [documentation] JAVA-2559: Fix GraphNode javadocs -- [improvement] JAVA-2281: Extend GraphBinaryDataTypesTest to other graph protocols -- [new feature] JAVA-2498: Add support for reactive graph queries -- [bug] JAVA-2572: Prevent race conditions when cancelling a continuous paging query -- [improvement] JAVA-2566: Introduce specific metrics for Graph queries -- [improvement] JAVA-2556: Make ExecutionInfo compatible with any Request type -- [improvement] JAVA-2571: Revisit usages of DseGraph.g 
-- [improvement] JAVA-2558: Revisit GraphRequestHandler -- [bug] JAVA-2508: Preserve backward compatibility in schema metadata types -- [bug] JAVA-2465: Avoid requesting 0 page when executing continuous paging queries -- [improvement] JAVA-2472: Enable speculative executions for paged graph queries -- [improvement] JAVA-1579: Change default result format to latest GraphSON format -- [improvement] JAVA-2496: Revisit timeouts for paged graph queries -- [bug] JAVA-2510: Fix GraphBinaryDataTypesTest Codec registry initialization -- [bug] JAVA-2492: Parse edge metadata using internal identifiers -- [improvement] JAVA-2282: Remove GraphSON3 support -- [new feature] JAVA-2098: Add filter predicates for collections -- [improvement] JAVA-2245: Rename graph engine Legacy to Classic and Modern to Core -- [new feature] JAVA-2099: Enable Paging Through DSE Driver for Gremlin Traversals (2.x) -- [new feature] JAVA-1898: Expose new table-level graph metadata -- [bug] JAVA-2642: Fix default value of max-orphan-requests -- [bug] JAVA-2644: Revisit channel selection when pool size > 1 -- [bug] JAVA-2630: Correctly handle custom classes in IndexMetadata.describe -- [improvement] JAVA-1556: Publish Maven Bill Of Materials POM -- [improvement] JAVA-2637: Bump Netty to 4.1.45 -- [bug] JAVA-2617: Reinstate generation of deps.txt for Insights -- [new feature] JAVA-2625: Provide user-friendly programmatic configuration for kerberos -- [improvement] JAVA-2624: Expose a config option for the connect timeout -- [improvement] JAVA-2592: Make reload support parameterizable for DefaultDriverConfigLoader -- [new feature] JAVA-2263: Add optional schema validation to the mapper - -### 4.4.0 - -This version brings in all functionality that was formerly only in the DataStax Enterprise driver, -such as the built-in support for reactive programming. 
Going forward, all new features will be -implemented in this single driver (for past DataStax Enterprise driver versions before the merge, -refer to the [DSE driver -changelog](https://docs.datastax.com/en/developer/java-driver-dse/latest/changelog/)). - -- [documentation] JAVA-2607: Improve visibility of driver dependencies section -- [documentation] JAVA-1975: Document the importance of using specific TinkerPop version -- [improvement] JAVA-2529: Standardize optional/excludable dependency checks -- [bug] JAVA-2598: Do not use context class loader when attempting to load classes -- [improvement] JAVA-2582: Don't propagate a future into SchemaQueriesFactory -- [documentation] JAVA-2542: Improve the javadocs of methods in CqlSession -- [documentation] JAVA-2609: Add docs for proxy authentication to unified driver -- [improvement] JAVA-2554: Improve efficiency of InsightsClient by improving supportsInsights check -- [improvement] JAVA-2601: Inject Google Tag Manager scripts in generated API documentation -- [improvement] JAVA-2551: Improve support for DETERMINISTIC and MONOTONIC functions -- [documentation] JAVA-2446: Revisit continuous paging javadocs -- [improvement] JAVA-2550: Remove warnings in ContinuousCqlRequestHandler when coordinator is not replica -- [improvement] JAVA-2569: Make driver compatible with Netty < 4.1.34 again -- [improvement] JAVA-2541: Improve error messages during connection initialization -- [improvement] JAVA-2530: Expose shortcuts for name-based UUIDs -- [improvement] JAVA-2547: Add method DriverConfigLoader.fromPath -- [improvement] JAVA-2528: Store suppressed exceptions in AllNodesFailedException -- [new feature] JAVA-2581: Add query builder support for indexed list assignments -- [improvement] JAVA-2596: Consider collection removals as idempotent in query builder -- [bug] JAVA-2555: Generate append/prepend constructs compatible with legacy C* versions -- [bug] JAVA-2584: Ensure codec registry is able to create codecs for collections of 
UDTs and tuples -- [bug] JAVA-2583: IS NOT NULL clause should be idempotent -- [improvement] JAVA-2442: Don't check for schema agreement twice when completing a DDL query -- [improvement] JAVA-2473: Don't reconnect control connection if protocol is downgraded -- [bug] JAVA-2556: Make ExecutionInfo compatible with any Request type -- [new feature] JAVA-2532: Add BoundStatement ReturnType for insert, update, and delete DAO methods -- [improvement] JAVA-2107: Add XML formatting plugin -- [bug] JAVA-2527: Allow AllNodesFailedException to accept more than one error per node -- [improvement] JAVA-2546: Abort schema refresh if a query fails - -### 4.3.1 - -- [bug] JAVA-2557: Accept any negative length when decoding elements of tuples and UDTs - -### 4.3.0 - -- [improvement] JAVA-2497: Ensure nodes and exceptions are serializable -- [bug] JAVA-2464: Fix initial schema refresh when reconnect-on-init is enabled -- [improvement] JAVA-2516: Enable hostname validation with Cloud -- [documentation]: JAVA-2460: Document how to determine the local DC -- [improvement] JAVA-2476: Improve error message when codec registry inspects a collection with a - null element -- [documentation] JAVA-2509: Mention file-based approach for Cloud configuration in the manual -- [improvement] JAVA-2447: Mention programmatic local DC method in Default LBP error message -- [improvement] JAVA-2459: Improve extensibility of existing load balancing policies -- [documentation] JAVA-2428: Add developer docs -- [documentation] JAVA-2503: Migrate Cloud "getting started" page to driver manual -- [improvement] JAVA-2484: Add errors for cloud misconfiguration -- [improvement] JAVA-2490: Allow to read the secure bundle from an InputStream -- [new feature] JAVA-2478: Allow to provide the secure bundle via URL -- [new feature] JAVA-2356: Support for DataStax Cloud API -- [improvement] JAVA-2407: Improve handling of logback configuration files in IDEs -- [improvement] JAVA-2434: Add support for custom cipher suites 
and host name validation to ProgrammaticSslEngineFactory -- [improvement] JAVA-2480: Upgrade Jackson to 2.10.0 -- [documentation] JAVA-2505: Annotate Node.getHostId() as nullable -- [improvement] JAVA-1708: Support DSE "everywhere" replication strategy -- [improvement] JAVA-2471: Consider DSE version when parsing the schema -- [improvement] JAVA-2444: Add method setRoutingKey(ByteBuffer...) to StatementBuilder -- [improvement] JAVA-2398: Improve support for optional dependencies in OSGi -- [improvement] JAVA-2452: Allow "none" as a compression option -- [improvement] JAVA-2419: Allow registration of user codecs at runtime -- [documentation] JAVA-2384: Add quick overview section to each manual page -- [documentation] JAVA-2412: Cover DDL query debouncing in FAQ and upgrade guide -- [documentation] JAVA-2416: Update paging section in the manual -- [improvement] JAVA-2402: Add setTracing(boolean) to StatementBuilder -- [bug] JAVA-2466: Set idempotence to null in BatchStatement.newInstance - -### 4.2.2 - -- [bug] JAVA-2475: Fix message size when query string contains Unicode surrogates -- [bug] JAVA-2470: Fix Session.OSS_DRIVER_COORDINATES for shaded JAR - -### 4.2.1 - -- [bug] JAVA-2454: Handle "empty" CQL type while parsing schema -- [improvement] JAVA-2455: Improve logging of schema refresh errors -- [documentation] JAVA-2429: Document expected types on DefaultDriverOption -- [documentation] JAVA-2426: Fix month pattern in CqlDuration documentation -- [bug] JAVA-2451: Make zero a valid estimated size for PagingIterableSpliterator -- [bug] JAVA-2443: Compute prepared statement PK indices for protocol v3 -- [bug] JAVA-2430: Use variable metadata to infer the routing keyspace on bound statements - -### 4.2.0 - -- [improvement] JAVA-2390: Add methods to set the SSL engine factory programmatically -- [improvement] JAVA-2379: Fail fast if prepared id doesn't match when repreparing on the fly -- [bug] JAVA-2375: Use per-request keyspace when repreparing on the fly -- 
[improvement] JAVA-2370: Remove auto-service plugin from mapper processor -- [improvement] JAVA-2377: Add a config option to make driver threads daemon -- [improvement] JAVA-2371: Handle null elements in collections on the decode path -- [improvement] JAVA-2351: Add a driver example for the object mapper -- [bug] JAVA-2323: Handle restart of a node with same host_id but a different address -- [improvement] JAVA-2303: Ignore peer rows matching the control host's RPC address -- [improvement] JAVA-2236: Add methods to set the auth provider programmatically -- [improvement] JAVA-2369: Change mapper annotations retention to runtime -- [improvement] JAVA-2365: Redeclare default constants when an enum is abstracted behind an - interface -- [improvement] JAVA-2302: Better target mapper errors and warnings for inherited methods -- [improvement] JAVA-2336: Expose byte utility methods in the public API -- [improvement] JAVA-2338: Revisit toString() for data container types -- [bug] JAVA-2367: Fix column names in EntityHelper.updateByPrimaryKey -- [bug] JAVA-2358: Fix list of reserved CQL keywords -- [improvement] JAVA-2359: Allow default keyspace at the mapper level -- [improvement] JAVA-2306: Clear security tokens from memory immediately after use -- [improvement] JAVA-2320: Expose more attributes on mapper Select for individual query clauses -- [bug] JAVA-2332: Destroy connection pool when a node gets removed -- [bug] JAVA-2324: Add support for primitive shorts in mapper -- [bug] JAVA-2325: Allow "is" prefix for boolean getters in mapped entities -- [improvement] JAVA-2308: Add customWhereClause to `@Delete` -- [improvement] JAVA-2247: PagingIterable implementations should implement spliterator() -- [bug] JAVA-2312: Handle UDTs with names that clash with collection types -- [improvement] JAVA-2307: Improve `@Select` and `@Delete` by not requiring full primary key -- [improvement] JAVA-2315: Improve extensibility of session builder -- [bug] JAVA-2394: BaseCcmRule 
DseRequirement max should use DseVersion, not Cassandra version - -### 4.1.0 - -- [documentation] JAVA-2294: Fix wrong examples in manual page on batch statements -- [bug] JAVA-2304: Avoid direct calls to ByteBuffer.array() -- [new feature] JAVA-2078: Add object mapper -- [improvement] JAVA-2297: Add a NettyOptions method to set socket options -- [bug] JAVA-2280: Ignore peer rows with missing host id or RPC address -- [bug] JAVA-2264: Adjust HashedWheelTimer tick duration from 1 to 100 ms -- [bug] JAVA-2260: Handle empty collections in PreparedStatement.bind(...) -- [improvement] JAVA-2278: Pass the request's log prefix to RequestTracker -- [bug] JAVA-2253: Don't strip trailing zeros in ByteOrderedToken -- [improvement] JAVA-2207: Add bulk value assignment to QueryBuilder Insert -- [bug] JAVA-2234: Handle terminated executor when the session is closed twice -- [documentation] JAVA-2220: Emphasize that query builder is now a separate artifact in root README -- [documentation] JAVA-2217: Cover contact points and local datacenter earlier in the manual -- [improvement] JAVA-2242: Allow skipping all integration tests with -DskipITs -- [improvement] JAVA-2241: Make DefaultDriverContext.cycleDetector protected -- [bug] JAVA-2226: Support IPv6 contact points in the configuration - -### 4.0.1 - -- [new feature] JAVA-2201: Expose a public API for programmatic config -- [new feature] JAVA-2205: Expose public factory methods for alternative config loaders -- [bug] JAVA-2214: Fix flaky RequestLoggerIT test -- [bug] JAVA-2203: Handle unresolved addresses in DefaultEndPoint -- [bug] JAVA-2210: Add ability to set TTL for modification queries -- [improvement] JAVA-2212: Add truncate to QueryBuilder -- [improvement] JAVA-2211: Upgrade Jersey examples to fix security issue sid-3606 -- [bug] JAVA-2193: Fix flaky tests in ExecutionInfoWarningsIT -- [improvement] JAVA-2197: Skip deployment of examples and integration tests to Maven central - -### 4.0.0 - -- [improvement] JAVA-2192: 
Don't return generic types with wildcards -- [improvement] JAVA-2148: Add examples -- [bug] JAVA-2189: Exclude virtual keyspaces from token map computation -- [improvement] JAVA-2183: Enable materialized views when testing against Cassandra 4 -- [improvement] JAVA-2182: Add insertInto().json() variant that takes an object in QueryBuilder -- [improvement] JAVA-2161: Annotate mutating methods with `@CheckReturnValue` -- [bug] JAVA-2177: Don't exclude down nodes when initializing LBPs -- [improvement] JAVA-2143: Rename Statement.setTimestamp() to setQueryTimestamp() -- [improvement] JAVA-2165: Abstract node connection information -- [improvement] JAVA-2090: Add support for additional_write_policy and read_repair table options -- [improvement] JAVA-2164: Rename statement builder methods to setXxx -- [bug] JAVA-2178: QueryBuilder: Alias after function column is not included in a query -- [improvement] JAVA-2158: Allow BuildableQuery to build statement with values -- [improvement] JAVA-2150: Improve query builder error message on unsupported literal type -- [documentation] JAVA-2149: Improve Term javadocs in the query builder - -### 4.0.0-rc1 - -- [improvement] JAVA-2106: Log server side warnings returned from a query -- [improvement] JAVA-2151: Drop "Dsl" suffix from query builder main classes -- [new feature] JAVA-2144: Expose internal API to hook into the session lifecycle -- [improvement] JAVA-2119: Add PagingIterable abstraction as a supertype of ResultSet -- [bug] JAVA-2063: Normalize authentication logging -- [documentation] JAVA-2034: Add performance recommendations in the manual -- [improvement] JAVA-2077: Allow reconnection policy to detect first connection attempt -- [improvement] JAVA-2067: Publish javadocs JAR for the shaded module -- [improvement] JAVA-2103: Expose partitioner name in TokenMap API -- [documentation] JAVA-2075: Document preference for LZ4 over Snappy - -### 4.0.0-beta3 - -- [bug] JAVA-2066: Array index range error when fetching routing keys 
on bound statements -- [documentation] JAVA-2061: Add section to upgrade guide about updated type mappings -- [improvement] JAVA-2038: Add jitter to delays between reconnection attempts -- [improvement] JAVA-2053: Cache results of session.prepare() -- [improvement] JAVA-2058: Make programmatic config reloading part of the public API -- [improvement] JAVA-1943: Fail fast in execute() when the session is closed -- [improvement] JAVA-2056: Reduce HashedWheelTimer tick duration -- [bug] JAVA-2057: Do not create pool when SUGGEST\_UP topology event received -- [improvement] JAVA-2049: Add shorthand method to SessionBuilder to specify local DC -- [bug] JAVA-2037: Fix NPE when preparing statement with no bound variables -- [improvement] JAVA-2014: Schedule timeouts on a separate Timer -- [bug] JAVA-2029: Handle schema refresh failure after a DDL query -- [bug] JAVA-1947: Make schema parsing more lenient and allow missing system_virtual_schema -- [bug] JAVA-2028: Use CQL form when parsing UDT types in system tables -- [improvement] JAVA-1918: Document temporal types -- [improvement] JAVA-1914: Optimize use of System.nanoTime in CqlRequestHandlerBase -- [improvement] JAVA-1945: Document corner cases around UDT and tuple attachment -- [improvement] JAVA-2026: Make CqlDuration implement TemporalAmount -- [improvement] JAVA-2017: Slightly optimize conversion methods on the hot path -- [improvement] JAVA-2010: Make dependencies to annotations required again -- [improvement] JAVA-1978: Add a config option to keep contact points unresolved -- [bug] JAVA-2000: Fix ConcurrentModificationException during channel shutdown -- [improvement] JAVA-2002: Reimplement TypeCodec.accepts to improve performance -- [improvement] JAVA-2011: Re-add ResultSet.getAvailableWithoutFetching() and isFullyFetched() -- [improvement] JAVA-2007: Make driver threads extend FastThreadLocalThread -- [bug] JAVA-2001: Handle zero timeout in admin requests - -### 4.0.0-beta2 - -- [new feature] JAVA-1919: Provide 
a timestamp <=> ZonedDateTime codec -- [improvement] JAVA-1989: Add BatchStatement.newInstance(BatchType, Iterable) -- [improvement] JAVA-1988: Remove pre-fetching from ResultSet API -- [bug] JAVA-1948: Close session properly when LBP fails to initialize -- [improvement] JAVA-1949: Improve error message when contact points are wrong -- [improvement] JAVA-1956: Add statementsCount accessor to BatchStatementBuilder -- [bug] JAVA-1946: Ignore protocol version in equals comparison for UdtValue/TupleValue -- [new feature] JAVA-1932: Send Driver Name and Version in Startup message -- [new feature] JAVA-1917: Add ability to set node on statement -- [improvement] JAVA-1916: Base TimestampCodec.parse on java.util.Date. -- [improvement] JAVA-1940: Clean up test resources when CCM integration tests finish -- [bug] JAVA-1938: Make CassandraSchemaQueries classes public -- [improvement] JAVA-1925: Rename context getters -- [improvement] JAVA-1544: Check API compatibility with Revapi -- [new feature] JAVA-1900: Add support for virtual tables - -### 4.0.0-beta1 - -- [new feature] JAVA-1869: Add DefaultDriverConfigLoaderBuilder -- [improvement] JAVA-1913: Expose additional counters on Node -- [improvement] JAVA-1880: Rename "config profile" to "execution profile" -- [improvement] JAVA-1889: Upgrade dependencies to the latest minor versions -- [improvement] JAVA-1819: Propagate more attributes to bound statements -- [improvement] JAVA-1897: Improve extensibility of schema metadata classes -- [improvement] JAVA-1437: Enable SSL hostname validation by default -- [improvement] JAVA-1879: Duplicate basic.request options as Request/Statement attributes -- [improvement] JAVA-1870: Use sensible defaults in RequestLogger if config options are missing -- [improvement] JAVA-1877: Use a separate reconnection schedule for the control connection -- [improvement] JAVA-1763: Generate a binary tarball as part of the build process -- [improvement] JAVA-1884: Add additional methods from TypeToken to 
GenericType -- [improvement] JAVA-1883: Use custom queue implementation for LBP's query plan -- [improvement] JAVA-1890: Add more configuration options to DefaultSslEngineFactory -- [bug] JAVA-1895: Rename PreparedStatement.getPrimaryKeyIndices to getPartitionKeyIndices -- [bug] JAVA-1891: Allow null items when setting values in bulk -- [improvement] JAVA-1767: Improve message when column not in result set -- [improvement] JAVA-1624: Expose ExecutionInfo on exceptions where applicable -- [improvement] JAVA-1766: Revisit nullability -- [new feature] JAVA-1860: Allow reconnection at startup if no contact point is available -- [improvement] JAVA-1866: Make all public policies implement AutoCloseable -- [new feature] JAVA-1762: Build alternate core artifact with Netty shaded -- [new feature] JAVA-1761: Add OSGi descriptors -- [bug] JAVA-1560: Correctly propagate policy initialization errors -- [improvement] JAVA-1865: Add RelationMetadata.getPrimaryKey() -- [improvement] JAVA-1862: Add ConsistencyLevel.isDcLocal and isSerial -- [improvement] JAVA-1858: Implement Serializable in implementations, not interfaces -- [improvement] JAVA-1830: Surface response frame size in ExecutionInfo -- [improvement] JAVA-1853: Add newValue(Object...) 
to TupleType and UserDefinedType -- [improvement] JAVA-1815: Reorganize configuration into basic/advanced categories -- [improvement] JAVA-1848: Add logs to DefaultRetryPolicy -- [new feature] JAVA-1832: Add Ec2MultiRegionAddressTranslator -- [improvement] JAVA-1825: Add remaining Typesafe config primitive types to DriverConfigProfile -- [new feature] JAVA-1846: Add ConstantReconnectionPolicy -- [improvement] JAVA-1824: Make policies overridable in profiles -- [bug] JAVA-1569: Allow null to be used in positional and named values in statements -- [new feature] JAVA-1592: Expose request's total Frame size through API -- [new feature] JAVA-1829: Add metrics for bytes-sent and bytes-received -- [improvement] JAVA-1755: Normalize usage of DEBUG/TRACE log levels -- [improvement] JAVA-1803: Log driver version on first use -- [improvement] JAVA-1792: Add AuthProvider callback to handle missing challenge from server -- [improvement] JAVA-1775: Assume default packages for built-in policies -- [improvement] JAVA-1774: Standardize policy locations -- [improvement] JAVA-1798: Allow passing the default LBP filter as a session builder argument -- [new feature] JAVA-1523: Add query logger -- [improvement] JAVA-1801: Revisit NodeStateListener and SchemaChangeListener APIs -- [improvement] JAVA-1759: Revisit metrics API -- [improvement] JAVA-1776: Use concurrency annotations -- [improvement] JAVA-1799: Use CqlIdentifier for simple statement named values -- [new feature] JAVA-1515: Add query builder -- [improvement] JAVA-1773: Make DriverConfigProfile enumerable -- [improvement] JAVA-1787: Use standalone shaded Guava artifact -- [improvement] JAVA-1769: Allocate exact buffer size for outgoing requests -- [documentation] JAVA-1780: Add manual section about case sensitivity -- [new feature] JAVA-1536: Add request throttling -- [improvement] JAVA-1772: Revisit multi-response callbacks -- [new feature] JAVA-1537: Add remaining socket options -- [bug] JAVA-1756: Propagate custom payload 
when preparing a statement -- [improvement] JAVA-1847: Add per-node request tracking - -### 4.0.0-alpha3 - -- [new feature] JAVA-1518: Expose metrics -- [improvement] JAVA-1739: Add host_id and schema_version to node metadata -- [improvement] JAVA-1738: Convert enums to allow extensibility -- [bug] JAVA-1727: Override DefaultUdtValue.equals -- [bug] JAVA-1729: Override DefaultTupleValue.equals -- [improvement] JAVA-1720: Merge Cluster and Session into a single interface -- [improvement] JAVA-1713: Use less nodes in DefaultLoadBalancingPolicyIT -- [improvement] JAVA-1707: Add test infrastructure for running DSE clusters with CCM -- [bug] JAVA-1715: Propagate unchecked exceptions to CompletableFuture in SyncAuthenticator methods -- [improvement] JAVA-1714: Make replication strategies pluggable -- [new feature] JAVA-1647: Handle metadata_changed flag in protocol v5 -- [new feature] JAVA-1633: Handle per-request keyspace in protocol v5 -- [improvement] JAVA-1678: Warn if auth is configured on the client but not the server -- [improvement] JAVA-1673: Remove schema agreement check when repreparing on up -- [new feature] JAVA-1526: Provide a single load balancing policy implementation -- [improvement] JAVA-1680: Improve error message on batch log write timeout -- [improvement] JAVA-1675: Remove dates from copyright headers -- [improvement] JAVA-1645: Don't log stack traces at WARN level -- [new feature] JAVA-1524: Add query trace API -- [improvement] JAVA-1646: Provide a more readable error when connecting to Cassandra 2.0 or lower -- [improvement] JAVA-1662: Raise default request timeout -- [improvement] JAVA-1566: Enforce API rules automatically -- [bug] JAVA-1584: Validate that no bound values are unset in protocol v3 - -### 4.0.0-alpha2 - -- [new feature] JAVA-1525: Handle token metadata -- [new feature] JAVA-1638: Check schema agreement -- [new feature] JAVA-1494: Implement Snappy and LZ4 compression -- [new feature] JAVA-1514: Port Uuids utility class -- [new 
feature] JAVA-1520: Add node state listeners -- [new feature] JAVA-1493: Handle schema metadata -- [improvement] JAVA-1605: Refactor request execution model -- [improvement] JAVA-1597: Fix raw usages of Statement -- [improvement] JAVA-1542: Enable JaCoCo code coverage -- [improvement] JAVA-1295: Auto-detect best protocol version in mixed cluster -- [bug] JAVA-1565: Mark node down when it loses its last connection and was already reconnecting -- [bug] JAVA-1594: Don't create pool if node comes back up but is ignored -- [bug] JAVA-1593: Reconnect control connection if current node is removed, forced down or ignored -- [bug] JAVA-1595: Don't use system.local.rpc_address when refreshing node list -- [bug] JAVA-1568: Handle Reconnection#reconnectNow/stop while the current attempt is still in - progress -- [improvement] JAVA-1585: Add GenericType#where -- [improvement] JAVA-1590: Properly skip deployment of integration-tests module -- [improvement] JAVA-1576: Expose AsyncResultSet's iterator through a currentPage() method -- [improvement] JAVA-1591: Add programmatic way to get driver version - -### 4.0.0-alpha1 - -- [improvement] JAVA-1586: Throw underlying exception when codec not found in cache -- [bug] JAVA-1583: Handle write failure in ChannelHandlerRequest -- [improvement] JAVA-1541: Reorganize configuration -- [improvement] JAVA-1577: Set default consistency level to LOCAL_ONE -- [bug] JAVA-1548: Retry idempotent statements on READ_TIMEOUT and UNAVAILABLE -- [bug] JAVA-1562: Fix various issues around heart beats -- [improvement] JAVA-1546: Make all statement implementations immutable -- [bug] JAVA-1554: Include VIEW and CDC in WriteType -- [improvement] JAVA-1498: Add a cache above Typesafe config -- [bug] JAVA-1547: Abort pending requests when connection dropped -- [new feature] JAVA-1497: Port timestamp generators from 3.x -- [improvement] JAVA-1539: Configure for deployment to Maven central -- [new feature] JAVA-1519: Close channel if number of orphan stream ids 
exceeds a configurable - threshold -- [new feature] JAVA-1529: Make configuration reloadable -- [new feature] JAVA-1502: Reprepare statements on newly added/up nodes -- [new feature] JAVA-1530: Add ResultSet.wasApplied -- [improvement] JAVA-1531: Merge CqlSession and Session -- [new feature] JAVA-1513: Handle batch statements -- [improvement] JAVA-1496: Improve log messages -- [new feature] JAVA-1501: Reprepare on the fly when we get an UNPREPARED response -- [bug] JAVA-1499: Wait for load balancing policy at cluster initialization -- [new feature] JAVA-1495: Add prepared statements - -## 3.11.5 -- [improvement] JAVA-3114: Shade io.dropwizard.metrics:metrics-core in shaded driver -- [improvement] JAVA-3115: SchemaChangeListener#onKeyspaceChanged can fire when keyspace has not changed if using SimpleStrategy replication - -## 3.11.4 -- [improvement] JAVA-3079: Upgrade Netty to 4.1.94, 3.x edition -- [improvement] JAVA-3082: Fix maven build for Apple-silicon -- [improvement] PR 1671: Fix LatencyAwarePolicy scale docstring - -## 3.11.3 - -- [improvement] JAVA-3023: Upgrade Netty to 4.1.77, 3.x edition - -## 3.11.2 - -- [improvement] JAVA-3008: Upgrade Netty to 4.1.75, 3.x edition -- [improvement] JAVA-2984: Upgrade Jackson to resolve high-priority CVEs - -## 3.11.1 - -- [bug] JAVA-2967: Support native transport peer information for DSE 6.8. -- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE. - -## 3.11.0 - -- [improvement] JAVA-2705: Remove protocol v5 beta status, add v6-beta. -- [bug] JAVA-2923: Detect and use Guava's new HostAndPort.getHost method. -- [bug] JAVA-2922: Switch to modern framing format inside a channel handler. -- [bug] JAVA-2924: Consider protocol version unsupported when server requires USE_BETA flag for it. - -### 3.10.2 - -- [bug] JAVA-2860: Avoid NPE if channel initialization crashes. - -### 3.10.1 - -- [bug] JAVA-2857: Fix NPE when built statements without parameters are logged at TRACE level. 
-- [bug] JAVA-2843: Successfully parse DSE table schema in OSS driver. - -### 3.10.0 - -- [improvement] JAVA-2676: Don't reschedule flusher after empty runs -- [new feature] JAVA-2772: Support new protocol v5 message format - -### 3.9.0 - -- [bug] JAVA-2627: Avoid logging error message including stack trace in request handler. -- [new feature] JAVA-2706: Add now_in_seconds to protocol v5 query messages. -- [improvement] JAVA-2730: Add support for Cassandra® 4.0 table options -- [improvement] JAVA-2702: Transient Replication Support for Cassandra® 4.0 - -### 3.8.0 - -- [new feature] JAVA-2356: Support for DataStax Cloud API. -- [improvement] JAVA-2483: Allow to provide secure bundle via URL. -- [improvement] JAVA-2499: Allow to read the secure bundle from an InputStream. -- [improvement] JAVA-2457: Detect CaaS and change default consistency. -- [improvement] JAVA-2485: Add errors for Cloud misconfiguration. -- [documentation] JAVA-2504: Migrate Cloud "getting started" page to driver manual. -- [improvement] JAVA-2516: Enable hostname validation with Cloud -- [bug] JAVA-2515: NEW_NODE and REMOVED_NODE events should trigger ADDED and REMOVED. - - -### 3.7.2 - -- [bug] JAVA-2249: Stop stripping trailing zeros in ByteOrderedTokens. -- [bug] JAVA-1492: Don't immediately reuse busy connections for another request. -- [bug] JAVA-2198: Handle UDTs with names that clash with collection types. -- [bug] JAVA-2204: Avoid memory leak when client holds onto a stale TableMetadata instance. - - -### 3.7.1 - -- [bug] JAVA-2174: Metadata.needsQuote should accept empty strings. -- [bug] JAVA-2193: Fix flaky tests in WarningsTest. - - -### 3.7.0 - -- [improvement] JAVA-2025: Include exception message in Abstract\*Codec.accepts(null). -- [improvement] JAVA-1980: Use covariant return types in RemoteEndpointAwareJdkSSLOptions.Builder methods. -- [documentation] JAVA-2062: Document frozen collection preference with Mapper. -- [bug] JAVA-2071: Fix NPE in ArrayBackedRow.toString(). 
-- [bug] JAVA-2070: Call onRemove instead of onDown when rack and/or DC information changes for a host. -- [improvement] JAVA-1256: Log parameters of BuiltStatement in QueryLogger. -- [documentation] JAVA-2074: Document preference for LZ4 over Snappy. -- [bug] JAVA-1612: Include netty-common jar in binary tarball. -- [improvement] JAVA-2003: Simplify CBUtil internal API to improve performance. -- [improvement] JAVA-2002: Reimplement TypeCodec.accepts to improve performance. -- [documentation] JAVA-2041: Deprecate cross-DC failover in DCAwareRoundRobinPolicy. -- [documentation] JAVA-1159: Document workaround for using tuple with udt field in Mapper. -- [documentation] JAVA-1964: Complete remaining "Coming Soon" sections in docs. -- [improvement] JAVA-1950: Log server side warnings returned from a query. -- [improvement] JAVA-2123: Allow to use QueryBuilder for building queries against Materialized Views. -- [bug] JAVA-2082: Avoid race condition during cluster close and schema refresh. - - -### 3.6.0 - -- [improvement] JAVA-1394: Add request-queue-depth metric. -- [improvement] JAVA-1857: Add Statement.setHost. -- [bug] JAVA-1920: Use nanosecond precision in LocalTimeCodec#format(). -- [bug] JAVA-1794: Driver tries to create a connection array of size -1. -- [new feature] JAVA-1899: Support virtual tables. -- [bug] JAVA-1908: TableMetadata.asCQLQuery does not add table option 'memtable_flush_period_in_ms' in the generated query. -- [bug] JAVA-1924: StatementWrapper setters should return the wrapping statement. -- [new feature] JAVA-1532: Add Codec support for Java 8's LocalDateTime and ZoneId. -- [improvement] JAVA-1786: Use Google code formatter. -- [bug] JAVA-1871: Change LOCAL\_SERIAL.isDCLocal() to return true. -- [documentation] JAVA-1902: Clarify unavailable & request error in DefaultRetryPolicy javadoc. -- [new feature] JAVA-1903: Add WhiteListPolicy.ofHosts. -- [bug] JAVA-1928: Fix GuavaCompatibility for Guava 26. 
-- [bug] JAVA-1935: Add null check in QueryConsistencyException.getHost. -- [improvement] JAVA-1771: Send driver name and version in STARTUP message. -- [improvement] JAVA-1388: Add dynamic port discovery for system.peers\_v2. -- [documentation] JAVA-1810: Note which setters are not propagated to PreparedStatement. -- [bug] JAVA-1944: Surface Read and WriteFailureException to RetryPolicy. -- [bug] JAVA-1211: Fix NPE in cluster close when cluster init fails. -- [bug] JAVA-1220: Fail fast on cluster init if previous init failed. -- [bug] JAVA-1929: Preempt session execute queries if session was closed. - -Merged from 3.5.x: - -- [bug] JAVA-1872: Retain table's views when processing table update. - - -### 3.5.0 - -- [improvement] JAVA-1448: TokenAwarePolicy should respect child policy ordering. -- [bug] JAVA-1751: Include defaultTimestamp length in encodedSize for protocol version >= 3. -- [bug] JAVA-1770: Fix message size when using Custom Payload. -- [documentation] JAVA-1760: Add metrics documentation. -- [improvement] JAVA-1765: Update dependencies to latest patch versions. -- [improvement] JAVA-1752: Deprecate DowngradingConsistencyRetryPolicy. -- [improvement] JAVA-1735: Log driver version on first use. -- [documentation] JAVA-1380: Add FAQ entry for errors arising from incompatibilities. -- [improvement] JAVA-1748: Support IS NOT NULL and != in query builder. -- [documentation] JAVA-1740: Mention C*2.2/3.0 incompatibilities in paging state manual. -- [improvement] JAVA-1725: Add a getNodeCount method to CCMAccess for easier automation. -- [new feature] JAVA-708: Add means to measure request sizes. -- [documentation] JAVA-1788: Add example for enabling host name verification to SSL docs. -- [improvement] JAVA-1791: Revert "JAVA-1677: Warn if auth is configured on the client but not the server." -- [bug] JAVA-1789: Account for flags in Prepare encodedSize. -- [bug] JAVA-1797: Use jnr-ffi version required by jnr-posix. 
- - -### 3.4.0 - -- [improvement] JAVA-1671: Remove unnecessary test on prepared statement metadata. -- [bug] JAVA-1694: Upgrade to jackson-databind 2.7.9.2 to address CVE-2017-15095. -- [documentation] JAVA-1685: Clarify recommendation on preparing SELECT *. -- [improvement] JAVA-1679: Improve error message on batch log write timeout. -- [improvement] JAVA-1672: Remove schema agreement check when repreparing on up. -- [improvement] JAVA-1677: Warn if auth is configured on the client but not the server. -- [new feature] JAVA-1651: Add NO_COMPACT startup option. -- [improvement] JAVA-1683: Add metrics to track writes to nodes. -- [new feature] JAVA-1229: Allow specifying the keyspace for individual queries. -- [improvement] JAVA-1682: Provide a way to record latencies for cancelled speculative executions. -- [improvement] JAVA-1717: Add metrics to latency-aware policy. -- [improvement] JAVA-1675: Remove dates from copyright headers. - -Merged from 3.3.x: - -- [bug] JAVA-1555: Include VIEW and CDC in WriteType. -- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) -- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery -- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. -- [bug] JAVA-1438: QueryBuilder check for empty orderings. -- [improvement] JAVA-1490: Allow zero delay for speculative executions. -- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. -- [bug] JAVA-1630: Fix Metadata.addIfAbsent. -- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. -- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. -- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. -- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. -- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. -- [bug] JAVA-1639: VersionNumber does not fulfill equals/hashcode contract. 
-- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. -- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. -- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. -- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. -- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. -- [improvement] JAVA-1659: Expose low-level flusher tuning options. -- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. - - -### 3.3.2 - -- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. -- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. -- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. -- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. -- [improvement] JAVA-1659: Expose low-level flusher tuning options. -- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. - - -### 3.3.1 - -- [bug] JAVA-1555: Include VIEW and CDC in WriteType. -- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) -- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery -- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. -- [bug] JAVA-1438: QueryBuilder check for empty orderings. -- [improvement] JAVA-1490: Allow zero delay for speculative executions. -- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. -- [bug] JAVA-1630: Fix Metadata.addIfAbsent. -- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. -- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. -- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. -- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. 
-- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. -- [bug] JAVA-1639: VersionNumber does not fulfill equals/hashcode contract. -- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. - - -### 3.3.0 - -- [bug] JAVA-1469: Update LoggingRetryPolicy to deal with SLF4J-353. -- [improvement] JAVA-1203: Upgrade Metrics to allow usage in OSGi. -- [bug] JAVA-1407: KeyspaceMetadata exportAsString should export user types in topological sort order. -- [bug] JAVA-1455: Mapper support using unset for null values. -- [bug] JAVA-1464: Allow custom codecs with non public constructors in @Param. -- [bug] JAVA-1470: Querying multiple pages overrides WrappedStatement. -- [improvement] JAVA-1428: Upgrade logback and jackson dependencies. -- [documentation] JAVA-1463: Revisit speculative execution docs. -- [documentation] JAVA-1466: Revisit timestamp docs. -- [documentation] JAVA-1445: Clarify how nodes are penalized in LatencyAwarePolicy docs. -- [improvement] JAVA-1446: Support 'DEFAULT UNSET' in Query Builder JSON Insert. -- [improvement] JAVA-1443: Add groupBy method to Select statement. -- [improvement] JAVA-1458: Check thread in mapper sync methods. -- [improvement] JAVA-1488: Upgrade Netty to 4.0.47.Final. -- [improvement] JAVA-1460: Add speculative execution number to ExecutionInfo -- [improvement] JAVA-1431: Improve error handling during pool initialization. - - -### 3.2.0 - -- [new feature] JAVA-1347: Add support for duration type. -- [new feature] JAVA-1248: Implement "beta" flag for native protocol v5. -- [new feature] JAVA-1362: Send query options flags as [int] for Protocol V5+. -- [new feature] JAVA-1364: Enable creation of SSLHandler with remote address information. -- [improvement] JAVA-1367: Make protocol negotiation more resilient. -- [bug] JAVA-1397: Handle duration as native datatype in protocol v5+. -- [improvement] JAVA-1308: CodecRegistry performance improvements. 
-- [improvement] JAVA-1287: Add CDC to TableOptionsMetadata and Schema Builder. -- [improvement] JAVA-1392: Reduce lock contention in RPTokenFactory. -- [improvement] JAVA-1328: Provide compatibility with Guava 20. -- [improvement] JAVA-1247: Disable idempotence warnings. -- [improvement] JAVA-1286: Support setting and retrieving udt fields in QueryBuilder. -- [bug] JAVA-1415: Correctly report if a UDT column is frozen. -- [bug] JAVA-1418: Make Guava version detection more reliable. -- [new feature] JAVA-1174: Add ifNotExists option to mapper. -- [improvement] JAVA-1414: Optimize Metadata.escapeId and Metadata.handleId. -- [improvement] JAVA-1310: Make mapper's ignored properties configurable. -- [improvement] JAVA-1316: Add strategy for resolving properties into CQL names. -- [bug] JAVA-1424: Handle new WRITE_FAILURE and READ_FAILURE format in v5 protocol. - -Merged from 3.1.x branch: - -- [bug] JAVA-1371: Reintroduce connection pool timeout. -- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. -- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. -- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. -- [bug] JAVA-1404: Fix min token handling in TokenRange.contains. -- [bug] JAVA-1429: Prevent heartbeats until connection is fully initialized. - - -### 3.1.4 - -Merged from 3.0.x branch: - -- [bug] JAVA-1371: Reintroduce connection pool timeout. -- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. -- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. -- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. - - -### 3.1.3 - -Merged from 3.0.x branch: - -- [bug] JAVA-1330: Add un/register for SchemaChangeListener in DelegatingCluster -- [bug] JAVA-1351: Include Custom Payload in Request.copy. -- [bug] JAVA-1346: Reset heartbeat only on client reads (not writes). 
-- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. - - -### 3.1.2 - -- [bug] JAVA-1321: Wrong OSGi dependency version for Guava. - -Merged from 3.0.x branch: - -- [bug] JAVA-1312: QueryBuilder modifies selected columns when manually selected. -- [improvement] JAVA-1303: Add missing BoundStatement.setRoutingKey(ByteBuffer...) -- [improvement] JAVA-262: Make internal executors customizable - - -### 3.1.1 - -- [bug] JAVA-1284: ClockFactory should check system property before attempting to load Native class. -- [bug] JAVA-1255: Allow nested UDTs to be used in Mapper. -- [bug] JAVA-1279: Mapper should exclude Groovy's "metaClass" property when looking for mapped properties - -Merged from 3.0.x branch: - -- [improvement] JAVA-1246: Driver swallows the real exception in a few cases -- [improvement] JAVA-1261: Throw error when attempting to page in I/O thread. -- [bug] JAVA-1258: Regression: Mapper cannot map a materialized view after JAVA-1126. -- [bug] JAVA-1101: Batch and BatchStatement should consider inner statements to determine query idempotence -- [improvement] JAVA-1262: Use ParseUtils for quoting & unquoting. -- [improvement] JAVA-1275: Use Netty's default thread factory -- [bug] JAVA-1285: QueryBuilder routing key auto-discovery should handle case-sensitive column names. -- [bug] JAVA-1283: Don't cache failed query preparations in the mapper. -- [improvement] JAVA-1277: Expose AbstractSession.checkNotInEventLoop. -- [bug] JAVA-1272: BuiltStatement not able to print its query string if it contains mapped UDTs. -- [bug] JAVA-1292: 'Adjusted frame length' error breaks driver's ability to read data. -- [improvement] JAVA-1293: Make DecoderForStreamIdSize.MAX_FRAME_LENGTH configurable. -- [improvement] JAVA-1053: Add a metric for authentication errors -- [improvement] JAVA-1263: Eliminate unnecessary memory copies in FrameCompressor implementations. 
-- [improvement] JAVA-893: Make connection pool non-blocking - - -### 3.1.0 - -- [new feature] JAVA-1153: Add PER PARTITION LIMIT to Select QueryBuilder. -- [improvement] JAVA-743: Add JSON support to QueryBuilder. -- [improvement] JAVA-1233: Update HdrHistogram to 2.1.9. -- [improvement] JAVA-1233: Update Snappy to 1.1.2.6. -- [bug] JAVA-1161: Preserve full time zone info in ZonedDateTimeCodec and DateTimeCodec. -- [new feature] JAVA-1157: Allow asynchronous paging of Mapper Result. -- [improvement] JAVA-1212: Don't retry non-idempotent statements by default. -- [improvement] JAVA-1192: Make EventDebouncer settings updatable at runtime. -- [new feature] JAVA-541: Add polymorphism support to object mapper. -- [new feature] JAVA-636: Allow @Column annotations on getters/setters as well as fields. -- [new feature] JAVA-984: Allow non-void setters in object mapping. -- [new feature] JAVA-1055: Add ErrorAware load balancing policy. - -Merged from 3.0.x branch: - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. -- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. -- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. -- [improvement] JAVA-1151: Fail fast if HdrHistogram is not in the classpath. -- [improvement] JAVA-1154: Allow individual Statement to cancel the read timeout. -- [bug] JAVA-1074: Fix documentation around default timestamp generator. -- [improvement] JAVA-1109: Document SSLOptions changes in upgrade guide. -- [improvement] JAVA-1065: Add method to create token from partition key values. -- [improvement] JAVA-1136: Enable JDK signature check in module driver-extras. -- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. 
-- [bug] JAVA-1140: Use same connection to check for schema agreement after a DDL query. -- [improvement] JAVA-1113: Support Cassandra 3.4 LIKE operator in QueryBuilder. -- [improvement] JAVA-1086: Support Cassandra 3.2 CAST function in QueryBuilder. -- [bug] JAVA-1095: Check protocol version for custom payload before sending the query. -- [improvement] JAVA-1133: Add OSGi headers to cassandra-driver-extras. -- [bug] JAVA-1137: Incorrect string returned by DataType.asFunctionParameterString() for collections and tuples. -- [bug] JAVA-1046: (Dynamic)CompositeTypes need to be parsed as string literal, not blob. -- [improvement] JAVA-1164: Clarify documentation on Host.listenAddress and broadcastAddress. -- [improvement] JAVA-1171: Add Host method to determine if DSE Graph is enabled. -- [improvement] JAVA-1069: Bootstrap driver-examples module. -- [documentation] JAVA-1150: Add example and FAQ entry about ByteBuffer/BLOB. -- [improvement] JAVA-1011: Expose PoolingOptions default values. -- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. -- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. -- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. -- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. -- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. -- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. -- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI -- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. -- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. -- [improvement] JAVA-923: Position idempotent flag on object mapper queries. 
-- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. -- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). -- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. -- [improvement] JAVA-852: Ignore peers with null entries during discovery. -- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. -- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. -- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). - -Merged from 2.1 branch: - -- [improvement] JAVA-1038: Fetch node info by rpc_address if its broadcast_address is not in system.peers. -- [improvement] JAVA-888: Add cluster-wide percentile tracker. -- [improvement] JAVA-963: Automatically register PercentileTracker from components that use it. -- [new feature] JAVA-1019: SchemaBuilder support for CREATE/ALTER/DROP KEYSPACE. -- [bug] JAVA-727: Allow monotonic timestamp generators to drift in the future + use microsecond precision when possible. -- [improvement] JAVA-444: Add Java process information to UUIDs.makeNode() hash. - - -### 3.0.7 - -- [bug] JAVA-1371: Reintroduce connection pool timeout. -- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. -- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. -- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. - - -### 3.0.6 - -- [bug] JAVA-1330: Add un/register for SchemaChangeListener in DelegatingCluster -- [bug] JAVA-1351: Include Custom Payload in Request.copy. -- [bug] JAVA-1346: Reset heartbeat only on client reads (not writes). -- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. 
- - -### 3.0.5 - -- [bug] JAVA-1312: QueryBuilder modifies selected columns when manually selected. -- [improvement] JAVA-1303: Add missing BoundStatement.setRoutingKey(ByteBuffer...) -- [improvement] JAVA-262: Make internal executors customizable -- [bug] JAVA-1320: prevent unnecessary task creation on empty pool - - -### 3.0.4 - -- [improvement] JAVA-1246: Driver swallows the real exception in a few cases -- [improvement] JAVA-1261: Throw error when attempting to page in I/O thread. -- [bug] JAVA-1258: Regression: Mapper cannot map a materialized view after JAVA-1126. -- [bug] JAVA-1101: Batch and BatchStatement should consider inner statements to determine query idempotence -- [improvement] JAVA-1262: Use ParseUtils for quoting & unquoting. -- [improvement] JAVA-1275: Use Netty's default thread factory -- [bug] JAVA-1285: QueryBuilder routing key auto-discovery should handle case-sensitive column names. -- [bug] JAVA-1283: Don't cache failed query preparations in the mapper. -- [improvement] JAVA-1277: Expose AbstractSession.checkNotInEventLoop. -- [bug] JAVA-1272: BuiltStatement not able to print its query string if it contains mapped UDTs. -- [bug] JAVA-1292: 'Adjusted frame length' error breaks driver's ability to read data. -- [improvement] JAVA-1293: Make DecoderForStreamIdSize.MAX_FRAME_LENGTH configurable. -- [improvement] JAVA-1053: Add a metric for authentication errors -- [improvement] JAVA-1263: Eliminate unnecessary memory copies in FrameCompressor implementations. -- [improvement] JAVA-893: Make connection pool non-blocking - - -### 3.0.3 - -- [improvement] JAVA-1147: Upgrade Netty to 4.0.37. -- [bug] JAVA-1213: Allow updates and inserts to BLOB column using read-only ByteBuffer. -- [bug] JAVA-1209: ProtocolOptions.getProtocolVersion() should return null instead of throwing NPE if Cluster has not - been init'd. -- [improvement] JAVA-1204: Update documentation to indicate tcnative version requirement. 
-- [bug] JAVA-1186: Fix duplicated hosts in DCAwarePolicy warn message. -- [bug] JAVA-1187: Fix warning message when local CL used with RoundRobinPolicy. -- [improvement] JAVA-1175: Warn if DCAwarePolicy configuration is inconsistent. -- [bug] JAVA-1139: ConnectionException.getMessage() throws NPE if address is null. -- [bug] JAVA-1202: Handle null rpc_address when checking schema agreement. -- [improvement] JAVA-1198: Document that BoundStatement is not thread-safe. -- [improvement] JAVA-1200: Upgrade LZ4 to 1.3.0. -- [bug] JAVA-1232: Fix NPE in IdempotenceAwareRetryPolicy.isIdempotent. -- [improvement] JAVA-1227: Document "SELECT *" issue with prepared statement. -- [bug] JAVA-1160: Fix NPE in VersionNumber.getPreReleaseLabels(). -- [improvement] JAVA-1126: Handle schema changes in Mapper. -- [bug] JAVA-1193: Refresh token and replica metadata synchronously when schema is altered. -- [bug] JAVA-1120: Skip schema refresh debouncer when checking for agreement as a result of schema change made by client. -- [improvement] JAVA-1242: Fix driver-core dependency in driver-stress -- [improvement] JAVA-1235: Move the query to the end of "re-preparing .." log message as a key value. - - -### 3.0.2 - -Merged from 2.1 branch: - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. - - -### 3.0.1 - -- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. -- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. -- [improvement] JAVA-1151: Fail fast if HdrHistogram is not in the classpath. -- [improvement] JAVA-1154: Allow individual Statement to cancel the read timeout. -- [bug] JAVA-1074: Fix documentation around default timestamp generator. -- [improvement] JAVA-1109: Document SSLOptions changes in upgrade guide. 
-- [improvement] JAVA-1065: Add method to create token from partition key values. -- [improvement] JAVA-1136: Enable JDK signature check in module driver-extras. -- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. -- [bug] JAVA-1140: Use same connection to check for schema agreement after a DDL query. -- [improvement] JAVA-1113: Support Cassandra 3.4 LIKE operator in QueryBuilder. -- [improvement] JAVA-1086: Support Cassandra 3.2 CAST function in QueryBuilder. -- [bug] JAVA-1095: Check protocol version for custom payload before sending the query. -- [improvement] JAVA-1133: Add OSGi headers to cassandra-driver-extras. -- [bug] JAVA-1137: Incorrect string returned by DataType.asFunctionParameterString() for collections and tuples. -- [bug] JAVA-1046: (Dynamic)CompositeTypes need to be parsed as string literal, not blob. -- [improvement] JAVA-1164: Clarify documentation on Host.listenAddress and broadcastAddress. -- [improvement] JAVA-1171: Add Host method to determine if DSE Graph is enabled. -- [improvement] JAVA-1069: Bootstrap driver-examples module. -- [documentation] JAVA-1150: Add example and FAQ entry about ByteBuffer/BLOB. - -Merged from 2.1 branch: - -- [improvement] JAVA-1011: Expose PoolingOptions default values. -- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. -- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. -- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. -- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. -- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. -- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. 
-- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI -- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. -- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. -- [improvement] JAVA-923: Position idempotent flag on object mapper queries. -- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. -- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). -- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. -- [improvement] JAVA-852: Ignore peers with null entries during discovery. -- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. -- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. -- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). - - -### 3.0.0 - -- [bug] JAVA-1034: fix metadata parser for collections of custom types. -- [improvement] JAVA-1035: Expose host broadcast_address and listen_address if available. -- [new feature] JAVA-1037: Allow named parameters in simple statements. -- [improvement] JAVA-1033: Allow per-statement read timeout. -- [improvement] JAVA-1042: Include DSE version and workload in Host data. - -Merged from 2.1 branch: - -- [improvement] JAVA-1030: Log token to replica map computation times. -- [bug] JAVA-1039: Minor bugs in Event Debouncer. - - -### 3.0.0-rc1 - -- [bug] JAVA-890: fix mapper for case-sensitive UDT. - - -### 3.0.0-beta1 - -- [bug] JAVA-993: Support for "custom" types after CASSANDRA-10365. -- [bug] JAVA-999: Handle unset parameters in QueryLogger. -- [bug] JAVA-998: SchemaChangeListener not invoked for Functions or Aggregates having UDT arguments. -- [bug] JAVA-1009: use CL ONE to compute query plan when reconnecting - control connection. 
-- [improvement] JAVA-1003: Change default consistency level to LOCAL_ONE (amends JAVA-926). -- [improvement] JAVA-863: Idempotence propagation in prepared statements. -- [improvement] JAVA-996: Make CodecRegistry available to ProtocolDecoder. -- [bug] JAVA-819: Driver shouldn't retry on client timeout if statement is not idempotent. -- [improvement] JAVA-1007: Make SimpleStatement and QueryBuilder "detached" again. - -Merged from 2.1 branch: - -- [improvement] JAVA-989: Include keyspace name when invalid replication found when generating token map. -- [improvement] JAVA-664: Reduce heap consumption for TokenMap. -- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. - - -### 3.0.0-alpha5 - -- [improvement] JAVA-958: Make TableOrView.Order visible. -- [improvement] JAVA-968: Update metrics to the latest version. -- [improvement] JAVA-965: Improve error handling for when a non-type 1 UUID is given to bind() on a timeuuid column. -- [improvement] JAVA-885: Pass the authenticator name from the server to the auth provider. -- [improvement] JAVA-961: Raise an exception when an older version of guava (<16.01) is found. -- [bug] JAVA-972: TypeCodec.parse() implementations should be case insensitive when checking for keyword NULL. -- [bug] JAVA-971: Make type codecs invariant. -- [bug] JAVA-986: Update documentation links to reference 3.0. -- [improvement] JAVA-841: Refactor SSLOptions API. -- [improvement] JAVA-948: Don't limit cipher suites by default. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-936: Adapt schema metadata parsing logic to new storage format of CQL types in C* 3.0. -- [new feature] JAVA-846: Provide custom codecs library as an extra module. -- [new feature] JAVA-742: Codec Support for JSON. -- [new feature] JAVA-606: Codec support for Java 8. -- [new feature] JAVA-565: Codec support for Java arrays. -- [new feature] JAVA-605: Codec support for Java enums. 
-- [bug] JAVA-884: Fix UDT mapper to process fields in the correct order. - -Merged from 2.1 branch: - -- [bug] JAVA-854: avoid early return in Cluster.init when a node doesn't support the protocol version. -- [bug] JAVA-978: Fix quoting issue that caused Mapper.getTableMetadata() to return null. -- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. -- [bug] JAVA-954: Don't trigger reconnection before initialization complete. -- [improvement] JAVA-914: Avoid rejected tasks at shutdown. -- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). -- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. -- [bug] JAVA-960: Avoid race in control connection shutdown. -- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. -- [bug] JAVA-966: Count uninitialized connections in conviction policy. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. -- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. -- [bug] JAVA-988: Metadata.handleId should handle escaped double quotes. -- [bug] JAVA-983: QueryBuilder cannot handle collections containing function calls. - - -### 3.0.0-alpha4 - -- [improvement] JAVA-926: Change default consistency level to LOCAL_QUORUM. -- [bug] JAVA-942: Fix implementation of UserType.hashCode(). -- [improvement] JAVA-877: Don't delay UP/ADDED notifications if protocol version = V4. -- [improvement] JAVA-938: Parse 'extensions' column in table metadata. -- [bug] JAVA-900: Fix Configuration builder to allow disabled metrics. -- [new feature] JAVA-902: Prepare API for async query trace. -- [new feature] JAVA-930: Add BoundStatement#unset. -- [bug] JAVA-946: Make table metadata options class visible. -- [bug] JAVA-939: Add crcCheckChance to TableOptionsMetadata#equals/hashCode. -- [bug] JAVA-922: Make TypeCodec return mutable collections. 
-- [improvement] JAVA-932: Limit visibility of codec internals. -- [improvement] JAVA-934: Warn if a custom codec collides with an existing one. -- [improvement] JAVA-940: Allow typed getters/setters to target any CQL type. -- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. -- [bug] JAVA-953: Fix MaterializedViewMetadata when base table name is case sensitive. - - -### 3.0.0-alpha3 - -- [new feature] JAVA-571: Support new system tables in C* 3.0. -- [improvement] JAVA-919: Move crc_check_chance out of compressions options. - -Merged from 2.0 branch: - -- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. -- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. -- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. -- [improvement] #340: Allow DNS name with multiple A-records as contact point. -- [bug] JAVA-794: Allow tracing across multiple result pages. -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). 
-- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. -- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. -- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. -- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. -- [improvement] JAVA-720: Surface the coordinator used on query failure. -- [bug] JAVA-792: Handle contact points removed during init. -- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. -- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. -- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. -- [improvement] JAVA-797: Provide an option to prepare statements only on one node. -- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. -- [improvement] JAVA-853: Customizable creation of netty timer. -- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. -- [improvement] JAVA-657: Debounce control connection queries. -- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). -- [new feature] JAVA-828: Make driver-side metadata optional. -- [improvement] JAVA-544: Allow hosts to remain partially up. -- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session - creation. -- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other - hosts. -- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. -- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled. - - -### 3.0.0-alpha2 - -- [new feature] JAVA-875, JAVA-882: Move secondary index metadata out of column definitions. - -Merged from 2.2 branch: - -- [bug] JAVA-847: Propagate CodecRegistry to nested UDTs. -- [improvement] JAVA-848: Ability to store a default, shareable CodecRegistry - instance. 
-- [bug] JAVA-880: Treat empty ByteBuffers as empty values in TupleCodec and - UDTCodec. - - -### 3.0.0-alpha1 - -- [new feature] JAVA-876: Support new system tables in C* 3.0.0-alpha1. - -Merged from 2.2 branch: - -- [improvement] JAVA-810: Rename DateWithoutTime to LocalDate. -- [bug] JAVA-816: DateCodec does not format values correctly. -- [bug] JAVA-817: TimeCodec does not format values correctly. -- [bug] JAVA-818: TypeCodec.getDataTypeFor() does not handle LocalDate instances. -- [improvement] JAVA-836: Make ResultSet#fetchMoreResult return a - ListenableFuture. -- [improvement] JAVA-843: Disable frozen checks in mapper. -- [improvement] JAVA-721: Allow user to register custom type codecs. -- [improvement] JAVA-722: Support custom type codecs in mapper. - - -### 2.2.0-rc3 - -- [bug] JAVA-847: Propagate CodecRegistry to nested UDTs. -- [improvement] JAVA-848: Ability to store a default, shareable CodecRegistry - instance. -- [bug] JAVA-880: Treat empty ByteBuffers as empty values in TupleCodec and - UDTCodec. - - -### 2.2.0-rc2 - -- [improvement] JAVA-810: Rename DateWithoutTime to LocalDate. -- [bug] JAVA-816: DateCodec does not format values correctly. -- [bug] JAVA-817: TimeCodec does not format values correctly. -- [bug] JAVA-818: TypeCodec.getDataTypeFor() does not handle LocalDate instances. -- [improvement] JAVA-836: Make ResultSet#fetchMoreResult return a - ListenableFuture. -- [improvement] JAVA-843: Disable frozen checks in mapper. -- [improvement] JAVA-721: Allow user to register custom type codecs. -- [improvement] JAVA-722: Support custom type codecs in mapper. - -Merged from 2.1 branch: - -- [bug] JAVA-834: Special case check for 'null' string in index_options column. -- [improvement] JAVA-835: Allow accessor methods with less parameters in case - named bind markers are repeated. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-715: Make NativeColumnType a top-level class. 
-- [improvement] JAVA-700: Expose ProtocolVersion#toInt. -- [bug] JAVA-542: Handle void return types in accessors. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-713: HashMap throws an OOM Exception when logging level is set to TRACE. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-732: Expose KEYS and FULL indexing options in IndexMetadata. -- [improvement] JAVA-589: Allow @Enumerated in Accessor method parameters. -- [improvement] JAVA-554: Allow access to table metadata from Mapper. -- [improvement] JAVA-661: Provide a way to map computed fields. -- [improvement] JAVA-824: Ignore missing columns in mapper. -- [bug] JAVA-724: Preserve default timestamp for retries and speculative executions. -- [improvement] JAVA-738: Use same pool implementation for protocol v2 and v3. -- [improvement] JAVA-677: Support CONTAINS / CONTAINS KEY in QueryBuilder. -- [improvement] JAVA-477/JAVA-540: Add USING options in mapper for delete and save - operations. -- [improvement] JAVA-473: Add mapper option to configure whether to save null fields. - -Merged from 2.0 branch: - -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. 
-- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). -- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. - - -### 2.2.0-rc1 - -- [new feature] JAVA-783: Protocol V4 enum support. -- [new feature] JAVA-776: Use PK columns in protocol v4 PREPARED response. -- [new feature] JAVA-777: Distinguish NULL and UNSET values. -- [new feature] JAVA-779: Add k/v payload for 3rd party usage. -- [new feature] JAVA-780: Expose server-side warnings on ExecutionInfo. -- [new feature] JAVA-749: Expose new read/write failure exceptions. -- [new feature] JAVA-747: Expose function and aggregate metadata. -- [new feature] JAVA-778: Add new client exception for CQL function failure. -- [improvement] JAVA-700: Expose ProtocolVersion#toInt. -- [new feature] JAVA-404: Support new C* 2.2 CQL date and time types. - -Merged from 2.1 branch: - -- [improvement] JAVA-782: Unify "Target" enum for schema elements. - - -### 2.1.10.2 - -Merged from 2.0 branch: - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. - - -### 2.1.10.1 - -- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). -- [bug] JAVA-1156: Fix NPE at TableMetadata.equals(). - - -### 2.1.10 - -- [bug] JAVA-988: Metadata.handleId should handle escaped double quotes. -- [bug] JAVA-983: QueryBuilder cannot handle collections containing function calls. -- [improvement] JAVA-863: Idempotence propagation in PreparedStatements. -- [bug] JAVA-937: TypeCodec static initializers not always correctly executed. -- [improvement] JAVA-989: Include keyspace name when invalid replication found when generating token map. -- [improvement] JAVA-664: Reduce heap consumption for TokenMap. 
-- [improvement] JAVA-1030: Log token to replica map computation times. -- [bug] JAVA-1039: Minor bugs in Event Debouncer. -- [improvement] JAVA-843: Disable frozen checks in mapper. -- [improvement] JAVA-833: Improve message when a nested type can't be serialized. -- [improvement] JAVA-1011: Expose PoolingOptions default values. -- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. -- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. -- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. -- [improvement] JAVA-1038: Fetch node info by rpc_address if its broadcast_address is not in system.peers. -- [improvement] JAVA-974: Validate accessor parameter types against bound statement. -- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. -- [bug] JAVA-831: Mapper can't load an entity where the PK is a UDT. -- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. -- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. -- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI -- [bug] JAVA-819: Expose more errors in RetryPolicy + provide idempotent-aware wrapper. -- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. -- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. -- [improvement] JAVA-888: Add cluster-wide percentile tracker. -- [improvement] JAVA-963: Automatically register PercentileTracker from components that use it. -- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. -- [improvement] JAVA-923: Position idempotent flag on object mapper queries. -- [new feature] JAVA-1019: SchemaBuilder support for CREATE/ALTER/DROP KEYSPACE. 
-- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. -- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). -- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. -- [bug] JAVA-727: Allow monotonic timestamp generators to drift in the future + use microsecond precision when possible. -- [improvement] JAVA-444: Add Java process information to UUIDs.makeNode() hash. -- [improvement] JAVA-977: Preserve original cause when BuiltStatement value can't be serialized. -- [bug] JAVA-1094: Backport TypeCodec parse and format fixes from 3.0. -- [improvement] JAVA-852: Ignore peers with null entries during discovery. -- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. -- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. -- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. - -Merged from 2.0 branch: - -- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. -- [improvement] JAVA-805: Document that metrics are null until Cluster is initialized. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. - - -### 2.1.9 - -- [bug] JAVA-942: Fix implementation of UserType.hashCode(). -- [bug] JAVA-854: avoid early return in Cluster.init when a node doesn't support the protocol version. -- [bug] JAVA-978: Fix quoting issue that caused Mapper.getTableMetadata() to return null. - -Merged from 2.0 branch: - -- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. -- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. -- [bug] JAVA-954: Don't trigger reconnection before initialization complete. -- [improvement] JAVA-914: Avoid rejected tasks at shutdown. -- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). 
-- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. -- [bug] JAVA-960: Avoid race in control connection shutdown. -- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. -- [bug] JAVA-966: Count uninitialized connections in conviction policy. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. -- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. - - -### 2.1.8 - -Merged from 2.0 branch: - -- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. - -- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. -- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. -- [improvement] #340: Allow DNS name with multiple A-records as contact point. -- [bug] JAVA-794: Allow tracing across multiple result pages. -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). 
-- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. -- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. -- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. -- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. -- [improvement] JAVA-720: Surface the coordinator used on query failure. -- [bug] JAVA-792: Handle contact points removed during init. -- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. -- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. -- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. -- [improvement] JAVA-797: Provide an option to prepare statements only on one node. -- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. -- [improvement] JAVA-853: Customizable creation of netty timer. -- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. -- [improvement] JAVA-657: Debounce control connection queries. -- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). -- [new feature] JAVA-828: Make driver-side metadata optional. -- [improvement] JAVA-544: Allow hosts to remain partially up. -- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session - creation. -- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other - hosts. -- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. -- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled. - - -### 2.1.7.1 - -- [bug] JAVA-834: Special case check for 'null' string in index_options column. -- [improvement] JAVA-835: Allow accessor methods with less parameters in case - named bind markers are repeated. - - -### 2.1.7 - -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. 
-- [improvement] JAVA-715: Make NativeColumnType a top-level class. -- [improvement] JAVA-782: Unify "Target" enum for schema elements. -- [improvement] JAVA-700: Expose ProtocolVersion#toInt. -- [bug] JAVA-542: Handle void return types in accessors. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-713: HashMap throws an OOM Exception when logging level is set to TRACE. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-732: Expose KEYS and FULL indexing options in IndexMetadata. -- [improvement] JAVA-589: Allow @Enumerated in Accessor method parameters. -- [improvement] JAVA-554: Allow access to table metadata from Mapper. -- [improvement] JAVA-661: Provide a way to map computed fields. -- [improvement] JAVA-824: Ignore missing columns in mapper. -- [bug] JAVA-724: Preserve default timestamp for retries and speculative executions. -- [improvement] JAVA-738: Use same pool implementation for protocol v2 and v3. -- [improvement] JAVA-677: Support CONTAINS / CONTAINS KEY in QueryBuilder. -- [improvement] JAVA-477/JAVA-540: Add USING options in mapper for delete and save - operations. -- [improvement] JAVA-473: Add mapper option to configure whether to save null fields. - -Merged from 2.0 branch: - -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. 
-- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). -- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. - - -### 2.1.6 - -Merged from 2.0 branch: - -- [new feature] JAVA-584: Add getObject to BoundStatement and Row. -- [improvement] JAVA-419: Improve connection pool resizing algorithm. -- [bug] JAVA-599: Fix race condition between pool expansion and shutdown. -- [improvement] JAVA-622: Upgrade Netty to 4.0.27. -- [improvement] JAVA-562: Coalesce frames before flushing them to the connection. -- [improvement] JAVA-583: Rename threads to indicate that they are for the driver. -- [new feature] JAVA-550: Expose paging state. -- [new feature] JAVA-646: Slow Query Logger. -- [improvement] JAVA-698: Exclude some errors from measurements in LatencyAwarePolicy. -- [bug] JAVA-641: Fix issue when executing a PreparedStatement from another cluster. -- [improvement] JAVA-534: Log keyspace xxx does not exist at WARN level. -- [improvement] JAVA-619: Allow Cluster subclasses to delegate to another instance. -- [new feature] JAVA-669: Expose an API to check for schema agreement after a - schema-altering statement. -- [improvement] JAVA-692: Make connection and pool creation fully async. -- [improvement] JAVA-505: Optimize connection use after reconnection. -- [improvement] JAVA-617: Remove "suspected" mechanism. -- [improvement] reverts JAVA-425: Don't mark connection defunct on client timeout. -- [new feature] JAVA-561: Speculative query executions. -- [bug] JAVA-666: Release connection before completing the ResultSetFuture. -- [new feature BETA] JAVA-723: Percentile-based variant of query logger and speculative - executions. 
-- [bug] JAVA-734: Fix buffer leaks when compression is enabled.
-- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default.
-- [improvement] JAVA-759: Expose "unsafe" paging state API.
-- [bug] JAVA-768: Prevent race during pool initialization.
-
-
-### 2.1.5
-
-- [bug] JAVA-575: Authorize Null parameter in Accessor method.
-- [improvement] JAVA-570: Support C* 2.1.3's nested collections.
-- [bug] JAVA-612: Fix checks on mapped collection types.
-- [bug] JAVA-672: Fix QueryBuilder.putAll() when the collection contains UDTs.
-
-Merged from 2.0 branch:
-
-- [new feature] JAVA-518: Add AddressTranslater for EC2 multi-region deployment.
-- [improvement] JAVA-533: Add connection heartbeat.
-- [improvement] JAVA-568: Reduce level of logs on missing rpc_address.
-- [improvement] JAVA-312, JAVA-681: Expose node token and range information.
-- [bug] JAVA-595: Fix cluster name mismatch check at startup.
-- [bug] JAVA-620: Fix guava dependency when using OSGI.
-- [bug] JAVA-678: Fix handling of DROP events when ks name is case-sensitive.
-- [improvement] JAVA-631: Use List<Object> instead of List<Serializable> in QueryBuilder API.
-- [improvement] JAVA-654: Exclude Netty POM from META-INF in shaded JAR.
-- [bug] JAVA-655: Quote single quotes contained in table comments in asCQLQuery method.
-- [bug] JAVA-684: Empty TokenRange returned in a one token cluster.
-- [improvement] JAVA-687: Expose TokenRange#contains.
-- [bug] JAVA-614: Prevent race between cancellation and query completion.
-- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if
- streamId was already released and reused.
-- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP.
-- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down.
-- [bug] JAVA-651: Fix edge cases where a connection was released twice.
-- [bug] JAVA-653: Fix edge cases in query cancellation.
- - -### 2.1.4 - -Merged from 2.0 branch: - -- [improvement] JAVA-538: Shade Netty dependency. -- [improvement] JAVA-543: Target schema refreshes more precisely. -- [bug] JAVA-546: Don't check rpc_address for control host. -- [improvement] JAVA-409: Improve message of NoHostAvailableException. -- [bug] JAVA-556: Rework connection reaper to avoid deadlock. -- [bug] JAVA-557: Avoid deadlock when multiple connections to the same host get write - errors. -- [improvement] JAVA-504: Make shuffle=true the default for TokenAwarePolicy. -- [bug] JAVA-577: Fix bug when SUSPECT reconnection succeeds, but one of the pooled - connections fails while bringing the node back up. -- [bug] JAVA-419: JAVA-587: Prevent faulty control connection from ignoring reconnecting hosts. -- temporarily revert "Add idle timeout to the connection pool". -- [bug] JAVA-593: Ensure updateCreatedPools does not add pools for suspected hosts. -- [bug] JAVA-594: Ensure state change notifications for a given host are handled serially. -- [bug] JAVA-597: Ensure control connection reconnects when control host is removed. - - -### 2.1.3 - -- [bug] JAVA-510: Ignore static fields in mapper. -- [bug] JAVA-509: Fix UDT parsing at init when using the default protocol version. -- [bug] JAVA-495: Fix toString, equals and hashCode on accessor proxies. -- [bug] JAVA-528: Allow empty name on Column and Field annotations. - -Merged from 2.0 branch: - -- [bug] JAVA-497: Ensure control connection does not trigger concurrent reconnects. -- [improvement] JAVA-472: Keep trying to reconnect on authentication errors. -- [improvement] JAVA-463: Expose close method on load balancing policy. -- [improvement] JAVA-459: Allow load balancing policy to trigger refresh for a single host. -- [bug] JAVA-493: Expose an API to cancel reconnection attempts. -- [bug] JAVA-503: Fix NPE when a connection fails during pool construction. -- [improvement] JAVA-423: Log datacenter name in DCAware policy's init when it is explicitly provided. 
-- [improvement] JAVA-504: Shuffle the replicas in TokenAwarePolicy.newQueryPlan. -- [improvement] JAVA-507: Make schema agreement wait tuneable. -- [improvement] JAVA-494: Document how to inject the driver metrics into another registry. -- [improvement] JAVA-419: Add idle timeout to the connection pool. -- [bug] JAVA-516: LatencyAwarePolicy does not shutdown executor on invocation of close. -- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with - an explicit but null or empty local datacenter. -- [bug] JAVA-511: Fix check for local contact points in DCAware policy's init. -- [improvement] JAVA-457: Make timeout on saturated pool customizable. -- [improvement] JAVA-521: Downgrade Guava to 14.0.1. -- [bug] JAVA-526: Fix token awareness for case-sensitive keyspaces and tables. -- [bug] JAVA-515: Check maximum number of values passed to SimpleStatement. -- [improvement] JAVA-532: Expose the driver version through the API. -- [improvement] JAVA-522: Optimize session initialization when some hosts are not - responsive. - - -### 2.1.2 - -- [improvement] JAVA-361, JAVA-364, JAVA-467: Support for native protocol v3. -- [bug] JAVA-454: Fix UDT fields of type inet in QueryBuilder. -- [bug] JAVA-455: Exclude transient fields from Frozen checks. -- [bug] JAVA-453: Fix handling of null collections in mapper. -- [improvement] JAVA-452: Make implicit column names case-insensitive in mapper. -- [bug] JAVA-433: Fix named bind markers in QueryBuilder. -- [bug] JAVA-458: Fix handling of BigInteger in object mapper. -- [bug] JAVA-465: Ignore synthetic fields in mapper. -- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with - an explicit but null or empty local datacenter. -- [improvement] JAVA-469: Add backwards-compatible DataType.serialize methods. -- [bug] JAVA-487: Handle null enum fields in object mapper. -- [bug] JAVA-499: Handle null UDT fields in object mapper. 
- -Merged from 2.0 branch: - -- [bug] JAVA-449: Handle null pool in PooledConnection.release. -- [improvement] JAVA-425: Defunct connection on request timeout. -- [improvement] JAVA-426: Try next host when we get a SERVER_ERROR. -- [bug] JAVA-449, JAVA-460, JAVA-471: Handle race between query timeout and completion. -- [bug] JAVA-496: Fix DCAwareRoundRobinPolicy datacenter auto-discovery. - - -### 2.1.1 - -- [new] JAVA-441: Support for new "frozen" keyword. - -Merged from 2.0 branch: - -- [bug] JAVA-397: Check cluster name when connecting to a new node. -- [bug] JAVA-326: Add missing CAS delete support in QueryBuilder. -- [bug] JAVA-363: Add collection and data length checks during serialization. -- [improvement] JAVA-329: Surface number of retries in metrics. -- [bug] JAVA-428: Do not use a host when no rpc_address found for it. -- [improvement] JAVA-358: Add ResultSet.wasApplied() for conditional queries. -- [bug] JAVA-349: Fix negative HostConnectionPool open count. -- [improvement] JAVA-436: Log more connection details at trace and debug levels. -- [bug] JAVA-445: Fix cluster shutdown. - - -### 2.1.0 - -- [bug] JAVA-408: ClusteringColumn annotation not working with specified ordering. -- [improvement] JAVA-410: Fail BoundStatement if null values are not set explicitly. -- [bug] JAVA-416: Handle UDT and tuples in BuiltStatement.toString. - -Merged from 2.0 branch: - -- [bug] JAVA-407: Release connections on ResultSetFuture#cancel. -- [bug] JAVA-393: Fix handling of SimpleStatement with values in query builder - batches. -- [bug] JAVA-417: Ensure pool is properly closed in onDown. -- [bug] JAVA-415: Fix tokenMap initialization at startup. -- [bug] JAVA-418: Avoid deadlock on close. - - -### 2.1.0-rc1 - -Merged from 2.0 branch: - -- [bug] JAVA-394: Ensure defunct connections are completely closed. -- [bug] JAVA-342, JAVA-390: Fix memory and resource leak on closed Sessions. 
- - -### 2.1.0-beta1 - -- [new] Support for User Defined Types and tuples -- [new] Simple object mapper - -Merged from 2.0 branch: everything up to 2.0.3 (included), and the following. - -- [improvement] JAVA-204: Better handling of dead connections. -- [bug] JAVA-373: Fix potential NPE in ControlConnection. -- [bug] JAVA-291: Throws NPE when passed null for a contact point. -- [bug] JAVA-315: Avoid LoadBalancingPolicy onDown+onUp at startup. -- [bug] JAVA-343: Avoid classloader leak in Tomcat. -- [bug] JAVA-387: Avoid deadlock in onAdd/onUp. -- [bug] JAVA-377, JAVA-391: Make metadata parsing more lenient. - - -### 2.0.12.2 - -- [bug] JAVA-1179: Request objects should be copied when executed. -- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. -- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. - - -### 2.0.12.1 - -- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. -- [improvement] JAVA-805: Document that metrics are null until Cluster is initialized. -- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. - - -### 2.0.12 - -- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. -- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. -- [bug] JAVA-954: Don't trigger reconnection before initialization complete. -- [improvement] JAVA-914: Avoid rejected tasks at shutdown. -- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). -- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. -- [bug] JAVA-960: Avoid race in control connection shutdown. -- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. -- [bug] JAVA-966: Count uninitialized connections in conviction policy. -- [improvement] JAVA-917: Document SSL configuration. -- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. 
-- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. - - -### 2.0.11 - -- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. -- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. -- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. -- [improvement] #340: Allow DNS name with multiple A-records as contact point. -- [bug] JAVA-794: Allow tracing across multiple result pages. -- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. -- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. -- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. -- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. -- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. -- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. -- [improvement] JAVA-225: Create values() function for Insert builder using List. -- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid - replication factors. -- [improvement] JAVA-662: Add PoolingOptions method to set both core and max - connections. -- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. -- [improvement] JAVA-726: Optimize internal copies of Request objects. -- [bug] JAVA-815: Preserve tracing across retries. -- [improvement] JAVA-709: New RetryDecision.tryNextHost(). -- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. -- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. -- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. -- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. -- [improvement] JAVA-720: Surface the coordinator used on query failure. -- [bug] JAVA-792: Handle contact points removed during init. 
-- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. -- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. -- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. -- [improvement] JAVA-797: Provide an option to prepare statements only on one node. -- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. -- [improvement] JAVA-853: Customizable creation of netty timer. -- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. -- [improvement] JAVA-657: Debounce control connection queries. -- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). -- [new feature] JAVA-828: Make driver-side metadata optional. -- [improvement] JAVA-544: Allow hosts to remain partially up. -- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session - creation. -- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other - hosts. -- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. -- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled. - -Merged from 2.0.10_fixes branch: - -- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default. -- [improvement] JAVA-759: Expose "unsafe" paging state API. -- [bug] JAVA-767: Fix getObject by name. -- [bug] JAVA-768: Prevent race during pool initialization. - - -### 2.0.10.1 - -- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default. -- [improvement] JAVA-759: Expose "unsafe" paging state API. -- [bug] JAVA-767: Fix getObject by name. -- [bug] JAVA-768: Prevent race during pool initialization. - - -### 2.0.10 - -- [new feature] JAVA-518: Add AddressTranslater for EC2 multi-region deployment. -- [improvement] JAVA-533: Add connection heartbeat. -- [improvement] JAVA-568: Reduce level of logs on missing rpc_address. 
-- [improvement] JAVA-312, JAVA-681: Expose node token and range information.
-- [bug] JAVA-595: Fix cluster name mismatch check at startup.
-- [bug] JAVA-620: Fix guava dependency when using OSGI.
-- [bug] JAVA-678: Fix handling of DROP events when ks name is case-sensitive.
-- [improvement] JAVA-631: Use List<Object> instead of List<Serializable> in QueryBuilder API.
-- [improvement] JAVA-654: Exclude Netty POM from META-INF in shaded JAR.
-- [bug] JAVA-655: Quote single quotes contained in table comments in asCQLQuery method.
-- [bug] JAVA-684: Empty TokenRange returned in a one token cluster.
-- [improvement] JAVA-687: Expose TokenRange#contains.
-- [new feature] JAVA-547: Expose values of BoundStatement.
-- [new feature] JAVA-584: Add getObject to BoundStatement and Row.
-- [improvement] JAVA-419: Improve connection pool resizing algorithm.
-- [bug] JAVA-599: Fix race condition between pool expansion and shutdown.
-- [improvement] JAVA-622: Upgrade Netty to 4.0.27.
-- [improvement] JAVA-562: Coalesce frames before flushing them to the connection.
-- [improvement] JAVA-583: Rename threads to indicate that they are for the driver.
-- [new feature] JAVA-550: Expose paging state.
-- [new feature] JAVA-646: Slow Query Logger.
-- [improvement] JAVA-698: Exclude some errors from measurements in LatencyAwarePolicy.
-- [bug] JAVA-641: Fix issue when executing a PreparedStatement from another cluster.
-- [improvement] JAVA-534: Log keyspace xxx does not exist at WARN level.
-- [improvement] JAVA-619: Allow Cluster subclasses to delegate to another instance.
-- [new feature] JAVA-669: Expose an API to check for schema agreement after a
- schema-altering statement.
-- [improvement] JAVA-692: Make connection and pool creation fully async.
-- [improvement] JAVA-505: Optimize connection use after reconnection.
-- [improvement] JAVA-617: Remove "suspected" mechanism.
-- [improvement] reverts JAVA-425: Don't mark connection defunct on client timeout.
-- [new feature] JAVA-561: Speculative query executions. -- [bug] JAVA-666: Release connection before completing the ResultSetFuture. -- [new feature BETA] JAVA-723: Percentile-based variant of query logger and speculative - executions. -- [bug] JAVA-734: Fix buffer leaks when compression is enabled. - -Merged from 2.0.9_fixes branch: - -- [bug] JAVA-614: Prevent race between cancellation and query completion. -- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if - streamId was already released and reused. -- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP. -- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down. -- [bug] JAVA-651: Fix edge cases where a connection was released twice. -- [bug] JAVA-653: Fix edge cases in query cancellation. - - -### 2.0.9.2 - -- [bug] JAVA-651: Fix edge cases where a connection was released twice. -- [bug] JAVA-653: Fix edge cases in query cancellation. - - -### 2.0.9.1 - -- [bug] JAVA-614: Prevent race between cancellation and query completion. -- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if - streamId was already released and reused. -- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP. -- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down. - - -### 2.0.9 - -- [improvement] JAVA-538: Shade Netty dependency. -- [improvement] JAVA-543: Target schema refreshes more precisely. -- [bug] JAVA-546: Don't check rpc_address for control host. -- [improvement] JAVA-409: Improve message of NoHostAvailableException. -- [bug] JAVA-556: Rework connection reaper to avoid deadlock. -- [bug] JAVA-557: Avoid deadlock when multiple connections to the same host get write - errors. -- [improvement] JAVA-504: Make shuffle=true the default for TokenAwarePolicy. 
-- [bug] JAVA-577: Fix bug when SUSPECT reconnection succeeds, but one of the pooled - connections fails while bringing the node back up. -- [bug] JAVA-419: JAVA-587: Prevent faulty control connection from ignoring reconnecting hosts. -- temporarily revert "Add idle timeout to the connection pool". -- [bug] JAVA-593: Ensure updateCreatedPools does not add pools for suspected hosts. -- [bug] JAVA-594: Ensure state change notifications for a given host are handled serially. -- [bug] JAVA-597: Ensure control connection reconnects when control host is removed. - - -### 2.0.8 - -- [bug] JAVA-526: Fix token awareness for case-sensitive keyspaces and tables. -- [bug] JAVA-515: Check maximum number of values passed to SimpleStatement. -- [improvement] JAVA-532: Expose the driver version through the API. -- [improvement] JAVA-522: Optimize session initialization when some hosts are not - responsive. - - -### 2.0.7 - -- [bug] JAVA-449: Handle null pool in PooledConnection.release. -- [improvement] JAVA-425: Defunct connection on request timeout. -- [improvement] JAVA-426: Try next host when we get a SERVER_ERROR. -- [bug] JAVA-449, JAVA-460, JAVA-471: Handle race between query timeout and completion. -- [bug] JAVA-496: Fix DCAwareRoundRobinPolicy datacenter auto-discovery. -- [bug] JAVA-497: Ensure control connection does not trigger concurrent reconnects. -- [improvement] JAVA-472: Keep trying to reconnect on authentication errors. -- [improvement] JAVA-463: Expose close method on load balancing policy. -- [improvement] JAVA-459: Allow load balancing policy to trigger refresh for a single host. -- [bug] JAVA-493: Expose an API to cancel reconnection attempts. -- [bug] JAVA-503: Fix NPE when a connection fails during pool construction. -- [improvement] JAVA-423: Log datacenter name in DCAware policy's init when it is explicitly provided. -- [improvement] JAVA-504: Shuffle the replicas in TokenAwarePolicy.newQueryPlan. 
-- [improvement] JAVA-507: Make schema agreement wait tuneable. -- [improvement] JAVA-494: Document how to inject the driver metrics into another registry. -- [improvement] JAVA-419: Add idle timeout to the connection pool. -- [bug] JAVA-516: LatencyAwarePolicy does not shutdown executor on invocation of close. -- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with - an explicit but null or empty local datacenter. -- [bug] JAVA-511: Fix check for local contact points in DCAware policy's init. -- [improvement] JAVA-457: Make timeout on saturated pool customizable. -- [improvement] JAVA-521: Downgrade Guava to 14.0.1. - - -### 2.0.6 - -- [bug] JAVA-397: Check cluster name when connecting to a new node. -- [bug] JAVA-326: Add missing CAS delete support in QueryBuilder. -- [bug] JAVA-363: Add collection and data length checks during serialization. -- [improvement] JAVA-329: Surface number of retries in metrics. -- [bug] JAVA-428: Do not use a host when no rpc_address found for it. -- [improvement] JAVA-358: Add ResultSet.wasApplied() for conditional queries. -- [bug] JAVA-349: Fix negative HostConnectionPool open count. -- [improvement] JAVA-436: Log more connection details at trace and debug levels. -- [bug] JAVA-445: Fix cluster shutdown. -- [improvement] JAVA-439: Expose child policy in chainable load balancing policies. - - -### 2.0.5 - -- [bug] JAVA-407: Release connections on ResultSetFuture#cancel. -- [bug] JAVA-393: Fix handling of SimpleStatement with values in query builder - batches. -- [bug] JAVA-417: Ensure pool is properly closed in onDown. -- [bug] JAVA-415: Fix tokenMap initialization at startup. -- [bug] JAVA-418: Avoid deadlock on close. - - -### 2.0.4 - -- [improvement] JAVA-204: Better handling of dead connections. -- [bug] JAVA-373: Fix potential NPE in ControlConnection. -- [bug] JAVA-291: Throws NPE when passed null for a contact point. -- [bug] JAVA-315: Avoid LoadBalancingPolicy onDown+onUp at startup. 
-- [bug] JAVA-343: Avoid classloader leak in Tomcat.
-- [bug] JAVA-387: Avoid deadlock in onAdd/onUp.
-- [bug] JAVA-377, JAVA-391: Make metadata parsing more lenient.
-- [bug] JAVA-394: Ensure defunct connections are completely closed.
-- [bug] JAVA-342, JAVA-390: Fix memory and resource leak on closed Sessions.
-
-
-### 2.0.3
-
-- [new] The new AbstractSession makes mocking of Session easier.
-- [new] JAVA-309: Allow to trigger a refresh of connected hosts.
-- [new] JAVA-265: New Session#getState method allows to grab information on
- which nodes a session is connected to.
-- [new] JAVA-327: Add QueryBuilder syntax for tuples in where clauses (syntax
- introduced in Cassandra 2.0.6).
-- [improvement] JAVA-359: Properly validate arguments of PoolingOptions methods.
-- [bug] JAVA-368: Fix bogus rejection of BigInteger in 'execute with values'.
-- [bug] JAVA-367: Signal connection failure sooner to avoid missing them.
-- [bug] JAVA-337: Throw UnsupportedOperationException for protocol batch
- setSerialCL.
-
-Merged from 1.0 branch:
-
-- [bug] JAVA-325: Fix periodic reconnection to down hosts.
-
-
-### 2.0.2
-
-- [api] The type of the map key returned by NoHostAvailable#getErrors has changed from
- InetAddress to InetSocketAddress. Same for Initializer#getContactPoints return and
- for AuthProvider#newAuthenticator.
-- [api] JAVA-296: The default load balancing policy is now DCAwareRoundRobinPolicy, and the local
- datacenter is automatically picked based on the first connected node. Furthermore,
- the TokenAwarePolicy is also used by default.
-- [new] JAVA-145: New optional AddressTranslater.
-- [bug] JAVA-321: Don't remove quotes on keyspace in the query builder.
-- [bug] JAVA-320: Fix potential NPE while cluster undergoes schema changes.
-- [bug] JAVA-319: Fix thread-safety of page fetching.
-- [bug] JAVA-318: Fix potential NPE using fetchMoreResults.
-
-Merged from 1.0 branch:
-
-- [new] JAVA-179: Expose the name of the partitioner in use in the cluster metadata.
-- [new] Add new WhiteListPolicy to limit the nodes connected to a particular list. -- [improvement] JAVA-289: Do not hop DC for LOCAL_* CL in DCAwareRoundRobinPolicy. -- [bug] JAVA-313: Revert back to longs for dates in the query builder. -- [bug] JAVA-314: Don't reconnect to nodes ignored by the load balancing policy. - - -### 2.0.1 - -- [improvement] JAVA-278: Handle the static columns introduced in Cassandra 2.0.6. -- [improvement] JAVA-208: Add Cluster#newSession method to create Session without connecting - right away. -- [bug] JAVA-279: Add missing iso8601 patterns for parsing dates. -- [bug] Properly parse BytesType as the blob type. -- [bug] JAVA-280: Potential NPE when parsing schema of pre-CQL tables of C* 1.2 nodes. - -Merged from 1.0 branch: - -- [bug] JAVA-275: LatencyAwarePolicy.Builder#withScale doesn't set the scale. -- [new] JAVA-114: Add methods to check if a Cluster/Session instance has been closed already. - - -### 2.0.0 - -- [api] JAVA-269: Case sensitive identifier by default in Metadata. -- [bug] JAVA-274: Fix potential NPE in Cluster#connect. - -Merged from 1.0 branch: - -- [bug] JAVA-263: Always return the PreparedStatement object that is cache internally. -- [bug] JAVA-261: Fix race when multiple connect are done in parallel. -- [bug] JAVA-270: Don't connect at all to nodes that are ignored by the load balancing - policy. - - -### 2.0.0-rc3 - -- [improvement] The protocol version 1 is now supported (features only supported by the - version 2 of the protocol throw UnsupportedFeatureException). -- [improvement] JAVA-195: Make most main objects interface to facilitate testing/mocking. -- [improvement] Adds new getStatements and clear methods to BatchStatement. -- [api] JAVA-247: Renamed shutdown to closeAsync and ShutdownFuture to CloseFuture. Clustering - and Session also now implement Closeable. -- [bug] JAVA-232: Fix potential thread leaks when shutting down Metrics. -- [bug] JAVA-231: Fix potential NPE in HostConnectionPool. 
-- [bug] JAVA-244: Avoid NPE when node is in an unconfigured DC. -- [bug] JAVA-258: Don't block for scheduled reconnections on Cluster#close. - -Merged from 1.0 branch: - -- [new] JAVA-224: Added Session#prepareAsync calls. -- [new] JAVA-249: Added Cluster#getLoggedKeyspace. -- [improvement] Avoid preparing a statement multiple time per host with multiple sessions. -- [bug] JAVA-255: Make sure connections are returned to the right pools. -- [bug] JAVA-264: Use date string in query build to work-around CASSANDRA-6718. - - -### 2.0.0-rc2 - -- [new] JAVA-207: Add LOCAL_ONE consistency level support (requires using C* 2.0.2+). -- [bug] JAVA-219: Fix parsing of counter types. -- [bug] JAVA-218: Fix missing whitespace for IN clause in the query builder. -- [bug] JAVA-221: Fix replicas computation for token aware balancing. - -Merged from 1.0 branch: - -- [bug] JAVA-213: Fix regression from JAVA-201. -- [improvement] New getter to obtain a snapshot of the scores maintained by - LatencyAwarePolicy. - - -### 2.0.0-rc1 - -- [new] JAVA-199: Mark compression dependencies optional in maven. -- [api] Renamed TableMetadata#getClusteringKey to TableMetadata#getClusteringColumns. - -Merged from 1.0 branch: - -- [new] JAVA-142: OSGi bundle. -- [improvement] JAVA-205: Make collections returned by Row immutable. -- [improvement] JAVA-203: Limit internal thread pool size. -- [bug] JAVA-201: Don't retain unused PreparedStatement in memory. -- [bug] Add missing clustering order info in TableMetadata -- [bug] JAVA-196: Allow bind markers for collections in the query builder. - - -### 2.0.0-beta2 - -- [api] BoundStatement#setX(String, X) methods now set all values (if there is - more than one) having the provided name, not just the first occurence. -- [api] The Authenticator interface now has a onAuthenticationSuccess method that - allows to handle the potential last token sent by the server. 
-- [new] The query builder don't serialize large values to strings anymore by - default by making use the new ability to send values alongside the query string. -- [new] JAVA-140: The query builder has been updated for new CQL features. -- [bug] Fix exception when a conditional write timeout C* side. -- [bug] JAVA-182: Ensure connection is created when Cluster metadata are asked for. -- [bug] JAVA-187: Fix potential NPE during authentication. - - -### 2.0.0-beta1 - -- [api] The 2.0 version is an API-breaking upgrade of the driver. While most - of the breaking changes are minor, there are too numerous to be listed here - and you are encouraged to look at the Upgrade_guide_to_2.0 file that describe - those changes in details. -- [new] LZ4 compression is supported for the protocol. -- [new] JAVA-39: The driver does not depend on cassandra-all anymore. -- [new] New BatchStatement class allows to execute batch other statements. -- [new] Large ResultSet are now paged (incrementally fetched) by default. -- [new] SimpleStatement support values for bind-variables, to allow - prepare+execute behavior with one roundtrip. -- [new] Query parameters defaults (Consistency level, page size, ...) can be - configured globally. -- [new] New Cassandra 2.0 SERIAL and LOCAL_SERIAL consistency levels are - supported. -- [new] JAVA-116: Cluster#shutdown now waits for ongoing queries to complete by default. -- [new] Generic authentication through SASL is now exposed. -- [bug] JAVA-88: TokenAwarePolicy now takes all replica into account, instead of only the - first one. - - -### 1.0.5 - -- [new] JAVA-142: OSGi bundle. -- [new] JAVA-207: Add support for ConsistencyLevel.LOCAL_ONE; note that this - require Cassandra 1.2.12+. -- [improvement] JAVA-205: Make collections returned by Row immutable. -- [improvement] JAVA-203: Limit internal thread pool size. -- [improvement] New getter to obtain a snapshot of the scores maintained by - LatencyAwarePolicy. 
-- [improvement] JAVA-222: Avoid synchronization when getting codec for collection - types. -- [bug] JAVA-201, JAVA-213: Don't retain unused PreparedStatement in memory. -- [bug] Add missing clustering order info in TableMetadata -- [bug] JAVA-196: Allow bind markers for collections in the query builder. - - -### 1.0.4 - -- [api] JAVA-163: The Cluster.Builder#poolingOptions and Cluster.Builder#socketOptions - are now deprecated. They are replaced by the new withPoolingOptions and - withSocketOptions methods. -- [new] JAVA-129: A new LatencyAwarePolicy wrapping policy has been added, allowing to - add latency awareness to a wrapped load balancing policy. -- [new] JAVA-161: Cluster.Builder#deferInitialization: Allow defering cluster initialization. -- [new] JAVA-117: Add truncate statement in query builder. -- [new] JAVA-106: Support empty IN in the query builder. -- [bug] JAVA-166: Fix spurious "No current pool set; this should not happen" error - message. -- [bug] JAVA-184: Fix potential overflow in RoundRobinPolicy and correctly errors if - a balancing policy throws. -- [bug] Don't release Stream ID for timeouted queries (unless we do get back - the response) -- [bug] Correctly escape identifiers and use fully qualified table names when - exporting schema as string. - - -### 1.0.3 - -- [api] The query builder now correctly throw an exception when given a value - of a type it doesn't know about. -- [new] SocketOptions#setReadTimeout allows to set a timeout on how long we - wait for the answer of one node. See the javadoc for more details. -- [new] New Session#prepare method that takes a Statement. -- [bug] JAVA-143: Always take per-query CL, tracing, etc. into account for QueryBuilder - statements. -- [bug] Temporary fixup for TimestampType when talking to C* 2.0 nodes. - - -### 1.0.2 - -- [api] Host#getMonitor and all Host.HealthMonitor methods have been - deprecated. 
The new Host#isUp method is now prefered to the method - in the monitor and you should now register Host.StateListener against - the Cluster object directly (registering against a host HealthMonitor - was much more limited anyway). -- [new] JAVA-92: New serialize/deserialize methods in DataType to serialize/deserialize - values to/from bytes. -- [new] JAVA-128: New getIndexOf() method in ColumnDefinitions to find the index of - a given column name. -- [bug] JAVA-131: Fix a bug when thread could get blocked while setting the current - keyspace. -- [bug] JAVA-136: Quote inet addresses in the query builder since CQL3 requires it. - - -### 1.0.1 - -- [api] JAVA-100: Function call handling in the query builder has been modified in a - backward incompatible way. Function calls are not parsed from string values - anymore as this wasn't safe. Instead the new 'fcall' method should be used. -- [api] Some typos in method names in PoolingOptions have been fixed in a - backward incompatible way before the API get widespread. -- [bug] JAVA-123: Don't destroy composite partition key with BoundStatement and - TokenAwarePolicy. -- [new] null values support in the query builder. -- [new] JAVA-5: SSL support (requires C* >= 1.2.1). -- [new] JAVA-113: Allow generating unlogged batch in the query builder. -- [improvement] Better error message when no host are available. -- [improvement] Improves performance of the stress example application been. - - -### 1.0.0 - -- [api] The AuthInfoProvider has be (temporarily) removed. Instead, the - Cluster builder has a new withCredentials() method to provide a username - and password for use with Cassandra's PasswordAuthenticator. Custom - authenticator will be re-introduced in a future version but are not - supported at the moment. -- [api] The isMetricsEnabled() method in Configuration has been replaced by - getMetricsOptions(). An option to disabled JMX reporting (on by default) - has been added. 
-- [bug] JAVA-91: Don't make default load balancing policy a static singleton since it - is stateful. - - -### 1.0.0-RC1 - -- [new] JAVA-79: Null values are now supported in BoundStatement (but you will need at - least Cassandra 1.2.3 for it to work). The API of BoundStatement has been - slightly changed so that not binding a variable is not an error anymore, - the variable is simply considered null by default. The isReady() method has - been removed. -- [improvement] JAVA-75: The Cluster/Session shutdown methods now properly block until - the shutdown is complete. A version with at timeout has been added. -- [bug] JAVA-44: Fix use of CQL3 functions in the query builder. -- [bug] JAVA-77: Fix case where multiple schema changes too quickly wouldn't work - (only triggered when 0.0.0.0 was used for the rpc_address on the Cassandra - nodes). -- [bug] JAVA-72: Fix IllegalStateException thrown due to a reconnection made on an I/O - thread. -- [bug] JAVA-82: Correctly reports errors during authentication phase. - - -### 1.0.0-beta2 - -- [new] JAVA-51, JAVA-60, JAVA-58: Support blob constants, BigInteger, BigDecimal and counter batches in - the query builder. -- [new] JAVA-61: Basic support for custom CQL3 types. -- [new] JAVA-65: Add "execution infos" for a result set (this also move the query - trace in the new ExecutionInfos object, so users of beta1 will have to - update). -- [bug] JAVA-62: Fix failover bug in DCAwareRoundRobinPolicy. -- [bug] JAVA-66: Fix use of bind markers for routing keys in the query builder. - - -### 1.0.0-beta1 - -- initial release diff --git a/ci/create-user.sh b/ci/create-user.sh deleted file mode 100644 index fb193df9a00..00000000000 --- a/ci/create-user.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -################################ -# -# Prep -# -################################ - -if [ "$1" == "-h" ]; then - echo "$0 [-h] " - echo " this script is used internally by other scripts in the same directory to create a user with the running host user's same uid and gid" - exit 1 -fi - -# arguments -username=$1 -uid=$2 -gid=$3 -BUILD_HOME=$4 - -################################ -# -# Main -# -################################ - -# disable git directory ownership checks -su ${username} -c "git config --global safe.directory '*'" - -if grep "^ID=" /etc/os-release | grep -q 'debian\|ubuntu' ; then - deluser docker - adduser --quiet --disabled-login --no-create-home --uid $uid --gecos ${username} ${username} - groupmod --non-unique -g $gid $username - gpasswd -a ${username} sudo >/dev/null -else - adduser --no-create-home --uid $uid ${username} -fi - -# sudo priviledges -echo "${username} ALL=(root) NOPASSWD:ALL" > /etc/sudoers.d/${username} -chmod 0440 /etc/sudoers.d/${username} - -# proper permissions -chown -R ${username}:${username} /home/docker -chmod og+wx ${BUILD_HOME} \ No newline at end of file diff --git a/ci/install-jdk.sh b/ci/install-jdk.sh deleted file mode 100644 index 674961c2daf..00000000000 --- a/ci/install-jdk.sh +++ /dev/null @@ -1,318 +0,0 @@ -#!/usr/bin/env bash - -# -# Install JDK for Linux and Mac OS -# -# This script determines the most recent early-access build number, -# 
downloads the JDK archive to the user home directory and extracts -# it there. -# -# Exported environment variables (when sourcing this script) -# -# JAVA_HOME is set to the extracted JDK directory -# PATH is prepended with ${JAVA_HOME}/bin -# -# (C) 2018 Christian Stein -# -# https://github.com/sormuras/bach/blob/master/install-jdk.sh -# - -set -o errexit -#set -o nounset # https://github.com/travis-ci/travis-ci/issues/5434 -#set -o xtrace - -function initialize() { - readonly script_name="$(basename "${BASH_SOURCE[0]}")" - readonly script_version='2018-10-17' - - dry=false - silent=false - verbose=false - emit_java_home=false - - feature='ea' - license='GPL' - os='?' - url='?' - workspace="${HOME}" - target='?' - cacerts=false -} - -function usage() { -cat << EOF -Usage: ${script_name} [OPTION]... -Download and extract the latest-and-greatest JDK from java.net or Oracle. - -Version: ${script_version} -Options: - -h|--help Displays this help - -d|--dry-run Activates dry-run mode - -s|--silent Displays no output - -e|--emit-java-home Print value of "JAVA_HOME" to stdout (ignores silent mode) - -v|--verbose Displays verbose output - - -f|--feature 9|10|...|ea JDK feature release number, defaults to "ea" - -l|--license GPL|BCL License defaults to "GPL", BCL also indicates OTN-LA for Oracle Java SE - -o|--os linux-x64|osx-x64 Operating system identifier (works best with GPL license) - -u|--url "https://..." Use custom JDK archive (provided as .tar.gz file) - -w|--workspace PATH Working directory defaults to \${HOME} [${HOME}] - -t|--target PATH Target directory, defaults to first component of the tarball - -c|--cacerts Link system CA certificates (currently only Debian/Ubuntu is supported) -EOF -} - -function script_exit() { - if [[ $# -eq 1 ]]; then - printf '%s\n' "$1" - exit 0 - fi - - if [[ $# -eq 2 && $2 =~ ^[0-9]+$ ]]; then - printf '%b\n' "$1" - exit "$2" - fi - - script_exit 'Invalid arguments passed to script_exit()!' 
2 -} - -function say() { - if [[ ${silent} != true ]]; then - echo "$@" - fi -} - -function verbose() { - if [[ ${verbose} == true ]]; then - echo "$@" - fi -} - -function parse_options() { - local option - while [[ $# -gt 0 ]]; do - option="$1" - shift - case ${option} in - -h|-H|--help) - usage - exit 0 - ;; - -v|-V|--verbose) - verbose=true - ;; - -s|-S|--silent) - silent=true - verbose "Silent mode activated" - ;; - -d|-D|--dry-run) - dry=true - verbose "Dry-run mode activated" - ;; - -e|-E|--emit-java-home) - emit_java_home=true - verbose "Emitting JAVA_HOME" - ;; - -f|-F|--feature) - feature="$1" - verbose "feature=${feature}" - shift - ;; - -l|-L|--license) - license="$1" - verbose "license=${license}" - shift - ;; - -o|-O|--os) - os="$1" - verbose "os=${os}" - shift - ;; - -u|-U|--url) - url="$1" - verbose "url=${url}" - shift - ;; - -w|-W|--workspace) - workspace="$1" - verbose "workspace=${workspace}" - shift - ;; - -t|-T|--target) - target="$1" - verbose "target=${target}" - shift - ;; - -c|-C|--cacerts) - cacerts=true - verbose "Linking system CA certificates" - ;; - *) - script_exit "Invalid argument was provided: ${option}" 2 - ;; - esac - done -} - -function determine_latest_jdk() { - local number - local curl_result - local url - - verbose "Determine latest JDK feature release number" - number=9 - while [[ ${number} != 99 ]] - do - url=http://jdk.java.net/${number} - curl_result=$(curl -o /dev/null --silent --head --write-out %{http_code} ${url}) - if [[ ${curl_result} -ge 400 ]]; then - break - fi - verbose " Found ${url} [${curl_result}]" - latest_jdk=${number} - number=$[$number +1] - done - - verbose "Latest JDK feature release number is: ${latest_jdk}" -} - -function perform_sanity_checks() { - if [[ ${feature} == '?' 
]] || [[ ${feature} == 'ea' ]]; then - feature=${latest_jdk} - fi - if [[ ${feature} -lt 9 ]] || [[ ${feature} -gt ${latest_jdk} ]]; then - script_exit "Expected feature release number in range of 9 to ${latest_jdk}, but got: ${feature}" 3 - fi - if [[ -d "$target" ]]; then - script_exit "Target directory must not exist, but it does: $(du -hs '${target}')" 3 - fi -} - -function determine_url() { - local DOWNLOAD='https://download.java.net/java' - local ORACLE='http://download.oracle.com/otn-pub/java/jdk' - - # Archived feature or official GA build? - case "${feature}-${license}" in - 9-GPL) url="${DOWNLOAD}/GA/jdk9/9.0.4/binaries/openjdk-9.0.4_${os}_bin.tar.gz"; return;; - 9-BCL) url="${ORACLE}/9.0.4+11/c2514751926b4512b076cc82f959763f/jdk-9.0.4_${os}_bin.tar.gz"; return;; - 10-GPL) url="${DOWNLOAD}/GA/jdk10/10.0.2/19aef61b38124481863b1413dce1855f/13/openjdk-10.0.2_${os}_bin.tar.gz"; return;; - 10-BCL) url="${ORACLE}/10.0.2+13/19aef61b38124481863b1413dce1855f/jdk-10.0.2_${os}_bin.tar.gz"; return;; - 11-GPL) url="${DOWNLOAD}/GA/jdk11/13/GPL/openjdk-11.0.1_${os}_bin.tar.gz"; return;; - 11-BCL) url="${ORACLE}/11.0.1+13/90cf5d8f270a4347a95050320eef3fb7/jdk-11.0.1_${os}_bin.tar.gz"; return;; - esac - - # EA or RC build? - local JAVA_NET="http://jdk.java.net/${feature}" - local candidates=$(wget --quiet --output-document - ${JAVA_NET} | grep -Eo 'href[[:space:]]*=[[:space:]]*"[^\"]+"' | grep -Eo '(http|https)://[^"]+') - url=$(echo "${candidates}" | grep -Eo "${DOWNLOAD}/.+/jdk${feature}/.+/${license}/.*jdk-${feature}.+${os}_bin.tar.gz$" || true) - - if [[ -z ${url} ]]; then - script_exit "Couldn't determine a download url for ${feature}-${license} on ${os}" 1 - fi -} - -function prepare_variables() { - if [[ ${os} == '?' ]]; then - if [[ "$OSTYPE" == "darwin"* ]]; then - os='osx-x64' - else - os='linux-x64' - fi - fi - if [[ ${url} == '?' 
]]; then - determine_latest_jdk - perform_sanity_checks - determine_url - else - feature='' - license='' - os='' - fi - archive="${workspace}/$(basename ${url})" - status=$(curl -o /dev/null --silent --head --write-out %{http_code} ${url}) -} - -function print_variables() { -cat << EOF -Variables: - feature = ${feature} - license = ${license} - os = ${os} - url = ${url} - status = ${status} - archive = ${archive} -EOF -} - -function download_and_extract_and_set_target() { - local quiet='--quiet'; if [[ ${verbose} == true ]]; then quiet=''; fi - local local="--directory-prefix ${workspace}" - local remote='--timestamping --continue' - local wget_options="${quiet} ${local} ${remote}" - local tar_options="--file ${archive}" - - say "Downloading JDK from ${url}..." - verbose "Using wget options: ${wget_options}" - if [[ ${license} == 'GPL' ]]; then - wget ${wget_options} ${url} - else - wget ${wget_options} --header "Cookie: oraclelicense=accept-securebackup-cookie" ${url} - fi - - verbose "Using tar options: ${tar_options}" - if [[ ${target} == '?' ]]; then - tar --extract ${tar_options} -C "${workspace}" - if [[ "$OSTYPE" != "darwin"* ]]; then - target="${workspace}"/$(tar --list ${tar_options} | grep 'bin/javac' | tr '/' '\n' | tail -3 | head -1) - else - target="${workspace}"/$(tar --list ${tar_options} | head -2 | tail -1 | cut -f 2 -d '/' -)/Contents/Home - fi - else - if [[ "$OSTYPE" != "darwin"* ]]; then - mkdir --parents "${target}" - tar --extract ${tar_options} -C "${target}" --strip-components=1 - else - mkdir -p "${target}" - tar --extract ${tar_options} -C "${target}" --strip-components=4 # . / / Contents / Home - fi - fi - - if [[ ${verbose} == true ]]; then - echo "Set target to: ${target}" - echo "Content of target directory:" - ls "${target}" - echo "Content of release file:" - [[ ! 
-f "${target}/release" ]] || cat "${target}/release" - fi - - # Link to system certificates - # http://openjdk.java.net/jeps/319 - # https://bugs.openjdk.java.net/browse/JDK-8196141 - # TODO: Provide support for other distributions than Debian/Ubuntu - if [[ ${cacerts} == true ]]; then - mv "${target}/lib/security/cacerts" "${target}/lib/security/cacerts.jdk" - ln -s /etc/ssl/certs/java/cacerts "${target}/lib/security/cacerts" - fi -} - -function main() { - initialize - say "$script_name $script_version" - - parse_options "$@" - prepare_variables - - if [[ ${silent} == false ]]; then print_variables; fi - if [[ ${dry} == true ]]; then exit 0; fi - - download_and_extract_and_set_target - - export JAVA_HOME=$(cd "${target}"; pwd) - export PATH=${JAVA_HOME}/bin:$PATH - - if [[ ${silent} == false ]]; then java -version; fi - if [[ ${emit_java_home} == true ]]; then echo "${JAVA_HOME}"; fi -} - -main "$@" \ No newline at end of file diff --git a/ci/run-tests.sh b/ci/run-tests.sh deleted file mode 100755 index 5268bdd7113..00000000000 --- a/ci/run-tests.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -x - -. ~/.jabba/jabba.sh -. ~/env.txt -cd $(dirname "$(readlink -f "$0")")/.. 
-printenv | sort -mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true -jabba use ${TEST_JAVA_VERSION} -# Find out the latest patch version of Cassandra -PATCH_SERVER_VERSION=$(curl -s https://downloads.apache.org/cassandra/ | grep -oP '(?<=href=\")[0-9]+\.[0-9]+\.[0-9]+(?=)' | sort -rV | uniq -w 3 | grep $SERVER_VERSION) -printenv | sort -mvn -B -V verify -T 1 -Ptest-jdk-${TEST_JAVA_MAJOR_VERSION} -DtestJavaHome=$(jabba which ${TEST_JAVA_VERSION}) -Dccm.version=${PATCH_SERVER_VERSION} -Dccm.dse=false -Dmaven.test.failure.ignore=true -Dmaven.javadoc.skip=true diff --git a/core-shaded/pom.xml b/core-shaded/pom.xml deleted file mode 100644 index 84cb4b15398..00000000000 --- a/core-shaded/pom.xml +++ /dev/null @@ -1,366 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-core-shaded - Apache Cassandra Java Driver - core with shaded deps - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - - org.apache.cassandra - java-driver-core - - - - com.datastax.oss - native-protocol - - - org.apache.cassandra - java-driver-guava-shaded - - - com.typesafe - config - - - com.github.jnr - jnr-posix - - - org.xerial.snappy - snappy-java - true - - - at.yawk.lz4 - lz4-java - true - - - org.slf4j - slf4j-api - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - com.esri.geometry - esri-geometry-api - true - - - org.apache.tinkerpop - gremlin-core - true - - - org.apache.tinkerpop - tinkergraph-gremlin - true - - - org.reactivestreams - reactive-streams - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - - - - - src/main/resources - - - ${project.basedir}/.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - maven-shade-plugin - - - shade-core-dependencies - package - - shade - - - true - true - - - - org.apache.cassandra:java-driver-core - io.netty:* - com.fasterxml.jackson.core:* - - - - - - io.netty - com.datastax.oss.driver.shaded.netty - - - com.fasterxml.jackson - com.datastax.oss.driver.shaded.fasterxml.jackson - - - - - - org.apache.cassandra:* - - - META-INF/MANIFEST.MF - META-INF/maven/** - - - - io.netty:* - - META-INF/** - - - - com.fasterxml.jackson.core:* - - META-INF/** - - - - - - - - - maven-dependency-plugin - - - unpack-shaded-classes - package - - unpack - - - - - org.apache.cassandra - java-driver-core-shaded - jar - ${project.build.outputDirectory} - - - - - - - unpack-shaded-sources - package - - unpack - - - - - org.apache.cassandra - java-driver-core-shaded - jar - sources - ${project.build.directory}/shaded-sources - - - - - - - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - shade-graalvm-files - package - - replace - - - - - false - ${project.build.directory}/classes/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json,${project.build.directory}/shaded-sources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json - - - io.netty - com.datastax.oss.driver.shaded.netty - - - - - - org.apache.felix - maven-bundle-plugin - true - - - generate-shaded-manifest - package - - manifest - - - - com.datastax.oss.driver.core - com.datastax.oss.driver.core - - * - - !com.datastax.oss.driver.shaded.netty.*, !com.datastax.oss.driver.shaded.fasterxml.jackson.*, - !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, - !org.graalvm.*, !com.oracle.svm.*, - jnr.*;resolution:=optional, com.esri.core.geometry.*;resolution:=optional,org.reactivestreams.*;resolution:=optional, org.apache.tinkerpop.*;resolution:=optional, org.javatuples.*;resolution:=optional, reactor.blockhound.*;resolution:=optional, - !com.google.protobuf.*, 
!com.jcraft.jzlib.*, !com.ning.compress.*, !lzma.sdk.*, !net.jpountz.xxhash.*, !org.bouncycastle.*, !org.conscrypt.*, !org.apache.commons.logging.*, !org.apache.log4j.*, !org.apache.logging.log4j.*, !org.eclipse.jetty.*, !org.jboss.marshalling.*, !sun.misc.*, !sun.security.*, !com.barchart.udt.*, !com.fasterxml.aalto.*, !com.sun.nio.sctp.*, !gnu.io.*, !org.xml.sax.*, !org.w3c.dom.*, !com.aayushatharva.brotli4j.*, !com.github.luben.zstd.*, * - - - com.datastax.oss.driver.api.core.*, com.datastax.oss.driver.internal.core.*, com.datastax.dse.driver.api.core.*, com.datastax.dse.driver.internal.core.*, com.datastax.oss.driver.shaded.netty.*, com.datastax.oss.driver.shaded.fasterxml.jackson.*, - - true - - - - - - maven-assembly-plugin - - - generate-final-shaded-jar - package - - single - - - - - ${project.build.outputDirectory}/META-INF/MANIFEST.MF - - - src/assembly/shaded-jar.xml - - - false - - - - - - org.revapi - revapi-maven-plugin - - true - - - - - diff --git a/core-shaded/src/assembly/shaded-jar.xml b/core-shaded/src/assembly/shaded-jar.xml deleted file mode 100644 index 449eb77bd1a..00000000000 --- a/core-shaded/src/assembly/shaded-jar.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - shaded-jar - - jar - - false - - - - ${project.build.outputDirectory} - - - - - - - ${project.basedir}/dependency-reduced-pom.xml - META-INF/maven/com.datastax.oss/java-driver-core-shaded - pom.xml - - - diff --git a/core/console.scala b/core/console.scala deleted file mode 100644 index 491add7edea..00000000000 --- a/core/console.scala +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Allows quick manual tests from the Scala console: - * - * cd core/ - * mvn scala:console - * - * The script below is run at init, then you can do `val cluster = builder.build()` and play with - * it. - * - * Note: on MacOS, the Scala plugin seems to break the terminal if you exit the console with `:q`. - * Use Ctrl+C instead. - */ -import com.datastax.oss.driver.api.core._ -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent -import com.datastax.oss.driver.internal.core.context.InternalDriverContext -import java.net.InetSocketAddress - -import CqlSession - -// Heartbeat logs every 30 seconds are annoying in the console, raise the interval -System.setProperty("datastax-java-driver.advanced.heartbeat.interval", "1 hour") - -val address1 = new InetSocketAddress("127.0.0.1", 9042) -val address2 = new InetSocketAddress("127.0.0.2", 9042) -val address3 = new InetSocketAddress("127.0.0.3", 9042) -val address4 = new InetSocketAddress("127.0.0.4", 9042) -val address5 = new InetSocketAddress("127.0.0.5", 9042) -val address6 = new InetSocketAddress("127.0.0.6", 9042) - -val builder = CqlSession.builder().addContactPoint(address1) - -println("********************************************") -println("* To start a driver instance, run: *") -println("* implicit val session = builder.build *") -println("********************************************") - -def fire(event: AnyRef)(implicit session: CqlSession): Unit = { - session.getContext.asInstanceOf[InternalDriverContext].getEventBus().fire(event) -} diff --git a/core/pom.xml 
b/core/pom.xml deleted file mode 100644 index 8758d20d78a..00000000000 --- a/core/pom.xml +++ /dev/null @@ -1,356 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-core - bundle - Apache Cassandra Java Driver - core - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - com.datastax.oss - native-protocol - - - io.netty - netty-handler - - - org.apache.cassandra - java-driver-guava-shaded - - - com.typesafe - config - - - - com.github.jnr - jnr-posix - - - org.xerial.snappy - snappy-java - true - - - at.yawk.lz4 - lz4-java - true - - - org.slf4j - slf4j-api - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - com.esri.geometry - esri-geometry-api - true - - - org.apache.tinkerpop - gremlin-core - true - - - org.apache.tinkerpop - tinkergraph-gremlin - true - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - org.reactivestreams - reactive-streams - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - org.graalvm.sdk - graal-sdk - provided - - - org.graalvm.nativeimage - svm - provided - - - io.projectreactor.tools - blockhound - provided - - - ch.qos.logback - logback-classic - test - - - junit - junit - test - - - com.tngtech.java - junit-dataprovider - test - - - org.assertj - assertj-core - test - - - org.mockito - mockito-core - test - - - io.reactivex.rxjava2 - rxjava - test - - - org.reactivestreams - reactive-streams-tck - test - - - org.awaitility - awaitility - test - - - org.testng - testng - test - - - com.github.tomakehurst - wiremock - test - - - - - - src/main/resources - - com/datastax/oss/driver/Driver.properties - - true - - - src/main/resources - - com/datastax/oss/driver/Driver.properties - - false - - - ${project.basedir}/.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - src/test/resources - - project.properties - - true - - - src/test/resources - - project.properties - - false - - - - - maven-jar-plugin - - - - com.datastax.oss.driver.core - - - - - - test-jar - - test-jar - - - - logback-test.xml - - - - - - - maven-surefire-plugin - - ${testing.jvm}/bin/java - ${mockitoopens.argline} - 1 - - - listener - com.datastax.oss.driver.DriverRunListener - - - - junit - false - - - suitename - Reactive Streams TCK - - - - - - org.apache.maven.surefire - surefire-junit47 - ${surefire.version} - - - org.apache.maven.surefire - surefire-testng - ${surefire.version} - - - - - org.apache.felix - maven-bundle-plugin - true - - - - bundle - - - - com.datastax.oss.driver.core - - * - - !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, - !org.graalvm.*, !com.oracle.svm.*, - jnr.*;resolution:=optional, com.esri.core.geometry.*;resolution:=optional, org.reactivestreams.*;resolution:=optional, org.apache.tinkerpop.*;resolution:=optional, org.javatuples.*;resolution:=optional, reactor.blockhound.*;resolution:=optional, * - - com.datastax.oss.driver.*.core.*, com.datastax.dse.driver.*.core.* - - - - - - - maven-dependency-plugin - - - generate-dependency-list - - list - - generate-resources - - runtime - true - com.datastax.cassandra,com.datastax.dse - ${project.build.outputDirectory}/com/datastax/dse/driver/internal/deps.txt - - - - - - - diff --git a/core/revapi.json b/core/revapi.json deleted file mode 100644 index d56566bc2b9..00000000000 --- a/core/revapi.json +++ /dev/null @@ -1,1568 +0,0 @@ -{ - "revapi": { - "java": { - "filter": { - "packages": { - "regex": true, - "exclude": [ - "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", - "org\\.assertj(\\..+)?" 
- ] - } - } - }, - "ignore": [ - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverExecutionProfile.Base", - "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer", - "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" - }, - { - "code": "java.class.nonPublicPartOfAPI", - "old": "class org.apache.tinkerpop.shaded.jackson.databind.type.TypeParser.MyTokenizer", - "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" - }, - { - "code": "java.class.externalClassExposedInAPI", - "justification": "CASSJAVA-102: Migrate revapi config into dedicated config files, ported from pom.xml" - }, - { - "code": "java.method.varargOverloadsOnlyDifferInVarargParameter", - "justification": "CASSJAVA-102: Migrate revapi config into dedicated config files, ported from pom.xml" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.annotation.JacksonInject.Value.serialVersionUID", - "new": "field com.fasterxml.jackson.annotation.JacksonInject.Value.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.annotation.JacksonInject.Value::(java.lang.Object, java.lang.Boolean)", - "new": "method void com.fasterxml.jackson.annotation.JacksonInject.Value::(java.lang.Object, java.lang.Boolean, java.lang.Boolean)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.annotation.JsonAutoDetect.Value.serialVersionUID", - 
"new": "field com.fasterxml.jackson.annotation.JsonAutoDetect.Value.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.ACCEPT_CASE_INSENSITIVE_VALUES", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.ACCEPT_CASE_INSENSITIVE_VALUES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_DATES_WITH_ZONE_ID", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_DATES_WITH_ZONE_ID", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_DATE_TIMESTAMPS_AS_NANOSECONDS", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_DATE_TIMESTAMPS_AS_NANOSECONDS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - 
"code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_SORTED_MAP_ENTRIES", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Feature.WRITE_SORTED_MAP_ENTRIES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.ANY", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.ANY", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.ARRAY", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.ARRAY", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.BINARY", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.BINARY", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.BOOLEAN", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.BOOLEAN", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NATURAL", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NATURAL", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NUMBER", - "new": "field 
com.fasterxml.jackson.annotation.JsonFormat.Shape.NUMBER", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NUMBER_FLOAT", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NUMBER_FLOAT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NUMBER_INT", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.NUMBER_INT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.OBJECT", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.OBJECT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.SCALAR", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.SCALAR", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.STRING", - "new": "field com.fasterxml.jackson.annotation.JsonFormat.Shape.STRING", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.annotation.JsonFormat.Value::(java.lang.String, com.fasterxml.jackson.annotation.JsonFormat.Shape, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.annotation.JsonFormat.Features)", - "justification": "ADD YOUR EXPLANATION 
FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.annotation.JsonFormat.Value::(java.lang.String, com.fasterxml.jackson.annotation.JsonFormat.Shape, java.util.Locale, java.lang.String, java.util.TimeZone, com.fasterxml.jackson.annotation.JsonFormat.Features)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.annotation.JsonFormat.Value::(java.lang.String, com.fasterxml.jackson.annotation.JsonFormat.Shape, java.lang.String, java.lang.String, com.fasterxml.jackson.annotation.JsonFormat.Features)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.PropertyAccessor.ALL", - "new": "field com.fasterxml.jackson.annotation.PropertyAccessor.ALL", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.PropertyAccessor.CREATOR", - "new": "field com.fasterxml.jackson.annotation.PropertyAccessor.CREATOR", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.PropertyAccessor.FIELD", - "new": "field com.fasterxml.jackson.annotation.PropertyAccessor.FIELD", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.PropertyAccessor.IS_GETTER", - "new": "field com.fasterxml.jackson.annotation.PropertyAccessor.IS_GETTER", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - 
}, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.annotation.PropertyAccessor.NONE", - "new": "field com.fasterxml.jackson.annotation.PropertyAccessor.NONE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.core.JsonFactory.serialVersionUID", - "new": "field com.fasterxml.jackson.core.JsonFactory.serialVersionUID", - "serialVersionUID": "2", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method int com.fasterxml.jackson.core.JsonFactory::getFactoryFeatures()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_LEADING_DECIMAL_POINT_FOR_NUMBERS", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_LEADING_DECIMAL_POINT_FOR_NUMBERS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_MISSING_VALUES", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_MISSING_VALUES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field 
com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_NUMERIC_LEADING_ZEROS", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_NUMERIC_LEADING_ZEROS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_TRAILING_COMMA", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.ALLOW_TRAILING_COMMA", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.IGNORE_UNDEFINED", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.IGNORE_UNDEFINED", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.JsonParser.Feature.STRICT_DUPLICATE_DETECTION", - "new": "field com.fasterxml.jackson.core.JsonParser.Feature.STRICT_DUPLICATE_DETECTION", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void 
com.fasterxml.jackson.core.JsonPointer::(java.lang.String, java.lang.String, com.fasterxml.jackson.core.JsonPointer)", - "new": "method void com.fasterxml.jackson.core.JsonPointer::(com.fasterxml.jackson.core.JsonPointer, com.fasterxml.jackson.core.JsonPointer)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.core.JsonPointer::(java.lang.String, java.lang.String, int, com.fasterxml.jackson.core.JsonPointer)", - "new": "method void com.fasterxml.jackson.core.JsonPointer::(com.fasterxml.jackson.core.JsonPointer, java.lang.String, int)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.core.JsonPointer com.fasterxml.jackson.core.JsonPointer::_constructHead(int, com.fasterxml.jackson.core.JsonPointer)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.core.JsonPointer com.fasterxml.jackson.core.JsonPointer::_parseQuotedTail(java.lang.String, int)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method int com.fasterxml.jackson.core.JsonStreamContext::getNestingDepth()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.StreamReadFeature.IGNORE_UNDEFINED", - "new": "field com.fasterxml.jackson.core.StreamReadFeature.IGNORE_UNDEFINED", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field 
com.fasterxml.jackson.core.StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION", - "new": "field com.fasterxml.jackson.core.StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.StreamReadFeature.STRICT_DUPLICATE_DETECTION", - "new": "field com.fasterxml.jackson.core.StreamReadFeature.STRICT_DUPLICATE_DETECTION", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.StreamWriteFeature.AUTO_CLOSE_CONTENT", - "new": "field com.fasterxml.jackson.core.StreamWriteFeature.AUTO_CLOSE_CONTENT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.StreamWriteFeature.AUTO_CLOSE_TARGET", - "new": "field com.fasterxml.jackson.core.StreamWriteFeature.AUTO_CLOSE_TARGET", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.StreamWriteFeature.IGNORE_UNKNOWN", - "new": "field com.fasterxml.jackson.core.StreamWriteFeature.IGNORE_UNKNOWN", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.StreamWriteFeature.STRICT_DUPLICATE_DETECTION", - "new": "field com.fasterxml.jackson.core.StreamWriteFeature.STRICT_DUPLICATE_DETECTION", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field 
com.fasterxml.jackson.core.io.ContentReference.serialVersionUID", - "new": "field com.fasterxml.jackson.core.io.ContentReference.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method int com.fasterxml.jackson.core.io.ContentReference::maxContentSnippetLength()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_LEADING_ZEROS_FOR_NUMBERS", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_LEADING_ZEROS_FOR_NUMBERS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_MISSING_VALUES", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_MISSING_VALUES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_NON_NUMERIC_NUMBERS", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_NON_NUMERIC_NUMBERS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field 
com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_SINGLE_QUOTES", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_SINGLE_QUOTES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_TRAILING_COMMA", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_TRAILING_COMMA", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_UNQUOTED_FIELD_NAMES", - "new": "field com.fasterxml.jackson.core.json.JsonReadFeature.ALLOW_UNQUOTED_FIELD_NAMES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonWriteFeature.ESCAPE_NON_ASCII", - "new": "field com.fasterxml.jackson.core.json.JsonWriteFeature.ESCAPE_NON_ASCII", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonWriteFeature.QUOTE_FIELD_NAMES", - "new": "field com.fasterxml.jackson.core.json.JsonWriteFeature.QUOTE_FIELD_NAMES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.core.json.JsonWriteFeature.WRITE_NAN_AS_STRINGS", - "new": "field com.fasterxml.jackson.core.json.JsonWriteFeature.WRITE_NAN_AS_STRINGS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field 
com.fasterxml.jackson.core.json.JsonWriteFeature.WRITE_NUMBERS_AS_STRINGS", - "new": "field com.fasterxml.jackson.core.json.JsonWriteFeature.WRITE_NUMBERS_AS_STRINGS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer._intern", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::_reportTooManyCollisions()", - "new": "method void com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::_reportTooManyCollisions() throws com.fasterxml.jackson.core.exc.StreamConstraintsException", - "exception": "com.fasterxml.jackson.core.exc.StreamConstraintsException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int)", - "new": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int) throws com.fasterxml.jackson.core.exc.StreamConstraintsException", - "exception": "com.fasterxml.jackson.core.exc.StreamConstraintsException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int, int)", - "new": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int, int) throws com.fasterxml.jackson.core.exc.StreamConstraintsException", - "exception": "com.fasterxml.jackson.core.exc.StreamConstraintsException", - "justification": "ADD YOUR 
EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int, int, int)", - "new": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int, int, int) throws com.fasterxml.jackson.core.exc.StreamConstraintsException", - "exception": "com.fasterxml.jackson.core.exc.StreamConstraintsException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int[], int)", - "new": "method java.lang.String com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer::addName(java.lang.String, int[], int) throws com.fasterxml.jackson.core.exc.StreamConstraintsException", - "exception": "com.fasterxml.jackson.core.exc.StreamConstraintsException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer._flags", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::_reportTooManyCollisions(int)", - "new": "method void com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::_reportTooManyCollisions(int) throws com.fasterxml.jackson.core.exc.StreamConstraintsException", - "exception": "com.fasterxml.jackson.core.exc.StreamConstraintsException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.visibilityIncreased", - "old": "method 
com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::createRoot(int)", - "new": "method com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::createRoot(int)", - "oldVisibility": "protected", - "newVisibility": "public", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::findSymbol(char[], int, int, int)", - "new": "method java.lang.String com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::findSymbol(char[], int, int, int) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.core.util.Separators.serialVersionUID", - "new": "field com.fasterxml.jackson.core.util.Separators.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.util.TextBuffer::append(char)", - "new": "method void com.fasterxml.jackson.core.util.TextBuffer::append(char) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.util.TextBuffer::append(char[], int, int)", - "new": "method void com.fasterxml.jackson.core.util.TextBuffer::append(char[], int, int) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - 
}, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.util.TextBuffer::append(java.lang.String, int, int)", - "new": "method void com.fasterxml.jackson.core.util.TextBuffer::append(java.lang.String, int, int) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method char[] com.fasterxml.jackson.core.util.TextBuffer::contentsAsArray()", - "new": "method char[] com.fasterxml.jackson.core.util.TextBuffer::contentsAsArray() throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.util.TextBuffer::contentsAsString()", - "new": "method java.lang.String com.fasterxml.jackson.core.util.TextBuffer::contentsAsString() throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method char[] com.fasterxml.jackson.core.util.TextBuffer::finishCurrentSegment()", - "new": "method char[] com.fasterxml.jackson.core.util.TextBuffer::finishCurrentSegment() throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method char[] com.fasterxml.jackson.core.util.TextBuffer::getTextBuffer()", - "new": "method char[] com.fasterxml.jackson.core.util.TextBuffer::getTextBuffer() throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF 
THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.util.TextBuffer::resetWithCopy(char[], int, int)", - "new": "method void com.fasterxml.jackson.core.util.TextBuffer::resetWithCopy(char[], int, int) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.util.TextBuffer::resetWithCopy(java.lang.String, int, int)", - "new": "method void com.fasterxml.jackson.core.util.TextBuffer::resetWithCopy(java.lang.String, int, int) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method void com.fasterxml.jackson.core.util.TextBuffer::resetWithString(java.lang.String)", - "new": "method void com.fasterxml.jackson.core.util.TextBuffer::resetWithString(java.lang.String) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.String com.fasterxml.jackson.core.util.TextBuffer::setCurrentAndReturn(int)", - "new": "method java.lang.String com.fasterxml.jackson.core.util.TextBuffer::setCurrentAndReturn(int) throws java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.AnnotationIntrospector::findDeserializationContentType(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.databind.JavaType)", - 
"justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.AnnotationIntrospector::findDeserializationKeyType(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.databind.JavaType)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.AnnotationIntrospector::findDeserializationType(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.databind.JavaType)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Boolean com.fasterxml.jackson.databind.AnnotationIntrospector::findIgnoreUnknownProperties(com.fasterxml.jackson.databind.introspect.AnnotatedClass)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.String[] com.fasterxml.jackson.databind.AnnotationIntrospector::findPropertiesToIgnore(com.fasterxml.jackson.databind.introspect.Annotated, boolean)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.AnnotationIntrospector::findSerializationContentType(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.databind.JavaType)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.annotation.JsonInclude.Include 
com.fasterxml.jackson.databind.AnnotationIntrospector::findSerializationInclusion(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.annotation.JsonInclude.Include)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.annotation.JsonInclude.Include com.fasterxml.jackson.databind.AnnotationIntrospector::findSerializationInclusionForContent(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.annotation.JsonInclude.Include)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.AnnotationIntrospector::findSerializationKeyType(com.fasterxml.jackson.databind.introspect.Annotated, com.fasterxml.jackson.databind.JavaType)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.Class com.fasterxml.jackson.databind.AnnotationIntrospector::findSerializationType(com.fasterxml.jackson.databind.introspect.Annotated)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.class.defaultSerializationChanged", - "old": "class com.fasterxml.jackson.databind.AnnotationIntrospector", - "new": "class com.fasterxml.jackson.databind.AnnotationIntrospector", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.type.TypeBindings com.fasterxml.jackson.databind.BeanDescription::bindingsForBeanType()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method 
com.fasterxml.jackson.databind.introspect.AnnotatedMethod com.fasterxml.jackson.databind.BeanDescription::findAnySetter()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedMember com.fasterxml.jackson.databind.BeanDescription::findAnySetterField()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.util.Map com.fasterxml.jackson.databind.BeanDescription::findBackReferenceProperties()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.annotation.JsonFormat.Value com.fasterxml.jackson.databind.BeanDescription::findExpectedFormat()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.reflect.Method com.fasterxml.jackson.databind.BeanDescription::findFactoryMethod(java.lang.Class[])", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedMethod com.fasterxml.jackson.databind.BeanDescription::findJsonValueMethod()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.lang.reflect.Constructor com.fasterxml.jackson.databind.BeanDescription::findSingleArgConstructor(java.lang.Class[])", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.introspect.PotentialCreators 
com.fasterxml.jackson.databind.BeanDescription::getPotentialCreators()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JavaType com.fasterxml.jackson.databind.BeanDescription::resolveType(java.lang.reflect.Type)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.cfg.DatatypeFeatures com.fasterxml.jackson.databind.DatabindContext::getDatatypeFeatures()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method boolean com.fasterxml.jackson.databind.DatabindContext::isEnabled(com.fasterxml.jackson.databind.cfg.DatatypeFeature)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method T com.fasterxml.jackson.databind.DatabindContext::reportBadTypeDefinition(com.fasterxml.jackson.databind.BeanDescription, java.lang.String, java.lang.Object[]) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.DeserializationConfig.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.DeserializationConfig.serialVersionUID", - "serialVersionUID": "2", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.DeserializationConfig::(com.fasterxml.jackson.databind.DeserializationConfig, 
com.fasterxml.jackson.databind.introspect.SimpleMixInResolver, com.fasterxml.jackson.databind.util.RootNameLookup, com.fasterxml.jackson.databind.cfg.ConfigOverrides)", - "new": "method void com.fasterxml.jackson.databind.DeserializationConfig::(com.fasterxml.jackson.databind.DeserializationConfig, com.fasterxml.jackson.databind.cfg.DatatypeFeatures)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.DeserializationConfig::(com.fasterxml.jackson.databind.cfg.BaseSettings, com.fasterxml.jackson.databind.jsontype.SubtypeResolver, com.fasterxml.jackson.databind.introspect.SimpleMixInResolver, com.fasterxml.jackson.databind.util.RootNameLookup, com.fasterxml.jackson.databind.cfg.ConfigOverrides)", - "new": "method void com.fasterxml.jackson.databind.DeserializationConfig::(com.fasterxml.jackson.databind.cfg.BaseSettings, com.fasterxml.jackson.databind.jsontype.SubtypeResolver, com.fasterxml.jackson.databind.introspect.SimpleMixInResolver, com.fasterxml.jackson.databind.util.RootNameLookup, com.fasterxml.jackson.databind.cfg.ConfigOverrides, com.fasterxml.jackson.databind.cfg.CoercionConfigs, com.fasterxml.jackson.databind.cfg.DatatypeFeatures)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.DeserializationConfig::(com.fasterxml.jackson.databind.cfg.BaseSettings, com.fasterxml.jackson.databind.jsontype.SubtypeResolver, com.fasterxml.jackson.databind.introspect.SimpleMixInResolver, com.fasterxml.jackson.databind.util.RootNameLookup, com.fasterxml.jackson.databind.cfg.ConfigOverrides, com.fasterxml.jackson.databind.cfg.CoercionConfigs)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": 
"method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.DeserializationContext::(com.fasterxml.jackson.databind.deser.DeserializerFactory)", - "new": "method void com.fasterxml.jackson.databind.DeserializationContext::(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.deser.DeserializerCache)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::endOfInputException(java.lang.Class)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method java.lang.Object com.fasterxml.jackson.databind.DeserializationContext::findInjectableValue(java.lang.Object, com.fasterxml.jackson.databind.BeanProperty, java.lang.Object, java.lang.Boolean, java.lang.Boolean) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.cfg.DatatypeFeatures com.fasterxml.jackson.databind.DeserializationContext::getDatatypeFeatures()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.text.DateFormat com.fasterxml.jackson.databind.DeserializationContext::getDateFormat()", - "justification": "ADD YOUR EXPLANATION FOR 
THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method boolean com.fasterxml.jackson.databind.DeserializationContext::isEnabled(com.fasterxml.jackson.databind.cfg.DatatypeFeature)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::mappingException(java.lang.Class)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::mappingException(java.lang.Class, com.fasterxml.jackson.core.JsonToken)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::mappingException(java.lang.String)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::mappingException(java.lang.String, java.lang.Object[])", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method T com.fasterxml.jackson.databind.DeserializationContext::reportBadMerge(com.fasterxml.jackson.databind.JsonDeserializer) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void 
com.fasterxml.jackson.databind.DeserializationContext::reportMappingException(java.lang.String, java.lang.Object[]) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.DeserializationContext::reportMissingContent(java.lang.String, java.lang.Object[]) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.DeserializationContext::reportUnknownProperty(java.lang.Object, java.lang.String, com.fasterxml.jackson.databind.JsonDeserializer) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.DeserializationContext::reportWrongTokenException(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.JsonToken, java.lang.String, java.lang.Object[]) throws com.fasterxml.jackson.databind.JsonMappingException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::unknownTypeException(com.fasterxml.jackson.databind.JavaType, java.lang.String, java.lang.String)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.JsonMappingException com.fasterxml.jackson.databind.DeserializationContext::wrongTokenException(com.fasterxml.jackson.core.JsonParser, 
com.fasterxml.jackson.core.JsonToken, java.lang.String)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_FLOAT_AS_INT", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_FLOAT_AS_INT", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.ADJUST_DATES_TO_CONTEXT_TIME_ZONE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": 
"field com.fasterxml.jackson.databind.DeserializationFeature.EAGER_DESERIALIZER_FETCH", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.EAGER_DESERIALIZER_FETCH", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_DATE_TIMESTAMPS_AS_NANOSECONDS", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_DATE_TIMESTAMPS_AS_NANOSECONDS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_ENUMS_USING_TO_STRING", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_ENUMS_USING_TO_STRING", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.UNWRAP_ROOT_VALUE", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.UNWRAP_ROOT_VALUE", - "justification": 
"ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.UNWRAP_SINGLE_VALUE_ARRAYS", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.UNWRAP_SINGLE_VALUE_ARRAYS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.DeserializationFeature.WRAP_EXCEPTIONS", - "new": "field com.fasterxml.jackson.databind.DeserializationFeature.WRAP_EXCEPTIONS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.node.ArrayNode com.fasterxml.jackson.databind.JsonNode::withArray(com.fasterxml.jackson.core.JsonPointer)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.node.ObjectNode com.fasterxml.jackson.databind.JsonNode::withObject(com.fasterxml.jackson.core.JsonPointer)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.node.ObjectNode com.fasterxml.jackson.databind.JsonNode::withObject(java.lang.String, com.fasterxml.jackson.databind.JsonNode.OverwriteMode, boolean)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS", - 
"justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_VALUES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_VALUES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_COERCION_OF_SCALARS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_COERCION_OF_SCALARS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_EXPLICIT_PROPERTY_RENAMING", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_EXPLICIT_PROPERTY_RENAMING", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_VOID_VALUED_PROPERTIES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_VOID_VALUED_PROPERTIES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.APPLY_DEFAULT_VALUES", - "new": "field 
com.fasterxml.jackson.databind.MapperFeature.APPLY_DEFAULT_VALUES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.BLOCK_UNSAFE_POLYMORPHIC_BASE_TYPES", - "new": "field com.fasterxml.jackson.databind.MapperFeature.BLOCK_UNSAFE_POLYMORPHIC_BASE_TYPES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.DEFAULT_VIEW_INCLUSION", - "new": "field com.fasterxml.jackson.databind.MapperFeature.DEFAULT_VIEW_INCLUSION", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_DUPLICATE_MODULE_REGISTRATIONS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_DUPLICATE_MODULE_REGISTRATIONS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_MERGE_FOR_UNMERGEABLE", - "new": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_MERGE_FOR_UNMERGEABLE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field 
com.fasterxml.jackson.databind.MapperFeature.INFER_BUILDER_TYPE_BINDINGS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.INFER_BUILDER_TYPE_BINDINGS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS", - "new": "field com.fasterxml.jackson.databind.MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.SORT_CREATOR_PROPERTIES_FIRST", - "new": "field com.fasterxml.jackson.databind.MapperFeature.SORT_CREATOR_PROPERTIES_FIRST", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.SORT_PROPERTIES_ALPHABETICALLY", - "new": "field com.fasterxml.jackson.databind.MapperFeature.SORT_PROPERTIES_ALPHABETICALLY", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_BASE_TYPE_AS_DEFAULT_IMPL", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_BASE_TYPE_AS_DEFAULT_IMPL", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_STATIC_TYPING", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_STATIC_TYPING", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": 
"field com.fasterxml.jackson.databind.MapperFeature.USE_STD_BEAN_NAMING", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_STD_BEAN_NAMING", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_WRAPPER_NAME_AS_PROPERTY_NAME", - "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_WRAPPER_NAME_AS_PROPERTY_NAME", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.ObjectMapper.DefaultTyping.EVERYTHING", - "new": "field com.fasterxml.jackson.databind.ObjectMapper.DefaultTyping.EVERYTHING", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.KEBAB_CASE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.LOWER_CAMEL_CASE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.LOWER_CASE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.LOWER_DOT_CASE", - "justification": "ADD YOUR EXPLANATION FOR 
THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.PASCAL_CASE_TO_CAMEL_CASE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.SNAKE_CASE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.UPPER_CAMEL_CASE", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.SerializationConfig.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.SerializationConfig.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.SerializationConfig::(com.fasterxml.jackson.databind.SerializationConfig, com.fasterxml.jackson.databind.introspect.SimpleMixInResolver, com.fasterxml.jackson.databind.util.RootNameLookup, com.fasterxml.jackson.databind.cfg.ConfigOverrides)", - "new": "method void com.fasterxml.jackson.databind.SerializationConfig::(com.fasterxml.jackson.databind.SerializationConfig, com.fasterxml.jackson.databind.cfg.ConstructorDetector)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.EAGER_SERIALIZER_FETCH", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.EAGER_SERIALIZER_FETCH", - "justification": "ADD YOUR EXPLANATION FOR THE 
NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.enumConstantOrderChanged", - "old": "field com.fasterxml.jackson.databind.SerializationFeature.USE_EQUALITY_FOR_OBJECT_ID", - "new": "field com.fasterxml.jackson.databind.SerializationFeature.USE_EQUALITY_FOR_OBJECT_ID", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.cfg.DatatypeFeatures com.fasterxml.jackson.databind.SerializerProvider::getDatatypeFeatures()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method boolean com.fasterxml.jackson.databind.SerializerProvider::isEnabled(com.fasterxml.jackson.databind.cfg.DatatypeFeature)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.cfg.BaseSettings.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.cfg.BaseSettings.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.cfg.BaseSettings::(com.fasterxml.jackson.databind.introspect.ClassIntrospector, com.fasterxml.jackson.databind.AnnotationIntrospector, com.fasterxml.jackson.databind.PropertyNamingStrategy, com.fasterxml.jackson.databind.type.TypeFactory, com.fasterxml.jackson.databind.jsontype.TypeResolverBuilder, java.text.DateFormat, com.fasterxml.jackson.databind.cfg.HandlerInstantiator, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.core.Base64Variant, com.fasterxml.jackson.databind.jsontype.PolymorphicTypeValidator)", - "new": "method void 
com.fasterxml.jackson.databind.cfg.BaseSettings::(com.fasterxml.jackson.databind.introspect.ClassIntrospector, com.fasterxml.jackson.databind.AnnotationIntrospector, com.fasterxml.jackson.databind.PropertyNamingStrategy, com.fasterxml.jackson.databind.type.TypeFactory, com.fasterxml.jackson.databind.jsontype.TypeResolverBuilder, java.text.DateFormat, com.fasterxml.jackson.databind.cfg.HandlerInstantiator, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.core.Base64Variant, com.fasterxml.jackson.databind.jsontype.PolymorphicTypeValidator, com.fasterxml.jackson.databind.introspect.AccessorNamingStrategy.Provider, com.fasterxml.jackson.databind.cfg.CacheProvider)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.cfg.ConstructorDetector com.fasterxml.jackson.databind.cfg.MapperConfig>::getConstructorDetector()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.cfg.DatatypeFeatures com.fasterxml.jackson.databind.cfg.MapperConfig>::getDatatypeFeatures()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.databind.EnumNamingStrategy com.fasterxml.jackson.databind.cfg.MapperConfig>::getEnumNamingStrategy()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method boolean com.fasterxml.jackson.databind.cfg.MapperConfig>::isEnabled(com.fasterxml.jackson.databind.cfg.DatatypeFeature)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": 
"java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.deser.DefaultDeserializationContext com.fasterxml.jackson.databind.deser.DefaultDeserializationContext::withCaches(com.fasterxml.jackson.databind.cfg.CacheProvider)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.typeChanged", - "old": "field com.fasterxml.jackson.databind.deser.DeserializerCache._cachedDeserializers", - "new": "field com.fasterxml.jackson.databind.deser.DeserializerCache._cachedDeserializers", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDUnchanged", - "old": "field com.fasterxml.jackson.databind.deser.DeserializerCache.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.deser.DeserializerCache.serialVersionUID", - "serialVersionUID": "1", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.deser.SettableAnyProperty::(com.fasterxml.jackson.databind.BeanProperty, com.fasterxml.jackson.databind.introspect.AnnotatedMember, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.JsonDeserializer, com.fasterxml.jackson.databind.jsontype.TypeDeserializer)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method void com.fasterxml.jackson.databind.deser.SettableAnyProperty::_set(java.lang.Object, java.lang.Object, java.lang.Object) throws java.lang.Exception", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.nowAbstract", - "old": "method com.fasterxml.jackson.databind.deser.SettableAnyProperty 
com.fasterxml.jackson.databind.deser.SettableAnyProperty::withValueDeserializer(com.fasterxml.jackson.databind.JsonDeserializer)", - "new": "method com.fasterxml.jackson.databind.deser.SettableAnyProperty com.fasterxml.jackson.databind.deser.SettableAnyProperty::withValueDeserializer(com.fasterxml.jackson.databind.JsonDeserializer)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.class.nowAbstract", - "old": "class com.fasterxml.jackson.databind.deser.SettableAnyProperty", - "new": "class com.fasterxml.jackson.databind.deser.SettableAnyProperty", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.class.defaultSerializationChanged", - "old": "class com.fasterxml.jackson.databind.deser.SettableBeanProperty", - "new": "class com.fasterxml.jackson.databind.deser.SettableBeanProperty", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.deser.UnresolvedForwardReference::(java.lang.String)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.deser.UnresolvedForwardReference::(java.lang.String, com.fasterxml.jackson.core.JsonLocation, com.fasterxml.jackson.databind.deser.impl.ReadableObjectId)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.nowFinal", - "old": "method void com.fasterxml.jackson.databind.deser.impl.PropertyValue::assign(java.lang.Object) throws java.io.IOException", - "new": "method void com.fasterxml.jackson.databind.deser.impl.PropertyValue::assign(java.lang.Object) throws java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, 
- { - "ignore": true, - "code": "java.method.exception.checkedAdded", - "old": "method java.lang.Object[] com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer::getParameters(com.fasterxml.jackson.databind.deser.SettableBeanProperty[]) throws com.fasterxml.jackson.databind.JsonMappingException", - "new": "method java.lang.Object[] com.fasterxml.jackson.databind.deser.impl.PropertyValueBuffer::getParameters(com.fasterxml.jackson.databind.deser.SettableBeanProperty[]) throws com.fasterxml.jackson.databind.JsonMappingException, java.io.IOException", - "exception": "java.io.IOException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method void com.fasterxml.jackson.databind.deser.impl.ValueInjector::(com.fasterxml.jackson.databind.PropertyName, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.util.Annotations, com.fasterxml.jackson.databind.introspect.AnnotatedMember, java.lang.Object)", - "new": "method void com.fasterxml.jackson.databind.deser.impl.ValueInjector::(com.fasterxml.jackson.databind.PropertyName, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.introspect.AnnotatedMember, java.lang.Object, java.lang.Boolean, java.lang.Boolean)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedClass com.fasterxml.jackson.databind.introspect.AnnotatedClass::construct(com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.cfg.MapperConfig)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedClass 
com.fasterxml.jackson.databind.introspect.AnnotatedClass::construct(com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.cfg.MapperConfig, com.fasterxml.jackson.databind.introspect.ClassIntrospector.MixInResolver)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedClass com.fasterxml.jackson.databind.introspect.AnnotatedClass::constructWithoutSuperTypes(java.lang.Class, com.fasterxml.jackson.databind.cfg.MapperConfig)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedClass com.fasterxml.jackson.databind.introspect.AnnotatedClass::constructWithoutSuperTypes(java.lang.Class, com.fasterxml.jackson.databind.cfg.MapperConfig, com.fasterxml.jackson.databind.introspect.ClassIntrospector.MixInResolver)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method java.util.List com.fasterxml.jackson.databind.introspect.AnnotatedClass::getStaticMethods()", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.introspect.ClassIntrospector::forDeserializationWithBuilder(com.fasterxml.jackson.databind.DeserializationConfig, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.introspect.ClassIntrospector.MixInResolver)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.returnTypeChangedCovariantly", - "old": "method T com.fasterxml.jackson.databind.JsonNode::with(java.lang.String) @ 
com.fasterxml.jackson.databind.node.ArrayNode", - "new": "method com.fasterxml.jackson.databind.node.ObjectNode com.fasterxml.jackson.databind.node.ArrayNode::with(java.lang.String)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.generics.formalTypeParameterRemoved", - "old": "method T com.fasterxml.jackson.databind.JsonNode::with(java.lang.String) @ com.fasterxml.jackson.databind.node.ArrayNode", - "new": "method com.fasterxml.jackson.databind.node.ObjectNode com.fasterxml.jackson.databind.node.ArrayNode::with(java.lang.String)", - "typeParameter": "T extends com.fasterxml.jackson.databind.JsonNode", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.finalMethodAddedToNonFinalClass", - "new": "method com.fasterxml.jackson.annotation.JsonFormat.Value com.fasterxml.jackson.databind.introspect.ConcreteBeanPropertyBase::findFormatOverrides(com.fasterxml.jackson.databind.AnnotationIntrospector) @ com.fasterxml.jackson.databind.ser.AnyGetterWriter", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.databind.ser.AnyGetterWriter", - "new": "class com.fasterxml.jackson.databind.ser.AnyGetterWriter", - "superClass": "com.fasterxml.jackson.databind.introspect.ConcreteBeanPropertyBase", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.abstractMethodAdded", - "new": "method com.fasterxml.jackson.databind.ser.DefaultSerializerProvider com.fasterxml.jackson.databind.ser.DefaultSerializerProvider::withCaches(com.fasterxml.jackson.databind.cfg.CacheProvider)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.parameterTypeChanged", - "old": 
"parameter void com.fasterxml.jackson.databind.ser.impl.ReadOnlyClassToSerializerMap::(===java.util.Map>===)", - "new": "parameter void com.fasterxml.jackson.databind.ser.impl.ReadOnlyClassToSerializerMap::(===com.fasterxml.jackson.databind.util.LookupCache>===)", - "parameterIndex": "0", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.parameterTypeChanged", - "old": "parameter com.fasterxml.jackson.databind.ser.impl.ReadOnlyClassToSerializerMap com.fasterxml.jackson.databind.ser.impl.ReadOnlyClassToSerializerMap::from(===java.util.HashMap>===)", - "new": "parameter com.fasterxml.jackson.databind.ser.impl.ReadOnlyClassToSerializerMap com.fasterxml.jackson.databind.ser.impl.ReadOnlyClassToSerializerMap::from(===com.fasterxml.jackson.databind.util.LookupCache>===)", - "parameterIndex": "0", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.ser.std.BeanSerializerBase._anyGetterWriter", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.class.nonFinalClassInheritsFromNewClass", - "old": "class com.fasterxml.jackson.databind.type.ResolvedRecursiveType", - "new": "class com.fasterxml.jackson.databind.type.ResolvedRecursiveType", - "superClass": "com.fasterxml.jackson.databind.type.IdentityEqualityType", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.type.TypeFactory.CORE_TYPE_CLASS", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.type.TypeFactory::(com.fasterxml.jackson.databind.util.LRUMap, 
com.fasterxml.jackson.databind.type.TypeParser, com.fasterxml.jackson.databind.type.TypeModifier[], java.lang.ClassLoader)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.removed", - "old": "method void com.fasterxml.jackson.databind.type.TypeFactory::(com.fasterxml.jackson.databind.util.LRUMap)", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method com.fasterxml.jackson.databind.JavaType com.fasterxml.jackson.databind.type.TypeParser::parseType(com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer) throws java.lang.IllegalArgumentException", - "new": "method com.fasterxml.jackson.databind.JavaType com.fasterxml.jackson.databind.type.TypeParser::parseType(com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer, int) throws java.lang.IllegalArgumentException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.method.numberOfParametersChanged", - "old": "method java.util.List com.fasterxml.jackson.databind.type.TypeParser::parseTypes(com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer) throws java.lang.IllegalArgumentException", - "new": "method java.util.List com.fasterxml.jackson.databind.type.TypeParser::parseTypes(com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer, int) throws java.lang.IllegalArgumentException", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.removed", - "old": "field com.fasterxml.jackson.databind.util.LRUMap._jdkSerializeMaxEntries", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.typeChanged", - "old": "field com.fasterxml.jackson.databind.util.LRUMap._map", - "new": "field 
com.fasterxml.jackson.databind.util.LRUMap._map", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - }, - { - "ignore": true, - "code": "java.field.serialVersionUIDChanged", - "old": "field com.fasterxml.jackson.databind.util.LRUMap.serialVersionUID", - "new": "field com.fasterxml.jackson.databind.util.LRUMap.serialVersionUID", - "oldSerialVersionUID": "1", - "newSerialVersionUID": "2", - "justification": "ADD YOUR EXPLANATION FOR THE NECESSITY OF THIS CHANGE" - } - ] - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java deleted file mode 100644 index dc420970427..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core; - -import com.datastax.dse.protocol.internal.DseProtocolConstants; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; - -/** - * A DSE-specific protocol version. - * - *

Legacy DSE versions did not have a specific version, but instead reused a Cassandra protocol - * version: DSE 5.0 is supported via {@link DefaultProtocolVersion#V4}, and DSE 4.7 and 4.8 via - * {@link DefaultProtocolVersion#V3}. - * - *

DSE 4.6 and earlier are not supported by this version of the driver, use the 1.x series. - */ -public enum DseProtocolVersion implements ProtocolVersion { - - /** Version 1, supported by DSE 5.1.0 and above. */ - DSE_V1(DseProtocolConstants.Version.DSE_V1, false), - - /** Version 2, supported by DSE 6 and above. */ - DSE_V2(DseProtocolConstants.Version.DSE_V2, false), - ; - - private final int code; - private final boolean beta; - - DseProtocolVersion(int code, boolean beta) { - this.code = code; - this.beta = beta; - } - - @Override - public int getCode() { - return code; - } - - @Override - public boolean isBeta() { - return beta; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java deleted file mode 100644 index 8251aaf767c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.MavenCoordinates; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * @deprecated All DSE functionality is now available directly on {@link CqlSession}. This type is - * preserved for backward compatibility, but you should now use {@link CqlSession} instead. - */ -@Deprecated -public interface DseSession extends CqlSession { - - /** - * @deprecated the DSE driver is no longer published as a separate artifact. This field is - * preserved for backward compatibility, but it returns the same value as {@link - * CqlSession#OSS_DRIVER_COORDINATES}. - */ - @Deprecated @NonNull MavenCoordinates DSE_DRIVER_COORDINATES = CqlSession.OSS_DRIVER_COORDINATES; - - /** - * Returns a builder to create a new instance. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static DseSessionBuilder builder() { - return new DseSessionBuilder(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java deleted file mode 100644 index 01e5f9f9125..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; - -/** - * @deprecated DSE functionality is now exposed directly on {@link CqlSession}. This class is - * preserved for backward compatibility, but {@link CqlSession#builder()} should be used - * instead. 
- */ -@NotThreadSafe -@Deprecated -public class DseSessionBuilder extends SessionBuilder { - - @NonNull - @Override - protected DseSession wrap(@NonNull CqlSession defaultSession) { - return new com.datastax.dse.driver.internal.core.session.DefaultDseSession(defaultSession); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java deleted file mode 100644 index abd68b530b6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.SyncAuthenticator; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * Base class for {@link SyncAuthenticator} implementations that want to make use of the - * authentication scheme negotiation in DseAuthenticator. 
- */ -@ThreadSafe -public abstract class BaseDseAuthenticator implements SyncAuthenticator { - - private static final String DSE_AUTHENTICATOR = - "com.datastax.bdp.cassandra.auth.DseAuthenticator"; - - private final String serverAuthenticator; - - protected BaseDseAuthenticator(@NonNull String serverAuthenticator) { - this.serverAuthenticator = serverAuthenticator; - } - - /** - * Return a byte buffer containing the required SASL mechanism. - * - *

This should be one of: - * - *

    - *
  • PLAIN - *
  • GSSAPI - *
- * - * This must be either a {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} buffer, or a new - * instance every time. - */ - @NonNull - protected abstract ByteBuffer getMechanism(); - - /** - * Return a byte buffer containing the expected successful server challenge. - * - *

This should be one of: - * - *

    - *
  • PLAIN-START - *
  • GSSAPI-START - *
- * - * This must be either a {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} buffer, or a new - * instance every time. - */ - @NonNull - protected abstract ByteBuffer getInitialServerChallenge(); - - @Nullable - @Override - public ByteBuffer initialResponseSync() { - // DseAuthenticator communicates back the mechanism in response to server authenticate message. - // older authenticators simply expect the auth response with credentials. - if (isDseAuthenticator()) { - return getMechanism(); - } else { - return evaluateChallengeSync(getInitialServerChallenge()); - } - } - - @Override - public void onAuthenticationSuccessSync(@Nullable ByteBuffer token) {} - - private boolean isDseAuthenticator() { - return serverAuthenticator.equals(DSE_AUTHENTICATOR); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java deleted file mode 100644 index 48a0e5b0ef3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.auth.Authenticator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import javax.security.auth.Subject; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.Configuration; -import javax.security.auth.login.LoginContext; -import javax.security.auth.login.LoginException; -import javax.security.sasl.Sasl; -import javax.security.sasl.SaslClient; -import javax.security.sasl.SaslException; -import net.jcip.annotations.Immutable; -import net.jcip.annotations.NotThreadSafe; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public abstract class DseGssApiAuthProviderBase implements AuthProvider { - - /** The default SASL service name used by this auth provider. */ - public static final String DEFAULT_SASL_SERVICE_NAME = "dse"; - - /** The name of the system property to use to specify the SASL service name. */ - public static final String SASL_SERVICE_NAME_PROPERTY = "dse.sasl.service"; - - /** - * Legacy system property for SASL protocol name. Clients should migrate to - * SASL_SERVICE_NAME_PROPERTY above. 
- */ - private static final String LEGACY_SASL_PROTOCOL_PROPERTY = "dse.sasl.protocol"; - - private static final Logger LOG = LoggerFactory.getLogger(DseGssApiAuthProviderBase.class); - - private final String logPrefix; - - /** - * @param logPrefix a string that will get prepended to the logs (this is used for discrimination - * when you have multiple driver instances executing in the same JVM). Config-based - * implementations fill this with {@link Session#getName()}. - */ - protected DseGssApiAuthProviderBase(@NonNull String logPrefix) { - this.logPrefix = Objects.requireNonNull(logPrefix); - } - - @NonNull - protected abstract GssApiOptions getOptions( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator); - - @NonNull - @Override - public Authenticator newAuthenticator( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) - throws AuthenticationException { - return new GssApiAuthenticator( - getOptions(endPoint, serverAuthenticator), endPoint, serverAuthenticator); - } - - @Override - public void onMissingChallenge(@NonNull EndPoint endPoint) { - LOG.warn( - "[{}] {} did not send an authentication challenge; " - + "This is suspicious because the driver expects authentication", - logPrefix, - endPoint); - } - - @Override - public void close() { - // nothing to do - } - - /** - * The options to initialize a new authenticator. - * - *

Use {@link #builder()} to create an instance. - */ - @Immutable - public static class GssApiOptions { - - @NonNull - public static Builder builder() { - return new Builder(); - } - - private final Configuration loginConfiguration; - private final Subject subject; - private final String saslProtocol; - private final String authorizationId; - private final Map saslProperties; - - private GssApiOptions( - @Nullable Configuration loginConfiguration, - @Nullable Subject subject, - @Nullable String saslProtocol, - @Nullable String authorizationId, - @NonNull Map saslProperties) { - this.loginConfiguration = loginConfiguration; - this.subject = subject; - this.saslProtocol = saslProtocol; - this.authorizationId = authorizationId; - this.saslProperties = saslProperties; - } - - @Nullable - public Configuration getLoginConfiguration() { - return loginConfiguration; - } - - @Nullable - public Subject getSubject() { - return subject; - } - - @Nullable - public String getSaslProtocol() { - return saslProtocol; - } - - @Nullable - public String getAuthorizationId() { - return authorizationId; - } - - @NonNull - public Map getSaslProperties() { - return saslProperties; - } - - @NotThreadSafe - public static class Builder { - - private Configuration loginConfiguration; - private Subject subject; - private String saslProtocol; - private String authorizationId; - private final Map saslProperties = new HashMap<>(); - - public Builder() { - saslProperties.put(Sasl.SERVER_AUTH, "true"); - saslProperties.put(Sasl.QOP, "auth"); - } - - /** - * Sets a login configuration that will be used to create a {@link LoginContext}. - * - *

You MUST call either a withLoginConfiguration method or {@link #withSubject(Subject)}; - * if both are called, the subject takes precedence, and the login configuration will be - * ignored. - * - * @see #withLoginConfiguration(Map) - */ - @NonNull - public Builder withLoginConfiguration(@Nullable Configuration loginConfiguration) { - this.loginConfiguration = loginConfiguration; - return this; - } - /** - * Sets a login configuration that will be used to create a {@link LoginContext}. - * - *

This is an alternative to {@link #withLoginConfiguration(Configuration)}, that builds - * the configuration from {@code Krb5LoginModule} with the given options. - * - *

You MUST call either a withLoginConfiguration method or {@link #withSubject(Subject)}; - * if both are called, the subject takes precedence, and the login configuration will be - * ignored. - */ - @NonNull - public Builder withLoginConfiguration(@Nullable Map loginConfiguration) { - this.loginConfiguration = fetchLoginConfiguration(loginConfiguration); - return this; - } - - /** - * Sets a previously authenticated subject to reuse. - * - *

You MUST call either this method or {@link #withLoginConfiguration(Configuration)}; if - * both are called, the subject takes precedence, and the login configuration will be ignored. - */ - @NonNull - public Builder withSubject(@Nullable Subject subject) { - this.subject = subject; - return this; - } - - /** - * Sets the SASL protocol name to use; should match the username of the Kerberos service - * principal used by the DSE server. - */ - @NonNull - public Builder withSaslProtocol(@Nullable String saslProtocol) { - this.saslProtocol = saslProtocol; - return this; - } - - /** Sets the authorization ID (allows proxy authentication). */ - @NonNull - public Builder withAuthorizationId(@Nullable String authorizationId) { - this.authorizationId = authorizationId; - return this; - } - - /** - * Add a SASL property to use when creating the SASL client. - * - *

Note that this builder pre-initializes these two default properties: - * - *

-       * javax.security.sasl.server.authentication = true
-       * javax.security.sasl.qop = auth
-       * 
- */ - @NonNull - public Builder addSaslProperty(@NonNull String name, @NonNull String value) { - this.saslProperties.put(Objects.requireNonNull(name), Objects.requireNonNull(value)); - return this; - } - - @NonNull - public GssApiOptions build() { - return new GssApiOptions( - loginConfiguration, - subject, - saslProtocol, - authorizationId, - ImmutableMap.copyOf(saslProperties)); - } - - public static Configuration fetchLoginConfiguration(Map options) { - return new Configuration() { - - @Override - public AppConfigurationEntry[] getAppConfigurationEntry(String name) { - return new AppConfigurationEntry[] { - new AppConfigurationEntry( - "com.sun.security.auth.module.Krb5LoginModule", - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, - options) - }; - } - }; - } - } - } - - protected static class GssApiAuthenticator extends BaseDseAuthenticator { - - private static final ByteBuffer MECHANISM = - ByteBuffer.wrap("GSSAPI".getBytes(Charsets.UTF_8)).asReadOnlyBuffer(); - private static final ByteBuffer SERVER_INITIAL_CHALLENGE = - ByteBuffer.wrap("GSSAPI-START".getBytes(Charsets.UTF_8)).asReadOnlyBuffer(); - private static final ByteBuffer EMPTY_BYTE_ARRAY = - ByteBuffer.wrap(new byte[0]).asReadOnlyBuffer(); - private static final String JAAS_CONFIG_ENTRY = "DseClient"; - private static final String[] SUPPORTED_MECHANISMS = new String[] {"GSSAPI"}; - - private Subject subject; - private SaslClient saslClient; - private EndPoint endPoint; - - protected GssApiAuthenticator( - GssApiOptions options, EndPoint endPoint, String serverAuthenticator) { - super(serverAuthenticator); - - try { - if (options.getSubject() != null) { - this.subject = options.getSubject(); - } else { - Configuration loginConfiguration = options.getLoginConfiguration(); - if (loginConfiguration == null) { - throw new IllegalArgumentException("Must provide one of subject or loginConfiguration"); - } - LoginContext login = new LoginContext(JAAS_CONFIG_ENTRY, null, null, loginConfiguration); - 
login.login(); - this.subject = login.getSubject(); - } - String protocol = options.getSaslProtocol(); - if (protocol == null) { - protocol = - System.getProperty( - SASL_SERVICE_NAME_PROPERTY, - System.getProperty(LEGACY_SASL_PROTOCOL_PROPERTY, DEFAULT_SASL_SERVICE_NAME)); - } - this.saslClient = - Sasl.createSaslClient( - SUPPORTED_MECHANISMS, - options.getAuthorizationId(), - protocol, - ((InetSocketAddress) endPoint.resolve()).getAddress().getCanonicalHostName(), - options.getSaslProperties(), - null); - } catch (LoginException | SaslException e) { - throw new AuthenticationException(endPoint, e.getMessage()); - } - this.endPoint = endPoint; - } - - @NonNull - @Override - protected ByteBuffer getMechanism() { - return MECHANISM; - } - - @NonNull - @Override - protected ByteBuffer getInitialServerChallenge() { - return SERVER_INITIAL_CHALLENGE; - } - - @Nullable - @Override - public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge) { - - byte[] challengeBytes; - if (SERVER_INITIAL_CHALLENGE.equals(challenge)) { - if (!saslClient.hasInitialResponse()) { - return EMPTY_BYTE_ARRAY; - } - challengeBytes = new byte[0]; - } else { - // The native protocol spec says the incoming challenge can be null depending on the - // implementation. But saslClient.evaluateChallenge clearly documents that the byte array - // can't be null, which probably means that a SASL authenticator never sends back null. 
- if (challenge == null) { - throw new AuthenticationException(this.endPoint, "Unexpected null challenge from server"); - } - challengeBytes = Bytes.getArray(challenge); - } - try { - - return ByteBuffer.wrap( - Subject.doAs( - subject, - new PrivilegedExceptionAction() { - @Override - public byte[] run() throws SaslException { - return saslClient.evaluateChallenge(challengeBytes); - } - })); - } catch (PrivilegedActionException e) { - throw new AuthenticationException(this.endPoint, e.getMessage(), e.getException()); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java deleted file mode 100644 index 7c5ee23bd6c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** - * @deprecated The driver's default plain text providers now support both Apache Cassandra and DSE. - * This type was preserved for backward compatibility, but implementors should now extend {@link - * PlainTextAuthProviderBase} instead. - */ -@ThreadSafe -@Deprecated -public abstract class DsePlainTextAuthProviderBase extends PlainTextAuthProviderBase { - - protected DsePlainTextAuthProviderBase(@NonNull String logPrefix) { - super(logPrefix); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java deleted file mode 100644 index 64ee5265b5a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * {@link AuthProvider} that provides GSSAPI authenticator instances for clients to connect to DSE - * clusters secured with {@code DseAuthenticator}, in a programmatic way. - * - *

To use this provider the corresponding GssApiOptions must be passed into the provider - * directly, for example: - * - *

- *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder();
- *     Map<String, String> loginConfig =
- *         ImmutableMap.of(
- *             "principal",
- *             "user principal here ex cassandra@DATASTAX.COM",
- *             "useKeyTab",
- *             "true",
- *             "refreshKrb5Config",
- *             "true",
- *             "keyTab",
- *             "Path to keytab file here");
- *
- *     builder.withLoginConfiguration(loginConfig);
- *
- *     CqlSession session =
- *         CqlSession.builder()
- *             .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build()))
- *             .build();
- * 
- * - * or alternatively - * - *
- *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder().withSubject(subject);
- *     CqlSession session =
- *         CqlSession.builder()
- *             .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build()))
- *             .build();
- * 
- * - *

Kerberos Authentication

- * - * Keytab and ticket cache settings are specified using a standard JAAS configuration file. The - * location of the file can be set using the java.security.auth.login.config system - * property or by adding a login.config.url.n entry in the java.security - * properties file. Alternatively a login-configuration, or subject can be provided to the provider - * via the GssApiOptions (see above). - * - *

See the following documents for further details: - * - *

    - *
  1. JAAS - * Login Configuration File; - *
  2. Krb5LoginModule - * options; - *
  3. JAAS - * Authentication Tutorial for more on JAAS in general. - *
- * - *

Authentication using ticket cache

- * - * Run kinit to obtain a ticket and populate the cache before connecting. JAAS config: - * - *
- * DseClient {
- *   com.sun.security.auth.module.Krb5LoginModule required
- *     useTicketCache=true
- *     renewTGT=true;
- * };
- * 
- * - *

Authentication using a keytab file

- * - * To enable authentication using a keytab file, specify its location on disk. If your keytab - * contains more than one principal key, you should also specify which one to select. This - * information can also be specified in the driver config, under the login-configuration section. - * - *
- * DseClient {
- *     com.sun.security.auth.module.Krb5LoginModule required
- *       useKeyTab=true
- *       keyTab="/path/to/file.keytab"
- *       principal="user@MYDOMAIN.COM";
- * };
- * 
- * - *

Specifying SASL protocol name

- * - * The SASL protocol name used by this auth provider defaults to " - * {@value #DEFAULT_SASL_SERVICE_NAME}". - * - *

Important: the SASL protocol name should match the username of the Kerberos - * service principal used by the DSE server. This information is specified in the dse.yaml file by - * the {@code service_principal} option under the kerberos_options - * section, and may vary from one DSE installation to another – especially if you installed - * DSE with an automated package installer. - * - *

For example, if your dse.yaml file contains the following: - * - *

{@code
- * kerberos_options:
- *     ...
- *     service_principal: cassandra/my.host.com@MY.REALM.COM
- * }
- * - * The correct SASL protocol name to use when authenticating against this DSE server is "{@code - * cassandra}". - * - *

Should you need to change the SASL protocol name specify it in the GssApiOptions, use the - * method below: - * - *

- *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder();
- *     builder.withSaslProtocol("alternate");
- *     DseGssApiAuthProviderBase.GssApiOptions options = builder.build();
- * 
- * - *

Should internal sasl properties need to be set such as qop. This can also be accomplished by - * setting it in the GssApiOptions: - * - *

- *   DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
- *         DseGssApiAuthProviderBase.GssApiOptions.builder();
- *     builder.addSaslProperty("javax.security.sasl.qop", "auth-conf");
- *     DseGssApiAuthProviderBase.GssApiOptions options = builder.build();
- * 
- * - * @see Authenticating - * a DSE cluster with Kerberos - */ -public class ProgrammaticDseGssApiAuthProvider extends DseGssApiAuthProviderBase { - private final GssApiOptions options; - - public ProgrammaticDseGssApiAuthProvider(GssApiOptions options) { - super("Programmatic-Kerberos"); - this.options = options; - } - - @NonNull - @Override - protected GssApiOptions getOptions( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - return options; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java deleted file mode 100644 index a3624ba736d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Map; - -public class ProxyAuthentication { - private static final String PROXY_EXECUTE = "ProxyExecute"; - - /** - * Adds proxy authentication information to a CQL statement. - * - *

This allows executing a statement as another role than the one the session is currently - * authenticated as. - * - * @param userOrRole the role to use for execution. If the statement was already configured with - * another role, it will get replaced by this one. - * @param statement the statement to modify. - * @return a statement that will run the same CQL query as {@code statement}, but acting as the - * provided role. Note: with the driver's default implementations, this will always be a copy; - * but if you use a custom implementation, it might return the same instance (depending on the - * behavior of {@link Statement#setCustomPayload(Map) statement.setCustomPayload()}). - * @see Setting - * up roles for applications (DSE 6.0 admin guide) - */ - @NonNull - public static > StatementT executeAs( - @NonNull String userOrRole, @NonNull StatementT statement) { - return statement.setCustomPayload( - addProxyExecuteEntry(statement.getCustomPayload(), userOrRole)); - } - - /** - * Adds proxy authentication information to a graph statement. 
- * - * @see #executeAs(String, Statement) - */ - @NonNull - public static > StatementT executeAs( - @NonNull String userOrRole, @NonNull StatementT statement) { - return statement.setCustomPayload( - addProxyExecuteEntry(statement.getCustomPayload(), userOrRole)); - } - - private static Map addProxyExecuteEntry( - Map currentPayload, @NonNull String userOrRole) { - NullAllowingImmutableMap.Builder builder = - NullAllowingImmutableMap.builder(); - builder.put(PROXY_EXECUTE, ByteBuffer.wrap(userOrRole.getBytes(Charsets.UTF_8))); - if (!currentPayload.isEmpty()) { - for (Map.Entry entry : currentPayload.entrySet()) { - String key = entry.getKey(); - if (!key.equals(PROXY_EXECUTE)) { - builder.put(key, entry.getValue()); - } - } - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java deleted file mode 100644 index 2694b51ffca..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.config; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.File; -import java.net.URL; - -/** - * @deprecated This class only exists for backward compatibility. All of its methods delegate to - * their counterparts on {@link DriverConfigLoader}, which you should call directly instead. - */ -@Deprecated -public class DseDriverConfigLoader { - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#fromClasspath(String)}, which you should call directly instead. - */ - @Deprecated - @NonNull - public static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { - return DriverConfigLoader.fromClasspath(resourceBaseName); - } - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#fromFile(File)}, which you should call directly instead. - */ - @Deprecated - @NonNull - public static DriverConfigLoader fromFile(@NonNull File file) { - return DriverConfigLoader.fromFile(file); - } - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#fromUrl(URL)}, which you should call directly instead. - */ - @Deprecated - @NonNull - public static DriverConfigLoader fromUrl(@NonNull URL url) { - return DriverConfigLoader.fromUrl(url); - } - - /** - * @deprecated This method only exists for backward compatibility. It delegates to {@link - * DriverConfigLoader#programmaticBuilder()}, which you should call directly instead. 
- */ - @Deprecated - @NonNull - public static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { - return DriverConfigLoader.programmaticBuilder(); - } - - private DseDriverConfigLoader() { - throw new AssertionError("Not meant to be instantiated"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java deleted file mode 100644 index 4d10501f6d2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.config; - -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; - -public enum DseDriverOption implements DriverOption { - /** - * The name of the application using the session. - * - *

Value type: {@link String} - */ - APPLICATION_NAME("basic.application.name"), - /** - * The version of the application using the session. - * - *

Value type: {@link String} - */ - APPLICATION_VERSION("basic.application.version"), - - /** - * Proxy authentication for GSSAPI authentication: allows to login as another user or role. - * - *

Value type: {@link String} - */ - AUTH_PROVIDER_AUTHORIZATION_ID("advanced.auth-provider.authorization-id"), - /** - * Service name for GSSAPI authentication. - * - *

Value type: {@link String} - */ - AUTH_PROVIDER_SERVICE("advanced.auth-provider.service"), - /** - * Login configuration for GSSAPI authentication. - * - *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> - */ - AUTH_PROVIDER_LOGIN_CONFIGURATION("advanced.auth-provider.login-configuration"), - /** - * Internal SASL properties, if any, such as QOP, for GSSAPI authentication. - * - *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> - */ - AUTH_PROVIDER_SASL_PROPERTIES("advanced.auth-provider.sasl-properties"), - - /** - * The page size for continuous paging. - * - *

Value type: int - */ - CONTINUOUS_PAGING_PAGE_SIZE("advanced.continuous-paging.page-size"), - /** - * Whether {@link #CONTINUOUS_PAGING_PAGE_SIZE} should be interpreted in number of rows or bytes. - * - *

Value type: boolean - */ - CONTINUOUS_PAGING_PAGE_SIZE_BYTES("advanced.continuous-paging.page-size-in-bytes"), - /** - * The maximum number of continuous pages to return. - * - *

Value type: int - */ - CONTINUOUS_PAGING_MAX_PAGES("advanced.continuous-paging.max-pages"), - /** - * The maximum number of continuous pages per second. - * - *

Value type: int - */ - CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND("advanced.continuous-paging.max-pages-per-second"), - /** - * The maximum number of continuous pages that can be stored in the local queue. - * - *

Value type: int - */ - CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES("advanced.continuous-paging.max-enqueued-pages"), - /** - * How long to wait for the coordinator to send the first continuous page. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE("advanced.continuous-paging.timeout.first-page"), - /** - * How long to wait for the coordinator to send subsequent continuous pages. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES("advanced.continuous-paging.timeout.other-pages"), - - /** - * The largest latency that we expect to record for continuous requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST( - "advanced.metrics.session.continuous-cql-requests.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * continuous requests. - * - *

Value-type: int - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS( - "advanced.metrics.session.continuous-cql-requests.significant-digits"), - /** - * The interval at which percentile data is refreshed for continuous requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL( - "advanced.metrics.session.continuous-cql-requests.refresh-interval"), - - /** - * The read consistency level to use for graph statements. - * - *

Value type: {@link String} - */ - GRAPH_READ_CONSISTENCY_LEVEL("basic.graph.read-consistency-level"), - /** - * The write consistency level to use for graph statements. - * - *

Value type: {@link String} - */ - GRAPH_WRITE_CONSISTENCY_LEVEL("basic.graph.write-consistency-level"), - /** - * The traversal source to use for graph statements. - * - *

Value type: {@link String} - */ - GRAPH_TRAVERSAL_SOURCE("basic.graph.traversal-source"), - /** - * The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra - * native protocol. - * - *

Value type: {@link String} - */ - GRAPH_SUB_PROTOCOL("advanced.graph.sub-protocol"), - /** - * Whether a script statement represents a system query. - * - *

Value type: boolean - */ - GRAPH_IS_SYSTEM_QUERY("basic.graph.is-system-query"), - /** - * The name of the graph targeted by graph statements. - * - *

Value type: {@link String} - */ - GRAPH_NAME("basic.graph.name"), - /** - * How long the driver waits for a graph request to complete. - * - *

Value-type: {@link java.time.Duration Duration} - */ - GRAPH_TIMEOUT("basic.graph.timeout"), - - /** - * Whether to send events for Insights monitoring. - * - *

Value type: boolean - */ - MONITOR_REPORTING_ENABLED("advanced.monitor-reporting.enabled"), - - /** - * Whether to enable paging for Graph queries. - * - *

Value type: {@link String} - */ - GRAPH_PAGING_ENABLED("advanced.graph.paging-enabled"), - - /** - * The page size for Graph continuous paging. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_PAGE_SIZE("advanced.graph.paging-options.page-size"), - - /** - * The maximum number of Graph continuous pages to return. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_MAX_PAGES("advanced.graph.paging-options.max-pages"), - /** - * The maximum number of Graph continuous pages per second. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND( - "advanced.graph.paging-options.max-pages-per-second"), - /** - * The maximum number of Graph continuous pages that can be stored in the local queue. - * - *

Value type: int - */ - GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES("advanced.graph.paging-options.max-enqueued-pages"), - /** - * The largest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_HIGHEST("advanced.metrics.session.graph-requests.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - * - *

Value-type: int - */ - METRICS_SESSION_GRAPH_REQUESTS_DIGITS( - "advanced.metrics.session.graph-requests.significant-digits"), - /** - * The interval at which percentile data is refreshed for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_INTERVAL( - "advanced.metrics.session.graph-requests.refresh-interval"), - /** - * The largest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_HIGHEST("advanced.metrics.node.graph-messages.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - * - *

Value-type: int - */ - METRICS_NODE_GRAPH_MESSAGES_DIGITS("advanced.metrics.node.graph-messages.significant-digits"), - /** - * The interval at which percentile data is refreshed for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_INTERVAL("advanced.metrics.node.graph-messages.refresh-interval"), - - /** - * The shortest latency that we expect to record for continuous requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST( - "advanced.metrics.session.continuous-cql-requests.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO( - "advanced.metrics.session.continuous-cql-requests.slo"), - - /** - * The shortest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_LOWEST("advanced.metrics.session.graph-requests.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_GRAPH_REQUESTS_SLO("advanced.metrics.session.graph-requests.slo"), - - /** - * The shortest latency that we expect to record for graph requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_LOWEST("advanced.metrics.node.graph-messages.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_GRAPH_MESSAGES_SLO("advanced.metrics.node.graph-messages.slo"), - /** - * Optional list of percentiles to publish for graph-requests metric. Produces an additional time - * series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES( - "advanced.metrics.session.graph-requests.publish-percentiles"), - /** - * Optional list of percentiles to publish for node graph-messages metric. Produces an additional - * time series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES( - "advanced.metrics.node.graph-messages.publish-percentiles"), - /** - * Optional list of percentiles to publish for continuous paging requests metric. Produces an - * additional time series for each requested percentile. This percentile is computed locally, and - * so can't be aggregated with percentiles computed across other dimensions (e.g. in a different - * instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES( - "advanced.metrics.session.continuous-cql-requests.publish-percentiles"), - ; - - private final String path; - - DseDriverOption(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java deleted file mode 100644 index a9491ec2414..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.concurrent.CancellationException; - -/** - * The result of an {@linkplain ContinuousSession#executeContinuouslyAsync(Statement) asynchronous - * continuous paging query}. - * - *

DSE replies to a continuous query with a stream of response frames. There is one instance of - * this class for each frame. - */ -public interface ContinuousAsyncResultSet - extends AsyncPagingIterable { - - /** Returns the current page's number. Pages are numbered starting from 1. */ - int pageNumber(); - - /** - * Cancels the continuous query. - * - *

There might still be rows available in the {@linkplain #currentPage() current page} after - * the cancellation; these rows can be retrieved normally. - * - *

Also, there might be more pages available in the driver's local page cache after the - * cancellation; these extra pages will be discarded. - * - *

Therefore, if you plan to resume the iteration later, the correct procedure is as follows: - * - *

    - *
  1. Cancel the operation by invoking this method, or by cancelling the {@linkplain - * #fetchNextPage() next page's future}; - *
  2. Keep iterating on the current page until it doesn't return any more rows; - *
  3. Retrieve the paging state with {@link #getExecutionInfo() - * getExecutionInfo().getPagingState()}; - *
  4. {@linkplain Statement#setPagingState(ByteBuffer) Re-inject the paging state} in the - * statement; - *
  5. Resume the operation by invoking {@link - * ContinuousSession#executeContinuouslyAsync(Statement) executeContinuouslyAsync} again. - *
- * - * After a cancellation, futures returned by {@link #fetchNextPage()} that are not yet complete - * will always complete exceptionally by throwing a {@link CancellationException}, even if - * they were obtained before the cancellation. - */ - void cancel(); - - /** - * {@inheritDoc} - * - *

Note: because the driver does not support query traces for continuous queries, {@link - * ExecutionInfo#getTracingId()} will always be {@code null}. - */ - @NonNull - @Override - ExecutionInfo getExecutionInfo(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java deleted file mode 100644 index a333801a59a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * The result of a {@linkplain ContinuousSession#executeContinuously(Statement) synchronous - * continuous paging query}. - * - *

It uses {@linkplain ContinuousAsyncResultSet asynchronous calls} internally, but blocks on the - * results in order to provide a synchronous API to its clients. If the query is paged, only the - * first page will be fetched initially, and iteration will trigger background fetches of the next - * pages when necessary. - * - *

Note that this object can only be iterated once: rows are "consumed" as they are read, - * subsequent calls to {@code iterator()} will return the same iterator instance. - * - *

Implementations of this type are not thread-safe. They can only be iterated by the - * thread that invoked {@code session.executeContinuously}. - */ -public interface ContinuousResultSet extends ResultSet { - - /** - * Cancels the continuous query. - * - *

There might still be rows available in the current page after the cancellation; the - * iteration will only stop when such rows are fully iterated upon. - * - *

Also, there might be more pages available in the driver's local page cache after the - * cancellation; these extra pages will be discarded. - * - *

Therefore, if you plan to resume the iteration later, the correct procedure is as follows: - * - *

    - *
  1. Cancel the operation by invoking this method; - *
  2. Keep iterating on this object until it doesn't return any more rows; - *
  3. Retrieve the paging state with {@link #getExecutionInfo() - * getExecutionInfo().getPagingState()}; - *
  4. {@linkplain Statement#setPagingState(ByteBuffer) Re-inject the paging state} in the - * statement; - *
  5. Resume the operation by invoking {@link ContinuousSession#executeContinuously(Statement) - * executeContinuously} again. - *
- */ - void cancel(); - - /** - * {@inheritDoc} - * - *

Note: because the driver does not support query traces for continuous queries, {@link - * ExecutionInfo#getTracingId()} will always be {@code null}. - */ - @NonNull - @Override - default ExecutionInfo getExecutionInfo() { - List infos = getExecutionInfos(); - return infos.get(infos.size() - 1); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java deleted file mode 100644 index 1c647b33b92..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestSyncProcessor; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** - * A session that has the ability to execute continuous paging queries. - * - *

Continuous paging is a new method of streaming bulk amounts of records from DataStax - * Enterprise (DSE) to the Java Driver, available since DSE 5.1. It is mainly intended to be - * leveraged by DSE - * Analytics and Apache Spark™, or by any similar analytics tool that needs to read large - * portions of a table in one single operation, as quickly and reliably as possible. - * - *

Continuous paging provides the best performance improvement against regular paging when the - * following conditions are met: - * - *

    - *
  1. The statement must target a single partition or a token range owned by one single replica; - * in practice, this means that the statement must have either a {@linkplain - * Statement#setRoutingKey(ByteBuffer) routing key} or a {@linkplain - * Statement#setRoutingToken(Token) routing token} set; - *
  2. The coordinator must be a replica; in practice, this is usually achieved by using - * token-aware routing (if you are using the driver's default {@link LoadBalancingPolicy}, - * then this condition is met); - *
  3. The consistency level must be {@link DefaultConsistencyLevel#ONE ONE} (or {@link - * DefaultConsistencyLevel#LOCAL_ONE LOCAL_ONE}). - *
- * - *

If the above conditions are met, the coordinator will be able to optimize the read path and - * serve results from local data, thus significantly improving response times; if however these - * conditions cannot be met, continuous paging would still work, but response times wouldn't be - * significantly better than those of regular paging anymore. - * - * @see Continuous - * paging options in cassandra.yaml configuration file - * @see DSE - * Continuous Paging Tuning and Support Guide - */ -public interface ContinuousSession extends Session { - - /** - * Executes the provided query with continuous paging synchronously. - * - *

This method takes care of chaining the successive results into a convenient iterable, - * provided that you always access the result from the same thread. For more flexibility, consider - * using the {@linkplain #executeContinuouslyAsync(Statement) asynchronous variant} of this method - * instead. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra® cluster will result in a runtime error. - * - * @param statement the query to execute. - * @return a synchronous iterable on the results. - */ - @NonNull - default ContinuousResultSet executeContinuously(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, ContinuousCqlRequestSyncProcessor.CONTINUOUS_RESULT_SYNC)); - } - - /** - * Executes the provided query with continuous paging asynchronously. - * - *

The server will push all requested pages asynchronously, according to the options defined in - * the current execution profile. The client should consume all pages as quickly as possible, to - * avoid blocking the server for too long. The server will adjust the rate according to the client - * speed, but it will give up if the client does not consume any pages in a period of time equal - * to the read request timeout. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra© cluster will result in a runtime error. - * - * @param statement the query to execute. - * @return a future to the first asynchronous result. - */ - @NonNull - default CompletionStage executeContinuouslyAsync( - @NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, ContinuousCqlRequestAsyncProcessor.CONTINUOUS_RESULT_ASYNC)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java deleted file mode 100644 index 6b645ad05bf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; - -/** - * A marker interface for publishers returned by {@link ContinuousReactiveSession}. - * - * @see ContinuousReactiveSession#executeContinuouslyReactive(String) - * @see ContinuousReactiveSession#executeContinuouslyReactive(Statement) - */ -public interface ContinuousReactiveResultSet extends ReactiveResultSet {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java deleted file mode 100644 index d00013731cb..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import org.reactivestreams.Publisher; - -/** - * A {@link Session} that offers utility methods to issue queries using reactive-style programming - * and continuous paging, combined together. - * - *

Methods in this interface all return {@link ContinuousReactiveResultSet} instances. All - * publishers support multiple subscriptions in a unicast fashion: each subscriber triggers an - * independent request execution and gets its own copy of the results. - * - *

Also, note that the publishers may emit items to their subscribers on an internal driver IO - * thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - * @see ReactiveRow - */ -public interface ContinuousReactiveSession extends Session { - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query continuously and - * emits all the results. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra® cluster will result in a runtime error. - * - * @param query the query to execute. - * @return The {@link Publisher} that will publish the returned results. - */ - @NonNull - default ContinuousReactiveResultSet executeContinuouslyReactive(@NonNull String query) { - return executeContinuouslyReactive(SimpleStatement.newInstance(query)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query continuously and - * emits all the results. - * - *

See {@link ContinuousSession} for more explanations about continuous paging. - * - *

This feature is only available with DataStax Enterprise. Executing continuous queries - * against an Apache Cassandra® cluster will result in a runtime error. - * - * @param statement the statement to execute. - * @return The {@link Publisher} that will publish the returned results. - */ - @NonNull - default ContinuousReactiveResultSet executeContinuouslyReactive(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, ContinuousCqlRequestReactiveProcessor.CONTINUOUS_REACTIVE_RESULT_SET)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java deleted file mode 100644 index 55a898cd3ee..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * Interface implemented by all the reactive result set publishers provided by the driver, and - * notably by {@link ReactiveResultSet}. - */ -public interface ReactiveQueryMetadata { - - /** - * Returns metadata about the {@linkplain ColumnDefinitions columns} contained in this result set. - * - *

This publisher emits exactly one item as soon as the first response arrives, then completes. - * If the query execution fails within the first request-response cycle, then this - * publisher will fail with the same error; however if the error happens after the first - * response, then this publisher will be already completed and will not acknowledge that - * error in any way. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - * @see ReactiveRow#getColumnDefinitions() - */ - @NonNull - Publisher getColumnDefinitions(); - - /** - * Returns {@linkplain ExecutionInfo information about the execution} of all requests that have - * been performed so far to assemble this result set. - * - *

If the query is not paged, this publisher will emit exactly one item as soon as the response - * arrives, then complete. If the query is paged, it will emit multiple items, one per page; then - * it will complete when the last page arrives. If the query execution fails, then this publisher - * will fail with the same error. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - * @see ReactiveRow#getExecutionInfo() - */ - @NonNull - Publisher getExecutionInfos(); - - /** - * If the query that produced this result was a conditional update, indicates whether it was - * successfully applied. - * - *

This publisher emits exactly one item as soon as the first response arrives, then completes. - * If the query execution fails within the first request-response cycle, then this - * publisher will fail with the same error; however if the error happens after the first - * response, then this publisher will be already completed and will not acknowledge that - * error in any way. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - * - * @see ReactiveRow#wasApplied() - */ - @NonNull - Publisher wasApplied(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java deleted file mode 100644 index 0e44dab8cab..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.Statement; -import org.reactivestreams.Publisher; - -/** - * A {@link Publisher} of {@link ReactiveRow}s returned by a {@link ReactiveSession}. - * - *

By default, all implementations returned by the driver are cold, unicast, single-subscriber - * only publishers. In other words, they do not support multiple subscriptions; consider - * caching the results produced by such publishers if you need to consume them by more than one - * downstream subscriber. - * - *

Also, note that reactive result sets may emit items to their subscribers on an internal driver - * IO thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - *

This interface exists mainly to expose useful information about {@linkplain - * #getExecutionInfos() request execution} and {@linkplain #getColumnDefinitions() query metadata}. - * This is particularly convenient for queries that do not return rows; for queries that do return - * rows, it is also possible, and oftentimes easier, to access that same information {@linkplain - * ReactiveRow at row level}. - * - * @see ReactiveSession#executeReactive(String) - * @see ReactiveSession#executeReactive(Statement) - * @see ReactiveRow - */ -public interface ReactiveResultSet extends Publisher, ReactiveQueryMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java deleted file mode 100644 index c3b94689580..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.type.DataTypes; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A row produced by a {@linkplain ReactiveResultSet reactive result set}. - * - *

This is essentially an extension of the driver's {@link Row} object that also exposes useful - * information about {@linkplain #getExecutionInfo() request execution} and {@linkplain - * #getColumnDefinitions() query metadata} (note however that this information is also exposed at - * result set level for convenience). - * - * @see ReactiveSession - * @see ReactiveResultSet - */ -public interface ReactiveRow extends Row { - - /** - * Returns the column definitions contained in this row. - * - *

This object is the same for all rows pertaining to the same result set. - * - * @return the column definitions contained in this row. - * @see ReactiveResultSet#getColumnDefinitions() - */ - @NonNull - @Override - ColumnDefinitions getColumnDefinitions(); - - /** - * The execution information for the paged request that produced this result. - * - *

This object is the same for two rows pertaining to the same page, but differs for rows - * pertaining to different pages. - * - * @return the execution information for the paged request that produced this result. - * @see ReactiveResultSet#getExecutionInfos() - */ - @NonNull - ExecutionInfo getExecutionInfo(); - - /** - * If the query that produced this result was a conditional update, indicates whether it was - * successfully applied. - * - *

This is equivalent to calling: - * - *

{@code
-   * ReactiveRow row = ...
-   * boolean wasApplied = row.getBoolean("[applied]");
-   * }
- * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - * - *

This method always return the same value for all results in the result set. - * - * @return {@code true} for non-conditional queries and for conditional queries that were - * successfully applied, {@code false} otherwise. - */ - default boolean wasApplied() { - return !getColumnDefinitions().contains("[applied]") - || !getColumnDefinitions().get("[applied]").getType().equals(DataTypes.BOOLEAN) - || getBoolean("[applied]"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java deleted file mode 100644 index 2fd8ffe41c2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.cql.reactive; - -import com.datastax.dse.driver.internal.core.cql.reactive.CqlRequestReactiveProcessor; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import org.reactivestreams.Publisher; - -/** - * A {@link Session} that offers utility methods to issue queries using reactive-style programming. - * - *

Methods in this interface all return {@link ReactiveResultSet} instances. See the javadocs of - * this interface for important remarks and caveats regarding the subscription to and consumption of - * reactive result sets. - * - * @see ReactiveResultSet - * @see ReactiveRow - */ -public interface ReactiveSession extends Session { - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - *

This is an alias for {@link #executeReactive(Statement)} - * executeReactive(SimpleStatement.newInstance(query))}. - * - * @param query the query to execute. - * @return The {@link Publisher} that will publish the returned results. - * @see SimpleStatement#newInstance(String) - */ - @NonNull - default ReactiveResultSet executeReactive(@NonNull String query) { - return executeReactive(SimpleStatement.newInstance(query)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - *

This is an alias for {@link #executeReactive(Statement) - * executeReactive(SimpleStatement.newInstance(query, values))}. - * - * @param query the query to execute. - * @param values the values for placeholders in the query string. Individual values can be {@code - * null}, but the vararg array itself can't. - * @return The {@link Publisher} that will publish the returned results. - * @see SimpleStatement#newInstance(String,Object...) - */ - @NonNull - default ReactiveResultSet executeReactive(@NonNull String query, @NonNull Object... values) { - return executeReactive(SimpleStatement.newInstance(query, values)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - *

This is an alias for {@link #executeReactive(Statement)} - * executeReactive(SimpleStatement.newInstance(query,values))}. - * - * @param query the query to execute. - * @param values the values for named placeholders in the query string. Individual values can be - * {@code null}, but the map itself can't. - * @return The {@link Publisher} that will publish the returned results. - * @see SimpleStatement#newInstance(String,Map) - */ - @NonNull - default ReactiveResultSet executeReactive( - @NonNull String query, @NonNull Map values) { - return executeReactive(SimpleStatement.newInstance(query, values)); - } - - /** - * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all - * the results. - * - * @param statement the statement to execute. - * @return The {@link Publisher} that will publish the returned results. - */ - @NonNull - default ReactiveResultSet executeReactive(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, CqlRequestReactiveProcessor.REACTIVE_RESULT_SET)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java deleted file mode 100644 index 01a5f514aba..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Types related to CQL execution using reactive-style programming. - * - *

Note that this is located in a {@code dse} package for historical reasons; reactive queries - * can now be used with open-source Cassandra as well. - */ -package com.datastax.dse.driver.api.core.cql.reactive; diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java deleted file mode 100644 index 66a5708832e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -/** - * The driver-side representation for a DSE geospatial type. - * - *

- *     Row row = dseSession.execute("SELECT coords FROM points_of_interest WHERE name = 'Eiffel Tower'").one();
- *     Point coords = row.get("coords", Point.class);
- * 
- * - * The default implementations returned by the driver are immutable and serializable. If you write - * your own implementations, they should at least be thread-safe; serializability is not mandatory, - * but recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface Geometry { - - /** - * Returns a Well-known Text (WKT) - * representation of this geospatial type. - */ - @NonNull - String asWellKnownText(); - - /** - * Returns a Well-known - * Binary (WKB) representation of this geospatial type. - * - *

Note that, due to DSE implementation details, the resulting byte buffer always uses - * little-endian order, regardless of the platform's native order. - */ - @NonNull - ByteBuffer asWellKnownBinary(); - - /** Returns a JSON representation of this geospatial type. */ - @NonNull - String asGeoJson(); - - /** - * Tests whether this geospatial type instance contains another instance. - * - * @param other the other instance. - * @return whether {@code this} contains {@code other}. - */ - boolean contains(@NonNull Geometry other); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java deleted file mode 100644 index 7f77b3202a2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * The driver-side representation for DSE's {@code LineString}. - * - *

This is a curve in a two-dimensional XY-plane, represented by a set of points (with linear - * interpolation between them). - * - *

The default implementation returned by the driver is immutable. - */ -public interface LineString extends Geometry { - /** - * Creates a line string from its Well-known Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the line string represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - static LineString fromWellKnownText(@NonNull String source) { - return new DefaultLineString(DefaultGeometry.fromOgcWellKnownText(source, OGCLineString.class)); - } - - /** - * Creates a line string from its Well-known Binary - * (WKB) representation. - * - * @param source the Well-known Binary representation to parse. - * @return the line string represented by the WKB. - * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - static LineString fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultLineString( - DefaultGeometry.fromOgcWellKnownBinary(source, OGCLineString.class)); - } - - /** - * Creates a line string from a GeoJSON - * LineString representation. - * - * @param source the GeoJSON - * LineString representation to parse. - * @return the line string represented by the GeoJSON LineString. - * @throws IllegalArgumentException if the string does not contain a valid GeoJSON LineString - * representation. - */ - @NonNull - static LineString fromGeoJson(@NonNull String source) { - return new DefaultLineString(DefaultGeometry.fromOgcGeoJson(source, OGCLineString.class)); - } - - /** Creates a line string from two or more points. */ - @NonNull - static LineString fromPoints(@NonNull Point p1, @NonNull Point p2, @NonNull Point... 
pn) { - return new DefaultLineString(p1, p2, pn); - } - - @NonNull - List getPoints(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java deleted file mode 100644 index b064b3fb222..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -/** - * The driver-side representation of DSE's {@code Point}. - * - *

This is a zero-dimensional object that represents a specific (X,Y) location in a - * two-dimensional XY-plane. In case of Geographic Coordinate Systems, the X coordinate is the - * longitude and the Y is the latitude. - * - *

The default implementation returned by the driver is immutable. - */ -public interface Point extends Geometry { - - /** - * Creates a point from its Well-known - * Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the point represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - static Point fromWellKnownText(@NonNull String source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownText(source, OGCPoint.class)); - } - - /** - * Creates a point from its Well-known Binary - * (WKB) representation. - * - * @param source the Well-known Binary representation to parse. - * @return the point represented by the WKB. - * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - static Point fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPoint.class)); - } - - /** - * Creates a point from a GeoJSON - * Point representation. - * - * @param source the GeoJSON Point - * representation to parse. - * @return the point represented by the GeoJSON Point. - * @throws IllegalArgumentException if the string does not contain a valid GeoJSON Point representation. - */ - @NonNull - static Point fromGeoJson(@NonNull String source) { - return new DefaultPoint(DefaultGeometry.fromOgcGeoJson(source, OGCPoint.class)); - } - - /** - * Creates a new point. - * - * @param x The X coordinate of this point (or its longitude in Geographic Coordinate Systems). - * @param y The Y coordinate of this point (or its latitude in Geographic Coordinate Systems). - * @return the point represented by coordinates. 
- */ - @NonNull - static Point fromCoordinates(double x, double y) { - return new DefaultPoint(x, y); - } - - /** - * Returns the X coordinate of this 2D point (or its longitude in Geographic Coordinate Systems). - */ - double X(); - - /** - * Returns the Y coordinate of this 2D point (or its latitude in Geographic Coordinate Systems). - */ - double Y(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java deleted file mode 100644 index d793704defa..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.esri.core.geometry.ogc.OGCPolygon; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * The driver-side representation of DSE's {@code Polygon}. - * - *

This is a planar surface in a two-dimensional XY-plane, represented by one exterior boundary - * and 0 or more interior boundaries. - * - *

The default implementation returned by the driver is immutable. - */ -public interface Polygon extends Geometry { - /** - * Creates a polygon from its Well-known - * Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the polygon represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - static Polygon fromWellKnownText(@NonNull String source) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownText(source, OGCPolygon.class)); - } - - /** - * Creates a polygon from its Well-known Binary - * (WKB) representation. - * - * @param source the Well-known Binary representation to parse. - * @return the polygon represented by the WKB. - * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - static Polygon fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPolygon.class)); - } - - /** - * Creates a polygon from a GeoJSON - * Polygon representation. - * - * @param source the GeoJSON Polygon - * representation to parse. - * @return the polygon represented by the GeoJSON Polygon. - * @throws IllegalArgumentException if the string does not contain a valid GeoJSON Polygon representation. - */ - @NonNull - static Polygon fromGeoJson(@NonNull String source) { - return new DefaultPolygon(DefaultGeometry.fromOgcGeoJson(source, OGCPolygon.class)); - } - - /** Creates a polygon from a series of 3 or more points. */ - @NonNull - static Polygon fromPoints( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn) { - return new DefaultPolygon(p1, p2, p3, pn); - } - - /** - * Returns a polygon builder. - * - *

This is intended for complex polygons with multiple rings (i.e. holes inside the polygon). - * For simple cases, consider {@link #fromPoints(Point, Point, Point, Point...)} instead. - */ - @NonNull - static Builder builder() { - return new DefaultPolygon.Builder(); - } - - /** Returns the external ring of the polygon. */ - @NonNull - List getExteriorRing(); - - /** - * Returns the internal rings of the polygon, i.e. any holes inside of it (or islands inside of - * the holes). - */ - @NonNull - List> getInteriorRings(); - - /** Provides a simple DSL to build a polygon. */ - interface Builder { - /** - * Adds a new ring for this polygon. - * - *

There can be one or more outer rings and zero or more inner rings. If a polygon has an - * inner ring, the inner ring looks like a hole. If the hole contains another outer ring, that - * outer ring looks like an island. - * - *

There must be one "main" outer ring that contains all the others. - */ - @NonNull - Builder addRing(@NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn); - - @NonNull - Polygon build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java deleted file mode 100644 index 3dd48915dba..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.text.ParseException; -import java.time.ZonedDateTime; -import java.util.Objects; -import java.util.Optional; - -/** - * A date range, as defined by the server type {@code - * org.apache.cassandra.db.marshal.DateRangeType}, corresponding to the Apache Solr type {@code - * DateRangeField}. 
- * - *

A date range can be either {@linkplain DateRange#DateRange(DateRangeBound) single-bounded}, in - * which case it represents a unique instant (e.g. "{@code 2001-01-01}"), or {@linkplain - * #DateRange(DateRangeBound, DateRangeBound) double-bounded}, in which case it represents an - * interval of time (e.g. "{@code [2001-01-01 TO 2002]}"). - * - *

Date range {@linkplain DateRangeBound bounds} are always inclusive; they must be either valid - * dates, or the special value {@link DateRangeBound#UNBOUNDED UNBOUNDED}, represented by a "{@code - * *}", e.g. "{@code [2001 TO *]}". - * - *

Instances can be more easily created with the {@link #parse(String)} method. - * - *

This class is immutable and thread-safe. - * - * @since DSE 5.1 - */ -public class DateRange implements Serializable { - - /** - * Parses the given string as a date range. - * - *

The given input must be compliant with Apache Solr type {@code - * DateRangeField} syntax; it can either be a {@linkplain #DateRange(DateRangeBound) - * single-bounded range}, or a {@linkplain #DateRange(DateRangeBound, DateRangeBound) - * double-bounded range}. - * - * @throws ParseException if the given string could not be parsed into a valid range. - * @see DateRangeBound#parseLowerBound(String) - * @see DateRangeBound#parseUpperBound(String) - */ - @NonNull - public static DateRange parse(@NonNull String source) throws ParseException { - if (Strings.isNullOrEmpty(source)) { - throw new ParseException("Date range is null or empty", 0); - } - - if (source.charAt(0) == '[') { - if (source.charAt(source.length() - 1) != ']') { - throw new ParseException( - "If date range starts with '[' it must end with ']'; got " + source, - source.length() - 1); - } - int middle = source.indexOf(" TO "); - if (middle < 0) { - throw new ParseException( - "If date range starts with '[' it must contain ' TO '; got " + source, 0); - } - String lowerBoundString = source.substring(1, middle); - int upperBoundStart = middle + 4; - String upperBoundString = source.substring(upperBoundStart, source.length() - 1); - DateRangeBound lowerBound; - try { - lowerBound = DateRangeBound.parseLowerBound(lowerBoundString); - } catch (Exception e) { - throw newParseException("Cannot parse date range lower bound: " + source, 1, e); - } - DateRangeBound upperBound; - try { - upperBound = DateRangeBound.parseUpperBound(upperBoundString); - } catch (Exception e) { - throw newParseException( - "Cannot parse date range upper bound: " + source, upperBoundStart, e); - } - return new DateRange(lowerBound, upperBound); - } else { - try { - return new DateRange(DateRangeBound.parseLowerBound(source)); - } catch (Exception e) { - throw newParseException("Cannot parse single date range bound: " + source, 0, e); - } - } - } - - @NonNull private final DateRangeBound lowerBound; - @Nullable private final 
DateRangeBound upperBound; - - /** - * Creates a "single bounded" instance, i.e., a date range whose upper and lower bounds are - * identical. - * - * @throws NullPointerException if {@code singleBound} is null. - */ - public DateRange(@NonNull DateRangeBound singleBound) { - this.lowerBound = Preconditions.checkNotNull(singleBound, "singleBound cannot be null"); - this.upperBound = null; - } - - /** - * Creates an instance composed of two distinct bounds. - * - * @throws NullPointerException if {@code lowerBound} or {@code upperBound} is null. - * @throws IllegalArgumentException if both {@code lowerBound} and {@code upperBound} are not - * unbounded and {@code lowerBound} is greater than {@code upperBound}. - */ - public DateRange(@NonNull DateRangeBound lowerBound, @NonNull DateRangeBound upperBound) { - Preconditions.checkNotNull(lowerBound, "lowerBound cannot be null"); - Preconditions.checkNotNull(upperBound, "upperBound cannot be null"); - if (!lowerBound.isUnbounded() - && !upperBound.isUnbounded() - && lowerBound.getTimestamp().compareTo(upperBound.getTimestamp()) >= 0) { - throw new IllegalArgumentException( - String.format( - "Lower bound of a date range should be before upper bound, got: [%s TO %s]", - lowerBound, upperBound)); - } - this.lowerBound = lowerBound; - this.upperBound = upperBound; - } - - /** Returns the lower bound of this range (inclusive). */ - @NonNull - public DateRangeBound getLowerBound() { - return lowerBound; - } - - /** - * Returns the upper bound of this range (inclusive), or empty if the range is {@linkplain - * #isSingleBounded() single-bounded}. - */ - @NonNull - public Optional getUpperBound() { - return Optional.ofNullable(upperBound); - } - - /** - * Returns whether this range is single-bounded, i.e. if the upper and lower bounds are identical. 
- */ - public boolean isSingleBounded() { - return upperBound == null; - } - - /** - * Returns the string representation of this range, in a format compatible with Apache Solr - * DateRageField syntax - * - * @see DateRangeBound#toString() - */ - @NonNull - @Override - public String toString() { - if (isSingleBounded()) { - return lowerBound.toString(); - } else { - return String.format("[%s TO %s]", lowerBound, upperBound); - } - } - - @Override - public boolean equals(@Nullable Object other) { - if (other == this) { - return true; - } else if (other instanceof DateRange) { - DateRange that = (DateRange) other; - return Objects.equals(this.lowerBound, that.lowerBound) - && Objects.equals(this.upperBound, that.upperBound); - - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(lowerBound, upperBound); - } - - private static ParseException newParseException(String message, int offset, Exception cause) { - ParseException parseException = new ParseException(message, offset); - parseException.initCause(cause); - return parseException; - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData the lower bound timestamp and precision, followed by the upper bound timestamp and - * precision, or two {@code null}s if the range is single-bounded. 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final ZonedDateTime lowerBoundTimestamp; - private final DateRangePrecision lowerBoundPrecision; - private final ZonedDateTime upperBoundTimestamp; - private final DateRangePrecision upperBoundPrecision; - - SerializationProxy(DateRange input) { - this.lowerBoundTimestamp = input.lowerBound.getTimestamp(); - this.lowerBoundPrecision = input.lowerBound.getPrecision(); - if (input.upperBound != null) { - this.upperBoundTimestamp = input.upperBound.getTimestamp(); - this.upperBoundPrecision = input.upperBound.getPrecision(); - } else { - this.upperBoundTimestamp = null; - this.upperBoundPrecision = null; - } - } - - private Object readResolve() { - if (upperBoundTimestamp == null ^ upperBoundPrecision == null) { - // Should not happen, but protect against corrupted streams - throw new IllegalArgumentException( - "Invalid serialized form, upper bound timestamp and precision " - + "should be either both null or both non-null"); - } - - if (upperBoundTimestamp == null) { - return new DateRange(DateRangeBound.lowerBound(lowerBoundTimestamp, lowerBoundPrecision)); - } else { - return new DateRange( - DateRangeBound.lowerBound(lowerBoundTimestamp, lowerBoundPrecision), - DateRangeBound.upperBound(upperBoundTimestamp, upperBoundPrecision)); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java deleted file mode 100644 index 1621b8bf742..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import com.datastax.dse.driver.internal.core.search.DateRangeUtil; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.text.ParseException; -import java.time.ZonedDateTime; -import java.util.Calendar; -import java.util.Objects; - -/** - * A date range bound. - * - *

It is composed of a {@link ZonedDateTime} field and a corresponding {@link - * DateRangePrecision}. - * - *

Date range bounds are inclusive. The special value {@link #UNBOUNDED} denotes an unbounded - * (infinite) bound, represented by a {@code *} sign. - * - *

This class is immutable and thread-safe. - */ -public class DateRangeBound { - - /** - * The unbounded {@link DateRangeBound} instance. It is syntactically represented by a {@code *} - * (star) sign. - */ - public static final DateRangeBound UNBOUNDED = new DateRangeBound(); - - /** - * Parses the given input as a lower date range bound. - * - *

The input should be a Lucene-compliant - * string. - * - *

The returned bound will have its {@linkplain DateRangePrecision precision} inferred from the - * input, and its timestamp will be {@linkplain DateRangePrecision#roundDown(ZonedDateTime) - * rounded down} to that precision. - * - *

Note that, in order to align with the server's parsing behavior, dates will always be parsed - * in the UTC time zone. - * - * @throws NullPointerException if {@code lowerBound} is {@code null}. - * @throws ParseException if the given input cannot be parsed. - */ - @NonNull - public static DateRangeBound parseLowerBound(@NonNull String source) throws ParseException { - Preconditions.checkNotNull(source); - Calendar calendar = DateRangeUtil.parseCalendar(source); - DateRangePrecision precision = DateRangeUtil.getPrecision(calendar); - return (precision == null) - ? UNBOUNDED - : lowerBound(DateRangeUtil.toZonedDateTime(calendar), precision); - } - - /** - * Parses the given input as an upper date range bound. - * - *

The input should be a Lucene-compliant - * string. - * - *

The returned bound will have its {@linkplain DateRangePrecision precision} inferred from the - * input, and its timestamp will be {@linkplain DateRangePrecision#roundUp(ZonedDateTime) rounded - * up} to that precision. - * - *

Note that, in order to align with the server's behavior (e.g. when using date range literals - * in CQL query strings), dates must always be in the UTC time zone: an optional trailing {@code - * Z} is allowed, but no other time zone ID (not even {@code UTC}, {@code GMT} or {@code +00:00}) - * is permitted. - * - * @throws NullPointerException if {@code upperBound} is {@code null}. - * @throws ParseException if the given input cannot be parsed. - */ - public static DateRangeBound parseUpperBound(String source) throws ParseException { - Preconditions.checkNotNull(source); - Calendar calendar = DateRangeUtil.parseCalendar(source); - DateRangePrecision precision = DateRangeUtil.getPrecision(calendar); - return (precision == null) - ? UNBOUNDED - : upperBound(DateRangeUtil.toZonedDateTime(calendar), precision); - } - - /** - * Creates a date range lower bound from the given date and precision. Temporal fields smaller - * than the precision will be rounded down. - */ - public static DateRangeBound lowerBound(ZonedDateTime timestamp, DateRangePrecision precision) { - return new DateRangeBound(precision.roundDown(timestamp), precision); - } - - /** - * Creates a date range upper bound from the given date and precision. Temporal fields smaller - * than the precision will be rounded up. 
- */ - public static DateRangeBound upperBound(ZonedDateTime timestamp, DateRangePrecision precision) { - return new DateRangeBound(precision.roundUp(timestamp), precision); - } - - @Nullable private final ZonedDateTime timestamp; - @Nullable private final DateRangePrecision precision; - - private DateRangeBound(@NonNull ZonedDateTime timestamp, @NonNull DateRangePrecision precision) { - Preconditions.checkNotNull(timestamp); - Preconditions.checkNotNull(precision); - this.timestamp = timestamp; - this.precision = precision; - } - - // constructor used for the special UNBOUNDED value - private DateRangeBound() { - this.timestamp = null; - this.precision = null; - } - - /** Whether this bound is unbounded (i.e. denotes the special {@code *} value). */ - public boolean isUnbounded() { - return this.timestamp == null && this.precision == null; - } - - /** - * Returns the timestamp of this bound. - * - * @throws IllegalStateException if this bound is {@linkplain #isUnbounded() unbounded}. - */ - @NonNull - public ZonedDateTime getTimestamp() { - if (isUnbounded()) { - throw new IllegalStateException( - "Can't call this method on UNBOUNDED, use isUnbounded() to check first"); - } - assert timestamp != null; - return timestamp; - } - - /** - * Returns the precision of this bound. - * - * @throws IllegalStateException if this bound is {@linkplain #isUnbounded() unbounded}. - */ - @NonNull - public DateRangePrecision getPrecision() { - if (isUnbounded()) { - throw new IllegalStateException( - "Can't call this method on UNBOUNDED, use isUnbounded() to check first"); - } - assert precision != null; - return precision; - } - - /** - * Returns this bound as a Lucene-compliant string. - * - *

Unbounded bounds always return "{@code *}"; all other bounds are formatted in one of the - * common ISO-8601 datetime formats, depending on their precision. - * - *

Note that Lucene expects timestamps in UTC only. Timezone presence is always optional, and - * if present, it must be expressed with the symbol "Z" exclusively. Therefore this method does - * not include any timezone information in the returned string, except for bounds with {@linkplain - * DateRangePrecision#MILLISECOND millisecond} precision, where the symbol "Z" is always appended - * to the resulting string. - */ - @NonNull - @Override - public String toString() { - if (isUnbounded()) { - return "*"; - } else { - assert timestamp != null && precision != null; - return precision.format(timestamp); - } - } - - @Override - public boolean equals(@Nullable Object other) { - if (other == this) { - return true; - } else if (other instanceof DateRangeBound) { - DateRangeBound that = (DateRangeBound) other; - return Objects.equals(this.timestamp, that.timestamp) - && Objects.equals(this.precision, that.precision); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(timestamp, precision); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java deleted file mode 100644 index ce811466c38..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import com.datastax.dse.driver.internal.core.search.DateRangeUtil; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeFormatterBuilder; -import java.time.temporal.ChronoField; -import java.time.temporal.ChronoUnit; -import java.util.Locale; -import java.util.Map; - -/** The precision of a {@link DateRangeBound}. 
*/ -public enum DateRangePrecision { - MILLISECOND( - 0x06, - ChronoUnit.MILLIS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH:mm:ss.SSS") - .optionalStart() - .appendZoneId() - .optionalEnd() - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - SECOND( - 0x05, - ChronoUnit.SECONDS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH:mm:ss") - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - MINUTE( - 0x04, - ChronoUnit.MINUTES, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH:mm") - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - HOUR( - 0x03, - ChronoUnit.HOURS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd'T'HH") - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - DAY( - 0x02, - ChronoUnit.DAYS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM-dd") - .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - MONTH( - 0x01, - ChronoUnit.MONTHS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu-MM") - .parseDefaulting(ChronoField.DAY_OF_MONTH, 1) - .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) - 
.parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)), - YEAR( - 0x00, - ChronoUnit.YEARS, - new DateTimeFormatterBuilder() - .parseCaseSensitive() - .parseStrict() - .appendPattern("uuuu") - .parseDefaulting(ChronoField.MONTH_OF_YEAR, 1) - .parseDefaulting(ChronoField.DAY_OF_MONTH, 1) - .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) - .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) - .toFormatter() - .withZone(ZoneOffset.UTC) - .withLocale(Locale.ROOT)); - - private final byte encoding; - private final ChronoUnit roundingUnit; - // The formatter is only used for formatting (parsing is done with DateRangeUtil.parseCalendar to - // be exactly the same as DSE's). - // If that ever were to change, note that DateTimeFormatters with a time zone have a parsing bug - // in Java 8: the formatter's zone will always be used, even if the input string specifies one - // explicitly. 
- // See https://stackoverflow.com/questions/41999421 - private final DateTimeFormatter formatter; - - DateRangePrecision(int encoding, ChronoUnit roundingUnit, DateTimeFormatter formatter) { - this.encoding = (byte) encoding; - this.roundingUnit = roundingUnit; - this.formatter = formatter; - } - - private static final Map ENCODINGS; - - static { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (DateRangePrecision precision : values()) { - builder.put(precision.encoding, precision); - } - ENCODINGS = builder.build(); - } - - public static DateRangePrecision fromEncoding(byte encoding) { - DateRangePrecision precision = ENCODINGS.get(encoding); - if (precision == null) { - throw new IllegalArgumentException("Invalid precision encoding: " + encoding); - } - return precision; - } - - /** The code used to represent the precision when a date range is encoded to binary. */ - public byte getEncoding() { - return encoding; - } - - /** - * Rounds up the given timestamp to this precision. - * - *

Temporal fields smaller than this precision will be rounded up; other fields will be left - * untouched. - */ - @NonNull - public ZonedDateTime roundUp(@NonNull ZonedDateTime timestamp) { - Preconditions.checkNotNull(timestamp); - return DateRangeUtil.roundUp(timestamp, roundingUnit); - } - - /** - * Rounds down the given timestamp to this precision. - * - *

Temporal fields smaller than this precision will be rounded down; other fields will be left - * untouched. - */ - @NonNull - public ZonedDateTime roundDown(@NonNull ZonedDateTime timestamp) { - Preconditions.checkNotNull(timestamp); - return DateRangeUtil.roundDown(timestamp, roundingUnit); - } - - /** Formats the given timestamp according to this precision. */ - public String format(ZonedDateTime timestamp) { - return formatter.format(timestamp); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java deleted file mode 100644 index 995de53959b..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.GraphExecutionInfoConverter; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; - -/** - * The result of an asynchronous graph query. - * - *

The default implementation returned by the driver is not thread-safe: the iterable - * returned by {@link #currentPage()} should only be iterated by a single thread. However, if - * subsequent pages are requested via {@link #fetchNextPage()}, it's safe to process those new - * instances in other threads (as long as each individual page of results is not accessed - * concurrently). - * - * @see GraphResultSet - */ -public interface AsyncGraphResultSet { - - /** The execution information for this page of results. */ - @NonNull - default ExecutionInfo getRequestExecutionInfo() { - return GraphExecutionInfoConverter.convert(getExecutionInfo()); - } - - /** - * The execution information for this page of results. - * - * @deprecated Use {@link #getRequestExecutionInfo()} instead. - */ - @Deprecated - @NonNull - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo(); - - /** How many rows are left before the current page is exhausted. */ - int remaining(); - - /** - * The nodes in the current page. To keep iterating beyond that, use {@link #hasMorePages()} and - * {@link #fetchNextPage()}. - * - *

Note that this method always returns the same object, and that that object can only be - * iterated once: nodes are "consumed" as they are read. - */ - @NonNull - Iterable currentPage(); - - /** - * Returns the next node, or {@code null} if the result set is exhausted. - * - *

This is convenient for queries that are known to return exactly one node. - */ - @Nullable - default GraphNode one() { - Iterator iterator = currentPage().iterator(); - return iterator.hasNext() ? iterator.next() : null; - } - - /** - * Whether there are more pages of results. If so, call {@link #fetchNextPage()} to fetch the next - * one asynchronously. - */ - boolean hasMorePages(); - - /** - * Fetch the next page of results asynchronously. - * - * @throws IllegalStateException if there are no more pages. Use {@link #hasMorePages()} to check - * if you can call this method. - */ - @NonNull - CompletionStage fetchNextPage() throws IllegalStateException; - - /** - * Cancels the query and asks the server to stop sending results. - * - *

At this time, graph queries are not paginated and the server sends all the results at once; - * therefore this method has no effect. - */ - void cancel(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java deleted file mode 100644 index 2169dc5f053..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultBatchGraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A graph statement that groups multiple mutating traversals together, to be executed in the - * same transaction. - * - *

It is reserved for graph mutations, and does not return any result. - * - *

All the mutations grouped in the batch will either all succeed, or they will all be discarded - * and return an error. - * - *

The default implementation returned by the driver is immutable and thread-safe. Each mutation - * operation returns a copy. If you chain many of those operations, it is recommended to use {@link - * #builder()} instead for better memory usage. - * - *

Typically used like so: - * - *

{@code
- * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
- *
- * BatchGraphStatement statement =
- *     BatchGraphStatement.builder()
- *         .addTraversal(
- *                 g.addV("person").property("name", "batch1").property("age", 1))
- *         .addTraversal(
- *                 g.addV("person").property("name", "batch2").property("age", 2))
- *         .build();
- *
- * GraphResultSet graphResultSet = dseSession.execute(statement);
- * }
- * - * @see DseGraph#g - */ -public interface BatchGraphStatement - extends GraphStatement, Iterable { - - /** - * Create a new, empty instance. - * - *

Traversals can be added with {@link #addTraversal(GraphTraversal)}. - */ - @NonNull - static BatchGraphStatement newInstance() { - return new DefaultBatchGraphStatement( - ImmutableList.of(), - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** Create a new instance from the given list of traversals. */ - @NonNull - static BatchGraphStatement newInstance(@NonNull Iterable traversals) { - return new DefaultBatchGraphStatement( - traversals, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** Create a new instance from the given list of traversals. */ - @NonNull - static BatchGraphStatement newInstance(@NonNull GraphTraversal... traversals) { - return newInstance(ImmutableList.copyOf(traversals)); - } - - /** - * Create a builder helper object to start creating a new instance. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchGraphStatementBuilder builder() { - return new BatchGraphStatementBuilder(); - } - - /** - * Create a builder helper object to start creating a new instance with an existing statement as a - * template. The traversals and options set on the template will be copied for the new statement - * at the moment this method is called. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchGraphStatementBuilder builder(@NonNull BatchGraphStatement template) { - return new BatchGraphStatementBuilder(template); - } - - /** - * Add a traversal to this statement. If many traversals need to be added, use a {@link - * #builder()}, or the {@link #addTraversals(Iterable)} method instead to avoid intermediary - * copies. - */ - @NonNull - BatchGraphStatement addTraversal(@NonNull GraphTraversal traversal); - - /** - * Adds several traversals to this statement. If this method is to be called many times, consider - * using a {@link #builder()} instead to avoid intermediary copies. - */ - @NonNull - BatchGraphStatement addTraversals(@NonNull Iterable traversals); - - /** Get the number of traversals already added to this statement. */ - int size(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java deleted file mode 100644 index ac1b85bdc71..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultBatchGraphStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A builder to create a batch graph statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class BatchGraphStatementBuilder - extends GraphStatementBuilderBase { - - private ImmutableList.Builder traversalsBuilder = ImmutableList.builder(); - private int traversalsCount; - - public BatchGraphStatementBuilder() { - // nothing to do - } - - public BatchGraphStatementBuilder(BatchGraphStatement template) { - super(template); - traversalsBuilder.addAll(template); - traversalsCount = template.size(); - } - - /** Add a traversal to this builder to include in the generated {@link BatchGraphStatement}. */ - @NonNull - public BatchGraphStatementBuilder addTraversal(@NonNull GraphTraversal traversal) { - traversalsBuilder.add(traversal); - traversalsCount += 1; - return this; - } - - /** - * Add several traversals to this builder to include in the generated {@link BatchGraphStatement}. - */ - @NonNull - public BatchGraphStatementBuilder addTraversals(@NonNull Iterable traversals) { - for (GraphTraversal traversal : traversals) { - traversalsBuilder.add(traversal); - traversalsCount += 1; - } - return this; - } - - /** - * Add several traversals to this builder to include in the generated {@link BatchGraphStatement}. - */ - @NonNull - public BatchGraphStatementBuilder addTraversals(@NonNull GraphTraversal... traversals) { - for (GraphTraversal traversal : traversals) { - traversalsBuilder.add(traversal); - traversalsCount += 1; - } - return this; - } - - /** Clears all the traversals previously added to this builder. */ - @NonNull - public BatchGraphStatementBuilder clearTraversals() { - traversalsBuilder = ImmutableList.builder(); - traversalsCount = 0; - return this; - } - - /** Returns the number of traversals added to this statement so far. 
*/ - public int getTraversalsCount() { - return traversalsCount; - } - - @NonNull - @Override - public BatchGraphStatement build() { - return new DefaultBatchGraphStatement( - traversalsBuilder.build(), - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - buildCustomPayload(), - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java deleted file mode 100644 index dd1dbe95bc8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultDseRemoteConnectionBuilder; -import com.datastax.oss.driver.api.core.CqlSession; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; - -/** - * General purpose utility class for interaction with DSE Graph via the DataStax Enterprise Java - * driver. - */ -public class DseGraph { - - /** - * IMPORTANT: As of Tinkerpop 3.3.5, you should no longer use this shortcut if you intend - * to connect the traversal to DSE Graph using a {@linkplain - * org.apache.tinkerpop.gremlin.process.remote.RemoteConnection remote connection}, for example - * via the {@link #remoteConnectionBuilder} method declared below. Instead of: - * - *

{@code
-   * DseSession session = ...;
-   * RemoteConnection remoteConnection = DseGraph.remoteConnectionBuilder(session).build();
-   * GraphTraversalSource g = DseGraph.g.withRemote(remoteConnection);
-   * }
- * - * You should now use {@link AnonymousTraversalSource#traversal()}, and adopt the following idiom: - * - *
{@code
-   * DseSession session = ...;
-   * RemoteConnection remoteConnection = DseGraph.remoteConnectionBuilder(session).build();
-   * GraphTraversalSource g = AnonymousTraversalSource.traversal().withRemote(remoteConnection);
-   * }
- * - * A general-purpose shortcut for a non-connected TinkerPop {@link GraphTraversalSource} - * based on an immutable empty graph. This is really just a shortcut to {@code - * EmptyGraph.instance().traversal();}. - * - *

It can be used to create {@link FluentGraphStatement} instances (recommended); for ease of - * use you may statically import this variable. - * - *

Calling {@code g.getGraph()} will return a local immutable empty graph which is in no way - * connected to the DSE Graph server, it will not allow to modify a DSE Graph directly. To act on - * data stored in DSE Graph you must use {@linkplain - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal traversal}s such as - * {@code DseGraph.g.V()}, {@code DseGraph.g.addV/addE()}. - */ - public static final GraphTraversalSource g = EmptyGraph.instance().traversal(); - - /** - * Returns a builder helper class to help create {@link - * org.apache.tinkerpop.gremlin.process.remote.RemoteConnection} implementations that seamlessly - * connect to DSE Graph using the {@link CqlSession} in parameter. - */ - public static DseGraphRemoteConnectionBuilder remoteConnectionBuilder(CqlSession dseSession) { - return new DefaultDseRemoteConnectionBuilder(dseSession); - } - - private DseGraph() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java deleted file mode 100644 index c4210a5b3dd..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; - -/** - * A builder helper to create a {@link RemoteConnection} that will be used to build - * implicitly-executing fluent traversals. - * - *

To create an instance of this, use the {@link DseGraph#remoteConnectionBuilder(CqlSession)} - * method: - * - *

{@code
- * DseSession dseSession = DseSession.builder().build();
- * GraphTraversalSource g = AnonymousTraversalSource.traversal().withRemote(DseGraph.remoteConnectionBuilder(dseSession).build());
- * List vertices = g.V().hasLabel("person").toList();
- * }
- * - * @see CqlSession - */ -public interface DseGraphRemoteConnectionBuilder { - - /** Build the remote connection that was configured with this builder. */ - RemoteConnection build(); - - /** - * Set a configuration profile that will be used for every traversal built using the remote - * connection. - * - *

For the list of options available for Graph requests, see the {@code reference.conf} - * configuration file. - */ - DseGraphRemoteConnectionBuilder withExecutionProfile(DriverExecutionProfile executionProfile); - - /** - * Set the name of an execution profile that will be used for every traversal using from the - * remote connection. Named profiles are pre-defined in the driver configuration. - * - *

For the list of options available for Graph requests, see the {@code reference.conf} - * configuration file. - */ - DseGraphRemoteConnectionBuilder withExecutionProfileName(String executionProfileName); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java deleted file mode 100644 index 051c6501c65..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultFluentGraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A graph statement that uses a TinkerPop {@link GraphTraversal} as the query. - * - *

Typically used like so: - * - *

{@code
- * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
- *
- * FluentGraphStatement statement = FluentGraphStatement.newInstance(g.V().has("name", "marko"));
- *
- * GraphResultSet graphResultSet = dseSession.execute(statement);
- * }
- * - * @see DseGraph#g - */ -public interface FluentGraphStatement extends GraphStatement { - - /** - * Create a new instance from the given traversal. - * - *

Use {@link #builder(GraphTraversal)} if you want to set more options before building the - * final statement instance. - */ - @NonNull - static FluentGraphStatement newInstance(@NonNull GraphTraversal traversal) { - return new DefaultFluentGraphStatement( - traversal, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** - * Create a builder object to start creating a new instance from the given traversal. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static FluentGraphStatementBuilder builder(@NonNull GraphTraversal traversal) { - return new FluentGraphStatementBuilder(traversal); - } - - /** - * Create a builder helper object to start creating a new instance with an existing statement as a - * template. The traversal and options set on the template will be copied for the new statement at - * the moment this method is called. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static FluentGraphStatementBuilder builder(@NonNull FluentGraphStatement template) { - return new FluentGraphStatementBuilder(template); - } - - /** The underlying TinkerPop object representing the traversal executed by this statement. */ - @NonNull - GraphTraversal getTraversal(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java deleted file mode 100644 index 59e588c564a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultFluentGraphStatement; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -/** - * A builder to create a fluent graph statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class FluentGraphStatementBuilder - extends GraphStatementBuilderBase { - - private GraphTraversal traversal; - - public FluentGraphStatementBuilder(@NonNull GraphTraversal traversal) { - this.traversal = traversal; - } - - public FluentGraphStatementBuilder(@NonNull FluentGraphStatement template) { - super(template); - this.traversal = template.getTraversal(); - } - - @NonNull - @Override - public FluentGraphStatement build() { - return new DefaultFluentGraphStatement( - this.traversal, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - buildCustomPayload(), - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java deleted file mode 100644 index 758f6b358ed..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; - -/** - * Information about the execution of a graph statement. - * - * @deprecated This interface is not used by any driver component anymore; the driver now exposes - * instances of {@link com.datastax.oss.driver.api.core.cql.ExecutionInfo} for all Graph - * queries. - */ -@Deprecated -public interface GraphExecutionInfo { - - /** The statement that was executed. */ - GraphStatement getStatement(); - - /** The node that was used as a coordinator to successfully complete the query. */ - Node getCoordinator(); - - /** - * The number of speculative executions that were started for this query. - * - *

This does not include the initial, normal execution of the query. Therefore, if speculative - * executions are disabled, this will always be 0. If they are enabled and one speculative - * execution was triggered in addition to the initial execution, this will be 1, etc. - * - * @see SpeculativeExecutionPolicy - */ - int getSpeculativeExecutionCount(); - - /** - * The index of the execution that completed this query. - * - *

0 represents the initial, normal execution of the query, 1 the first speculative execution, - * etc. - * - * @see SpeculativeExecutionPolicy - */ - int getSuccessfulExecutionIndex(); - - /** - * The errors encountered on previous coordinators, if any. - * - *

The list is in chronological order, based on the time that the driver processed the error - * responses. If speculative executions are enabled, they run concurrently so their errors will be - * interleaved. A node can appear multiple times (if the retry policy decided to retry on the same - * node). - */ - List> getErrors(); - - /** - * The server-side warnings for this query, if any (otherwise the list will be empty). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this list will always be empty. - */ - List getWarnings(); - - /** - * The custom payload sent back by the server with the response, if any (otherwise the map will be - * empty). - * - *

This method returns a read-only view of the original map, but its values remain inherently - * mutable. If multiple clients will read these values, care should be taken not to corrupt the - * data (in particular, preserve the indices by calling {@link ByteBuffer#duplicate()}). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this map will always be empty. - */ - Map getIncomingPayload(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java deleted file mode 100644 index 97d48a6b04d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -/** - * A node in a tree-like structure representing a Graph or a Graph component. - * - *

It can be: - * - *

    - *
  • a scalar value of a primitive type (boolean, string, int, long, double); - *
  • a graph element (vertex, edge, path or property); - *
  • a list of nodes; - *
  • a set of nodes; - *
  • a map of nodes. - *
- * - * This interface provides test methods to find out what a node represents, and conversion methods - * to cast it to a particular Java type. Two generic methods {@link #as(Class)} and {@link - * #as(GenericType)} can produce any arbitrary Java type, provided that the underlying serialization - * runtime has been correctly configured to support the requested conversion. - */ -public interface GraphNode { - - /** Whether this node represents a {@code null} value. */ - boolean isNull(); - - /** - * Returns {@code true} if this node is a {@link Map}, and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asMap()}, or use - * {@link #keys()} and {@link #getByKey(Object)} to access the individual fields (note that - * entries are not ordered, so {@link #getByIndex(int)} does not work). - */ - boolean isMap(); - - /** The keys of this map node, or an empty iterator if it is not a map. */ - Iterable keys(); - - /** - * Returns the value for the given key as a node. - * - *

If this node is not a map, or does not contain the specified key, {@code null} is returned. - * - *

If the property value has been explicitly set to {@code null}, implementors may return a - * special "null node" instead of {@code null}. - */ - GraphNode getByKey(Object key); - - /** Deserializes and returns this node as a {@link Map}. */ - Map asMap(); - - /** - * Returns {@code true} if this node is a {@link List}, and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asList()}, or - * use {@link #size()} and {@link #getByIndex(int)} to access the individual fields. - */ - boolean isList(); - - /** The size of the current node, if it is a list or map, or {@code 0} otherwise. */ - int size(); - - /** - * Returns the element at the given index as a node. - * - *

If this node is not a list, or {@code index} is out of bounds (i.e. less than zero or {@code - * >= size()}, {@code null} is returned; no exception will be thrown. - * - *

If the requested element has been explicitly set to {@code null}, implementors may return a - * special "null node" instead of {@code null}. - */ - GraphNode getByIndex(int index); - - /** Deserializes and returns this node as a {@link List}. */ - List asList(); - - /** - * Returns {@code true} if this node is a simple scalar value, (i.e., string, boolean or number), - * and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asString()}, - * {@link #asBoolean()}, {@link #asInt()}, {@link #asLong()} or {@link #asDouble()}. - */ - boolean isValue(); - - /** - * Returns this node as an integer. - * - *

If the underlying object is not convertible to integer, implementors may choose to either - * throw {@link ClassCastException} or return [null | empty | some default value], whichever is - * deemed more appropriate. - */ - int asInt(); - - /** - * Returns this node as a boolean. - * - *

If the underlying object is not convertible to boolean, implementors may choose to either - * throw {@link ClassCastException} or return [null | empty | some default value], whichever is - * deemed more appropriate. - */ - boolean asBoolean(); - - /** - * Returns this node as a long integer. - * - *

If the underlying object is not convertible to long, implementors may choose to either throw - * {@link ClassCastException} or return [null | empty | some default value], whichever is deemed - * more appropriate. - */ - long asLong(); - - /** - * Returns this node as a long integer. - * - *

If the underlying object is not convertible to double, implementors may choose to either - * throw {@link ClassCastException} or return [null | empty | some default value], whichever is - * deemed more appropriate. - */ - double asDouble(); - - /** - * A valid string representation of this node. - * - *

If the underlying object is not convertible to a string, implementors may choose to either - * throw {@link ClassCastException} or return an empty string, whichever is deemed more - * appropriate. - */ - String asString(); - - /** - * Deserializes and returns this node as an instance of {@code clazz}. - * - *

Before attempting such a conversion, there must be an appropriate converter configured on - * the underlying serialization runtime. - */ - ResultT as(Class clazz); - - /** - * Deserializes and returns this node as an instance of the given {@link GenericType type}. - * - *

Before attempting such a conversion, there must be an appropriate converter configured on - * the underlying serialization runtime. - */ - ResultT as(GenericType type); - - /** - * Returns {@code true} if this node is a {@link Vertex}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asVertex()} can be safely called. - */ - boolean isVertex(); - - /** Returns this node as a Tinkerpop {@link Vertex}. */ - Vertex asVertex(); - - /** - * Returns {@code true} if this node is a {@link Edge}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asEdge()} can be safely called. - */ - boolean isEdge(); - - /** Returns this node as a Tinkerpop {@link Edge}. */ - Edge asEdge(); - - /** - * Returns {@code true} if this node is a {@link Path}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asPath()} can be safely called. - */ - boolean isPath(); - - /** Returns this node as a Tinkerpop {@link Path}. */ - Path asPath(); - - /** - * Returns {@code true} if this node is a {@link Property}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asProperty()} can be safely called. - */ - boolean isProperty(); - - /** Returns this node as a Tinkerpop {@link Property}. */ - Property asProperty(); - - /** - * Returns {@code true} if this node is a {@link VertexProperty}, and {@code false} otherwise. - * - *

If this method returns {@code true}, then {@link #asVertexProperty()} ()} can be safely - * called. - */ - boolean isVertexProperty(); - - /** Returns this node as a Tinkerpop {@link VertexProperty}. */ - VertexProperty asVertexProperty(); - - /** - * Returns {@code true} if this node is a {@link Set}, and {@code false} otherwise. - * - *

If this method returns {@code true}, you can convert this node with {@link #asSet()}, or use - * {@link #size()}. - */ - boolean isSet(); - - /** Deserializes and returns this node as a {@link Set}. */ - Set asSet(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java deleted file mode 100644 index d9c8d8fa460..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.GraphExecutionInfoConverter; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -/** - * The result of a synchronous Graph query. - * - *

This object is a container for {@link GraphNode} objects that will contain the data returned - * by Graph queries. - * - *

Note that this object can only be iterated once: items are "consumed" as they are read, - * subsequent calls to {@code iterator()} will return the same iterator instance. - * - *

The default implementation returned by the driver is not thread-safe. It can only be - * iterated by the thread that invoked {@code dseSession.execute}. - * - * @see GraphNode - * @see GraphSession#execute(GraphStatement) - */ -public interface GraphResultSet extends Iterable { - - /** - * Returns the next node, or {@code null} if the result set is exhausted. - * - *

This is convenient for queries that are known to return exactly one row, for example count - * queries. - */ - @Nullable - default GraphNode one() { - Iterator graphNodeIterator = iterator(); - return graphNodeIterator.hasNext() ? graphNodeIterator.next() : null; - } - - /** - * Returns all the remaining nodes as a list; not recommended for paginated queries that return - * a large number of nodes. - * - *

At this time (DSE 6.0.0), graph queries are not paginated and the server sends all the - * results at once. - */ - @NonNull - default List all() { - if (!iterator().hasNext()) { - return Collections.emptyList(); - } - return ImmutableList.copyOf(this); - } - - /** - * Cancels the query and asks the server to stop sending results. - * - *

At this time (DSE 6.0.0), graph queries are not paginated and the server sends all the - * results at once; therefore this method has no effect. - */ - void cancel(); - - /** - * The execution information for the query that have been performed to assemble this result set. - */ - @NonNull - default ExecutionInfo getRequestExecutionInfo() { - return GraphExecutionInfoConverter.convert(getExecutionInfo()); - } - - /** @deprecated Use {@link #getRequestExecutionInfo()} instead. */ - @Deprecated - @NonNull - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java deleted file mode 100644 index b985bc56353..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** - * A session that has the ability to execute DSE Graph requests. - * - *

Generally this interface won't be referenced directly in an application; instead, you should - * use {@link CqlSession}, which is a combination of this interface and many others for a more - * integrated usage of DataStax Enterprise's multi-model database via a single entry point. However, - * it is still possible to cast a {@code CqlSession} to a {@code GraphSession} to only expose the - * DSE Graph execution methods. - */ -public interface GraphSession extends Session { - - /** - * Executes a graph statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

The driver provides different kinds of graph statements: - * - *

    - *
  • {@link FluentGraphStatement} (recommended): wraps a fluent TinkerPop {@linkplain - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal traversal}; - *
  • {@link BatchGraphStatement}: groups together multiple mutating traversals ({@code - * g.addV()/g.addE()}) inside a single transaction and avoids multiple client-server - * round-trips. Improves performance in data ingestion scenarios; - *
  • {@link ScriptGraphStatement}: wraps a Gremlin-groovy script provided as a plain Java - * string. Required for administrative queries such as creating/dropping a graph, - * configuration and schema. - *
- * - *

This feature is only available with DataStax Enterprise. Executing graph queries against an - * Apache Cassandra® cluster will result in a runtime error. - * - * @see GraphResultSet - * @param graphStatement the graph query to execute (that can be any {@code GraphStatement}). - * @return the result of the graph query. That result will never be null but can be empty. - */ - @NonNull - default GraphResultSet execute(@NonNull GraphStatement graphStatement) { - return Objects.requireNonNull( - execute(graphStatement, GraphStatement.SYNC), - "The graph processor should never return a null result"); - } - - /** - * Executes a graph statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This feature is only available with DataStax Enterprise. Executing graph queries against an - * Apache Cassandra® cluster will result in a runtime error. - * - * @see #execute(GraphStatement) - * @see AsyncGraphResultSet - * @param graphStatement the graph query to execute (that can be any {@code GraphStatement}). - * @return the {@code CompletionStage} on the result of the graph query. - */ - @NonNull - default CompletionStage executeAsync( - @NonNull GraphStatement graphStatement) { - return Objects.requireNonNull( - execute(graphStatement, GraphStatement.ASYNC), - "The graph processor should never return a null result"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java deleted file mode 100644 index f770469b824..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; - -/** - * A request to execute a DSE Graph query. - * - * @param the "self type" used for covariant returns in subtypes. - */ -public interface GraphStatement> extends Request { - - /** - * The type returned when a graph statement is executed synchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but graph statements will generally be run with one of - * the DSE driver's built-in helper methods (such as {@link CqlSession#execute(GraphStatement)}). - */ - GenericType SYNC = GenericType.of(GraphResultSet.class); - - /** - * The type returned when a graph statement is executed asynchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but graph statements will generally be run with one of - * the DSE driver's built-in helper methods (such as {@link - * CqlSession#executeAsync(GraphStatement)}). - */ - GenericType> ASYNC = - new GenericType>() {}; - - /** - * Set the idempotence to use for execution. - * - *

Idempotence defines whether it will be possible to speculatively re-execute the statement, - * based on a {@link SpeculativeExecutionPolicy}. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param idempotent a boolean instance to set a statement-specific value, or {@code null} to use - * the default idempotence defined in the configuration. - */ - @NonNull - @CheckReturnValue - SelfT setIdempotent(@Nullable Boolean idempotent); - - /** - * {@inheritDoc} - * - *

Note that, if this method returns {@code null}, graph statements fall back to a dedicated - * configuration option: {@code basic.graph.timeout}. See {@code reference.conf} in the DSE driver - * distribution for more details. - */ - @Nullable - @Override - Duration getTimeout(); - - /** - * Sets how long to wait for this request to complete. This is a global limit on the duration of a - * session.execute() call, including any retries the driver might do. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newTimeout the timeout to use, or {@code null} to use the default value defined in the - * configuration. - * @see #getTimeout() - */ - @NonNull - @CheckReturnValue - SelfT setTimeout(@Nullable Duration newTimeout); - - /** - * Sets the {@link Node} that should handle this query. - * - *

In the general case, use of this method is heavily discouraged and should only be - * used in specific cases, such as applying a series of schema changes, which may be advantageous - * to execute in sequence on the same node. - * - *

Configuring a specific node causes the configured {@link LoadBalancingPolicy} to be - * completely bypassed. However, if the load balancing policy dictates that the node is at - * distance {@link NodeDistance#IGNORED} or there is no active connectivity to the node, the - * request will fail with a {@link NoNodeAvailableException}. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newNode The node that should be used to handle executions of this statement or null to - * delegate to the configured load balancing policy. - */ - @NonNull - @CheckReturnValue - SelfT setNode(@Nullable Node newNode); - - /** - * Get the timestamp set on the statement. - * - *

By default, if left unset, the value returned by this is {@code Long.MIN_VALUE}, which means - * that the timestamp will be set via the Timestamp Generator. - * - * @return the timestamp set on this statement. - */ - long getTimestamp(); - - /** - * Set the timestamp to use for execution. - * - *

By default the timestamp generator (see reference config file) will be used for timestamps, - * unless set explicitly via this method. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @CheckReturnValue - SelfT setTimestamp(long timestamp); - - /** - * Sets the configuration profile to use for execution. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile); - - /** - * Sets the name of the driver configuration profile that will be used for execution. - * - *

For all the driver's built-in implementations, this method has no effect if {@link - * #setExecutionProfile} has been called with a non-null argument. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfileName(@Nullable String name); - - /** - * Sets the custom payload to use for execution. - * - *

This is intended for advanced use cases, such as tools with very advanced knowledge of DSE - * Graph, and reserved for internal settings like transaction settings. Note that the driver also - * adds graph-related options to the payload, in addition to the ones provided here; it won't - * override any option that is already present. - * - *

All the driver's built-in statement implementations are immutable, and return a new instance - * from this method. However custom implementations may choose to be mutable and return the same - * instance. - * - *

Note that it's your responsibility to provide a thread-safe map. This can be achieved with a - * concurrent or immutable implementation, or by making it effectively immutable (meaning that - * it's never modified after being set on the statement). - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setCustomPayload(@NonNull Map newCustomPayload); - - /** - * The name of the graph to use for this statement. - * - *

This is the programmatic equivalent of the configuration option {@code basic.graph.name}, - * and takes precedence over it. That is, if this property is non-null, then the configuration - * will be ignored. - */ - @Nullable - String getGraphName(); - - /** - * Sets the graph name. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getGraphName() - */ - @NonNull - @CheckReturnValue - SelfT setGraphName(@Nullable String newGraphName); - - /** - * The name of the traversal source to use for this statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.graph.traversal-source}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - */ - @Nullable - String getTraversalSource(); - - /** - * Sets the traversal source. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getTraversalSource() - */ - @NonNull - @CheckReturnValue - SelfT setTraversalSource(@Nullable String newTraversalSource); - - /** - * The DSE graph sub-protocol to use for this statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * advanced.graph.sub-protocol}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - */ - @Nullable - String getSubProtocol(); - - /** - * Sets the sub-protocol. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getSubProtocol() - */ - @NonNull - @CheckReturnValue - SelfT setSubProtocol(@Nullable String newSubProtocol); - - /** - * Returns the consistency level to use for the statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.request.consistency}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - */ - @Nullable - ConsistencyLevel getConsistencyLevel(); - - /** - * Sets the consistency level to use for this statement. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newConsistencyLevel the consistency level to use, or null to use the default value - * defined in the configuration. - * @see #getConsistencyLevel() - */ - @CheckReturnValue - SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel); - - /** - * The consistency level to use for the internal read queries that will be produced by this - * statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.graph.read-consistency-level}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - * - *

If this property isn't set here or in the configuration, the default consistency level will - * be used ({@link #getConsistencyLevel()} or {@code basic.request.consistency}). - */ - @Nullable - ConsistencyLevel getReadConsistencyLevel(); - - /** - * Sets the read consistency level. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getReadConsistencyLevel() - */ - @NonNull - @CheckReturnValue - SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel newReadConsistencyLevel); - - /** - * The consistency level to use for the internal write queries that will be produced by this - * statement. - * - *

This is the programmatic equivalent of the configuration option {@code - * basic.graph.write-consistency-level}, and takes precedence over it. That is, if this property - * is non-null, then the configuration will be ignored. - * - *

If this property isn't set here or in the configuration, the default consistency level will - * be used ({@link #getConsistencyLevel()} or {@code basic.request.consistency}). - */ - @Nullable - ConsistencyLevel getWriteConsistencyLevel(); - - /** - * Sets the write consistency level. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #getWriteConsistencyLevel() - */ - @NonNull - @CheckReturnValue - SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel newWriteConsistencyLevel); - - /** Graph statements do not have a per-query keyspace, this method always returns {@code null}. */ - @Nullable - @Override - default CqlIdentifier getKeyspace() { - return null; - } - - /** Graph statements can't be routed, this method always returns {@code null}. */ - @Nullable - @Override - default CqlIdentifier getRoutingKeyspace() { - return null; - } - - /** Graph statements can't be routed, this method always returns {@code null}. */ - @Nullable - @Override - default ByteBuffer getRoutingKey() { - return null; - } - - /** Graph statements can't be routed, this method always returns {@code null}. */ - @Nullable - @Override - default Token getRoutingToken() { - return null; - } - - /** - * Whether tracing information should be recorded for this statement. - * - *

This method is only exposed for future extensibility. At the time of writing, graph - * statements do not support tracing, and this always returns {@code false}. - */ - default boolean isTracing() { - return false; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java deleted file mode 100644 index 5cb48613cf5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public abstract class GraphStatementBuilderBase< - SelfT extends GraphStatementBuilderBase, - StatementT extends GraphStatement> { - - @SuppressWarnings({"unchecked"}) - private final SelfT self = (SelfT) this; - - protected Boolean isIdempotent; - protected Duration timeout; - protected Node node; - protected long timestamp = Statement.NO_DEFAULT_TIMESTAMP; - protected DriverExecutionProfile executionProfile; - protected String executionProfileName; - private NullAllowingImmutableMap.Builder customPayloadBuilder; - protected String graphName; - protected String traversalSource; - protected String subProtocol; - protected ConsistencyLevel consistencyLevel; - protected ConsistencyLevel readConsistencyLevel; - protected ConsistencyLevel writeConsistencyLevel; - - protected GraphStatementBuilderBase() { - // nothing to do - } - - protected GraphStatementBuilderBase(StatementT template) { - this.isIdempotent = template.isIdempotent(); - this.timeout = template.getTimeout(); - this.node = template.getNode(); - this.timestamp = template.getTimestamp(); - this.executionProfile = template.getExecutionProfile(); - this.executionProfileName = template.getExecutionProfileName(); - if (!template.getCustomPayload().isEmpty()) { - this.customPayloadBuilder = - NullAllowingImmutableMap.builder() - .putAll(template.getCustomPayload()); - } - this.graphName = 
template.getGraphName(); - this.traversalSource = template.getTraversalSource(); - this.subProtocol = template.getSubProtocol(); - this.consistencyLevel = template.getConsistencyLevel(); - this.readConsistencyLevel = template.getReadConsistencyLevel(); - this.writeConsistencyLevel = template.getWriteConsistencyLevel(); - } - - /** @see GraphStatement#setIdempotent(Boolean) */ - @NonNull - public SelfT setIdempotence(@Nullable Boolean idempotent) { - this.isIdempotent = idempotent; - return self; - } - - /** @see GraphStatement#setTimeout(Duration) */ - @NonNull - public SelfT setTimeout(@Nullable Duration timeout) { - this.timeout = timeout; - return self; - } - - /** @see GraphStatement#setNode(Node) */ - @NonNull - public SelfT setNode(@Nullable Node node) { - this.node = node; - return self; - } - - /** @see GraphStatement#setTimestamp(long) */ - @NonNull - public SelfT setTimestamp(long timestamp) { - this.timestamp = timestamp; - return self; - } - - /** @see GraphStatement#setExecutionProfileName(String) */ - @NonNull - public SelfT setExecutionProfileName(@Nullable String executionProfileName) { - this.executionProfileName = executionProfileName; - return self; - } - - /** @see GraphStatement#setExecutionProfile(DriverExecutionProfile) */ - @NonNull - public SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile) { - this.executionProfile = executionProfile; - this.executionProfileName = null; - return self; - } - - /** @see GraphStatement#setCustomPayload(Map) */ - @NonNull - public SelfT addCustomPayload(@NonNull String key, @Nullable ByteBuffer value) { - if (customPayloadBuilder == null) { - customPayloadBuilder = NullAllowingImmutableMap.builder(); - } - customPayloadBuilder.put(key, value); - return self; - } - - /** @see GraphStatement#setCustomPayload(Map) */ - @NonNull - public SelfT clearCustomPayload() { - customPayloadBuilder = null; - return self; - } - - /** @see GraphStatement#setGraphName(String) */ - @NonNull - public 
SelfT setGraphName(@Nullable String graphName) { - this.graphName = graphName; - return self; - } - - /** @see GraphStatement#setTraversalSource(String) */ - @NonNull - public SelfT setTraversalSource(@Nullable String traversalSource) { - this.traversalSource = traversalSource; - return self; - } - - /** @see GraphStatement#setSubProtocol(String) */ - @NonNull - public SelfT setSubProtocol(@Nullable String subProtocol) { - this.subProtocol = subProtocol; - return self; - } - - /** @see GraphStatement#setConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setConsistencyLevel(@Nullable ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - return self; - } - - /** @see GraphStatement#setReadConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel readConsistencyLevel) { - this.readConsistencyLevel = readConsistencyLevel; - return self; - } - - /** @see GraphStatement#setWriteConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel writeConsistencyLevel) { - this.writeConsistencyLevel = writeConsistencyLevel; - return self; - } - - @NonNull - protected Map buildCustomPayload() { - return (customPayloadBuilder == null) - ? NullAllowingImmutableMap.of() - : customPayloadBuilder.build(); - } - - /** Create the statement with the configuration defined by this builder object. */ - @NonNull - public abstract StatementT build(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java deleted file mode 100644 index f59d0e50e93..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -public enum PagingEnabledOptions { - ENABLED, - DISABLED, - AUTO -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java deleted file mode 100644 index 2ad7aafc232..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultScriptGraphStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; - -/** - * A graph statement that uses a Gremlin-groovy script the query. - * - *

These statements are generally used for DSE Graph set-up queries, such as creating or dropping - * a graph, or defining a graph schema. For graph traversals, we recommend using {@link - * FluentGraphStatement} instead. To do bulk data ingestion in graph, we recommend using {@link - * BatchGraphStatement} instead. - * - *

Typical usage: - * - *

{@code
- * ScriptGraphStatement statement = ScriptGraphStatement.newInstance("schema.propertyKey('age').Int().create()");
- *
- * GraphResultSet graphResultSet = dseSession.execute(statement);
- * }
- */ -public interface ScriptGraphStatement extends GraphStatement { - - /** Create a new instance from the given script. */ - @NonNull - static ScriptGraphStatement newInstance(@NonNull String script) { - return new DefaultScriptGraphStatement( - script, - NullAllowingImmutableMap.of(), - null, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - /** - * Create a builder object to start creating a new instance from the given script. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static ScriptGraphStatementBuilder builder(@NonNull String script) { - return new ScriptGraphStatementBuilder(script); - } - - /** - * Create a builder helper object to start creating a new instance with an existing statement as a - * template. The script and options set on the template will be copied for the new statement at - * the moment this method is called. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static ScriptGraphStatementBuilder builder(@NonNull ScriptGraphStatement template) { - return new ScriptGraphStatementBuilder(template); - } - - /** The Gremlin-groovy script representing the graph query. */ - @NonNull - String getScript(); - - /** - * Whether the statement is a system query, or {@code null} if it defaults to the value defined in - * the configuration. - * - * @see #setSystemQuery(Boolean) - */ - @Nullable - Boolean isSystemQuery(); - - /** - * Defines if this statement is a system query. - * - *

Script statements that access the {@code system} variable must not specify a graph - * name (otherwise {@code system} is not available). However, if your application executes a lot - * of non-system statements, it is convenient to configure the graph name in your configuration to - * avoid repeating it every time. This method allows you to ignore that global graph name for a - * specific statement. - * - *

This property is the programmatic equivalent of the configuration option {@code - * basic.graph.is-system-query}, and takes precedence over it. That is, if this property is - * non-null, then the configuration will be ignored. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newValue {@code true} to mark this statement as a system query (the driver will ignore - * any graph name set on the statement or the configuration); {@code false} to mark it as a - * non-system query; {@code null} to default to the value defined in the configuration. - * @see #isSystemQuery() - */ - @NonNull - ScriptGraphStatement setSystemQuery(@Nullable Boolean newValue); - - /** - * The query parameters to send along the request. - * - * @see #setQueryParam(String, Object) - */ - @NonNull - Map getQueryParams(); - - /** - * Set a value for a parameter defined in the Groovy script. - * - *

The script engine in the DSE Graph server allows to define parameters in a Groovy script and - * set the values of these parameters as a binding. Defining parameters allows to re-use scripts - * and only change their parameters values, which improves the performance of the script executed, - * so defining parameters is encouraged; however, for optimal Graph traversal performance, we - * recommend either using {@link BatchGraphStatement}s for data ingestion, or {@link - * FluentGraphStatement} for normal traversals. - * - *

Parameters in a Groovy script are always named; unlike CQL, they are not prefixed by a - * column ({@code :}). - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * If many parameters are to be set in a query, it is recommended to create the statement with - * {@link #builder(String)} instead. - * - * @param name the name of the parameter defined in the script. If the statement already had a - * binding for this name, it gets replaced. - * @param value the value that will be transmitted with the request. - */ - @NonNull - ScriptGraphStatement setQueryParam(@NonNull String name, @Nullable Object value); - - /** - * Removes a binding for the given name from this statement. - * - *

If the statement did not have such a binding, this method has no effect and returns the same - * statement instance. Otherwise, the driver's built-in implementation returns a new instance - * (however custom implementations may choose to be mutable and return the same instance). - * - * @see #setQueryParam(String, Object) - */ - @NonNull - ScriptGraphStatement removeQueryParam(@NonNull String name); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java deleted file mode 100644 index 1985c58955f..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.DefaultScriptGraphStatement; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a script graph statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class ScriptGraphStatementBuilder - extends GraphStatementBuilderBase { - - private String script; - private Boolean isSystemQuery; - private final Map queryParams; - - public ScriptGraphStatementBuilder() { - this.queryParams = Maps.newHashMap(); - } - - public ScriptGraphStatementBuilder(String script) { - this.script = script; - this.queryParams = Maps.newHashMap(); - } - - public ScriptGraphStatementBuilder(ScriptGraphStatement template) { - super(template); - this.script = template.getScript(); - this.queryParams = Maps.newHashMap(template.getQueryParams()); - this.isSystemQuery = template.isSystemQuery(); - } - - @NonNull - public ScriptGraphStatementBuilder setScript(@NonNull String script) { - this.script = script; - return this; - } - - /** @see ScriptGraphStatement#isSystemQuery() */ - @NonNull - public ScriptGraphStatementBuilder setSystemQuery(@Nullable Boolean isSystemQuery) { - this.isSystemQuery = isSystemQuery; - return this; - } - - /** - * Set a value for a parameter defined in the script query. - * - * @see ScriptGraphStatement#setQueryParam(String, Object) - */ - @NonNull - public ScriptGraphStatementBuilder setQueryParam(@NonNull String name, @Nullable Object value) { - this.queryParams.put(name, value); - return this; - } - - /** - * Set multiple values for named parameters defined in the script query. - * - * @see ScriptGraphStatement#setQueryParam(String, Object) - */ - @NonNull - public ScriptGraphStatementBuilder setQueryParams(@NonNull Map params) { - this.queryParams.putAll(params); - return this; - } - - /** - * Removes a parameter. - * - *

This is useful if the builder was {@linkplain - * ScriptGraphStatement#builder(ScriptGraphStatement) initialized with a template statement} that - * has more parameters than desired. - * - * @see ScriptGraphStatement#setQueryParam(String, Object) - * @see #clearQueryParams() - */ - @NonNull - public ScriptGraphStatementBuilder removeQueryParam(@NonNull String name) { - this.queryParams.remove(name); - return this; - } - - /** Clears all the parameters previously added to this builder. */ - public ScriptGraphStatementBuilder clearQueryParams() { - this.queryParams.clear(); - return this; - } - - @NonNull - @Override - public ScriptGraphStatement build() { - Preconditions.checkNotNull(this.script, "Script hasn't been defined in this builder."); - return new DefaultScriptGraphStatement( - this.script, - this.queryParams, - this.isSystemQuery, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - buildCustomPayload(), - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java deleted file mode 100644 index fdbf3fbe397..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import com.datastax.dse.driver.internal.core.graph.CqlCollectionPredicate; -import java.util.Collection; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.javatuples.Pair; - -/** - * Predicates that can be used on CQL collections (lists, sets and maps). - * - *

Note: CQL collection predicates are only available when using the binary subprotocol. - */ -public class CqlCollection { - - /** - * Checks if the target collection contains the given value. - * - * @param value the value to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. - */ - @SuppressWarnings("unchecked") - public static , V> P contains(V value) { - return new P(CqlCollectionPredicate.contains, value); - } - - /** - * Checks if the target map contains the given key. - * - * @param key the key to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. - */ - @SuppressWarnings("unchecked") - public static , K> P containsKey(K key) { - return new P(CqlCollectionPredicate.containsKey, key); - } - - /** - * Checks if the target map contains the given value. - * - * @param value the value to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. - */ - @SuppressWarnings("unchecked") - public static , V> P containsValue(V value) { - return new P(CqlCollectionPredicate.containsValue, value); - } - - /** - * Checks if the target map contains the given entry. - * - * @param key the key to look for; cannot be {@code null}. - * @param value the value to look for; cannot be {@code null}. - * @return a predicate to apply in a {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. 
- */ - @SuppressWarnings("unchecked") - public static , K, V> P entryEq(K key, V value) { - return new P(CqlCollectionPredicate.entryEq, new Pair<>(key, value)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java deleted file mode 100644 index 65dd84d0076..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.dse.driver.internal.core.graph.GeoPredicate; -import com.datastax.dse.driver.internal.core.graph.GeoUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -public interface Geo { - - enum Unit { - MILES(GeoUtils.MILES_TO_KM * GeoUtils.KM_TO_DEG), - KILOMETERS(GeoUtils.KM_TO_DEG), - METERS(GeoUtils.KM_TO_DEG / 1000.0), - DEGREES(1); - - private final double multiplier; - - Unit(double multiplier) { - this.multiplier = multiplier; - } - - /** Convert distance to degrees (used internally only). */ - public double toDegrees(double distance) { - return distance * multiplier; - } - } - - /** - * Finds whether an entity is inside the given circular area using a geo coordinate system. - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P inside(Point center, double radius, Unit units) { - return new P<>(GeoPredicate.inside, new Distance(center, units.toDegrees(radius))); - } - - /** - * Finds whether an entity is inside the given circular area using a cartesian coordinate system. - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P inside(Point center, double radius) { - return new P<>(GeoPredicate.insideCartesian, new Distance(center, radius)); - } - - /** - * Finds whether an entity is inside the given polygon. - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P inside(Polygon polygon) { - return new P<>(GeoPredicate.insideCartesian, polygon); - } - - /** - * Creates a point from the given coordinates. 
- * - *

This is just a shortcut to {@link Point#fromCoordinates(double, double)}. It is duplicated - * here so that {@code Geo} can be used as a single entry point in Gremlin-groovy scripts. - */ - @NonNull - static Point point(double x, double y) { - return Point.fromCoordinates(x, y); - } - - /** - * Creates a line string from the given (at least 2) points. - * - *

This is just a shortcut to {@link LineString#fromPoints(Point, Point, Point...)}. It is - * duplicated here so that {@code Geo} can be used as a single entry point in Gremlin-groovy - * scripts. - */ - @NonNull - static LineString lineString( - @NonNull Point point1, @NonNull Point point2, @NonNull Point... otherPoints) { - return LineString.fromPoints(point1, point2, otherPoints); - } - - /** - * Creates a line string from the coordinates of its points. - * - *

This is provided for backward compatibility with previous DSE versions. We recommend {@link - * #lineString(Point, Point, Point...)} instead. - */ - @NonNull - static LineString lineString(double... coordinates) { - if (coordinates.length % 2 != 0) { - throw new IllegalArgumentException("lineString() must be passed an even number of arguments"); - } else if (coordinates.length < 4) { - throw new IllegalArgumentException( - "lineString() must be passed at least 4 arguments (2 points)"); - } - Point point1 = Point.fromCoordinates(coordinates[0], coordinates[1]); - Point point2 = Point.fromCoordinates(coordinates[2], coordinates[3]); - Point[] otherPoints = new Point[coordinates.length / 2 - 2]; - for (int i = 4; i < coordinates.length; i += 2) { - otherPoints[i / 2 - 2] = Point.fromCoordinates(coordinates[i], coordinates[i + 1]); - } - return LineString.fromPoints(point1, point2, otherPoints); - } - - /** - * Creates a polygon from the given (at least 3) points. - * - *

This is just a shortcut to {@link Polygon#fromPoints(Point, Point, Point, Point...)}. It is - * duplicated here so that {@code Geo} can be used as a single entry point in Gremlin-groovy - * scripts. - */ - @NonNull - static Polygon polygon( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... otherPoints) { - return Polygon.fromPoints(p1, p2, p3, otherPoints); - } - - /** - * Creates a polygon from the coordinates of its points. - * - *

This is provided for backward compatibility with previous DSE versions. We recommend {@link - * #polygon(Point, Point, Point, Point...)} instead. - */ - @NonNull - static Polygon polygon(double... coordinates) { - if (coordinates.length % 2 != 0) { - throw new IllegalArgumentException("polygon() must be passed an even number of arguments"); - } else if (coordinates.length < 6) { - throw new IllegalArgumentException( - "polygon() must be passed at least 6 arguments (3 points)"); - } - Point point1 = Point.fromCoordinates(coordinates[0], coordinates[1]); - Point point2 = Point.fromCoordinates(coordinates[2], coordinates[3]); - Point point3 = Point.fromCoordinates(coordinates[4], coordinates[5]); - Point[] otherPoints = new Point[coordinates.length / 2 - 3]; - for (int i = 6; i < coordinates.length; i += 2) { - otherPoints[i / 2 - 3] = Point.fromCoordinates(coordinates[i], coordinates[i + 1]); - } - return Polygon.fromPoints(point1, point2, point3, otherPoints); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java deleted file mode 100644 index e285c118c8a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import com.datastax.dse.driver.internal.core.graph.SearchPredicate; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -public interface Search { - - /** - * Search any instance of a certain token within the text property targeted (case insensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P token(String value) { - return new P<>(SearchPredicate.token, value); - } - - /** - * Search any instance of a certain token prefix within the text property targeted (case - * insensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P tokenPrefix(String value) { - return new P<>(SearchPredicate.tokenPrefix, value); - } - - /** - * Search any instance of the provided regular expression for the targeted property (case - * insensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P tokenRegex(String value) { - return new P<>(SearchPredicate.tokenRegex, value); - } - - /** - * Search for a specific prefix at the beginning of the text property targeted (case sensitive). - * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P prefix(String value) { - return new P<>(SearchPredicate.prefix, value); - } - - /** - * Search for this regular expression inside the text property targeted (case sensitive). 
- * - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P regex(String value) { - return new P<>(SearchPredicate.regex, value); - } - - /** - * Supports finding words which are a within a specific distance away (case insensitive). - * - *

Example: the search expression is {@code phrase("Hello world", 2)} - * - *

    - *
  • the inserted value "Hello world" is found - *
  • the inserted value "Hello wild world" is found - *
  • the inserted value "Hello big wild world" is found - *
  • the inserted value "Hello the big wild world" is not found - *
  • the inserted value "Goodbye world" is not found. - *
- * - * @param query the string to look for in the value - * @param distance the number of terms allowed between two correct terms to find a value. - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P phrase(String query, int distance) { - return new P<>(SearchPredicate.phrase, new EditDistance(query, distance)); - } - - /** - * Supports fuzzy searches based on the Damerau-Levenshtein Distance, or Edit Distance algorithm - * (case sensitive). - * - *

Example: the search expression is {@code fuzzy("david", 1)} - * - *

    - *
  • the inserted value "david" is found - *
  • the inserted value "dawid" is found - *
  • the inserted value "davids" is found - *
  • the inserted value "dewid" is not found - *
- * - * @param query the string to look for in the value - * @param distance the number of "uncertainties" allowed for the Levenshtein algorithm. - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P fuzzy(String query, int distance) { - return new P<>(SearchPredicate.fuzzy, new EditDistance(query, distance)); - } - - /** - * Supports fuzzy searches based on the Damerau-Levenshtein Distance, or Edit Distance algorithm - * after having tokenized the data stored (case insensitive). - * - *

Example: the search expression is {@code tokenFuzzy("david", 1)} - * - *

    - *
  • the inserted value "david" is found - *
  • the inserted value "dawid" is found - *
  • the inserted value "hello-dawid" is found - *
  • the inserted value "dewid" is not found - *
- * - * @param query the string to look for in the value - * @param distance the number of "uncertainties" allowed for the Levenshtein algorithm. - * @return a predicate to apply in a {@link GraphTraversal}. - */ - static P tokenFuzzy(String query, int distance) { - return new P<>(SearchPredicate.tokenFuzzy, new EditDistance(query, distance)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java deleted file mode 100644 index ad7849633c6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A {@link GraphNode} produced by a {@linkplain ReactiveGraphResultSet reactive graph result set}. - * - *

This is essentially an extension of the driver's {@link GraphNode} object that also exposes - * useful information about {@linkplain #getExecutionInfo() request execution} (note however that - * this information is also exposed at result set level for convenience). - * - * @see ReactiveGraphSession - * @see ReactiveGraphResultSet - */ -public interface ReactiveGraphNode extends GraphNode { - - /** - * The execution information for the paged request that produced this result. - * - *

This object is the same for two rows pertaining to the same page, but differs for rows - * pertaining to different pages. - * - * @return the execution information for the paged request that produced this result. - * @see ReactiveGraphResultSet#getExecutionInfos() - */ - @NonNull - ExecutionInfo getExecutionInfo(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java deleted file mode 100644 index a0e3231750e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * A {@link Publisher} of {@link ReactiveGraphNode}s returned by a {@link ReactiveGraphSession}. - * - *

By default, all implementations returned by the driver are cold, unicast, single-subscriber - * only publishers. In other words, they do not support multiple subscriptions; consider - * caching the results produced by such publishers if you need to consume them by more than one - * downstream subscriber. - * - *

Also, note that reactive graph result sets may emit items to their subscribers on an internal - * driver IO thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - *

This interface exists mainly to expose useful information about {@linkplain - * #getExecutionInfos() request execution}. This is particularly convenient for queries that do not - * return rows; for queries that do return rows, it is also possible, and oftentimes easier, to - * access that same information {@linkplain ReactiveGraphNode at node level}. - * - * @see ReactiveGraphSession#executeReactive(GraphStatement) - * @see ReactiveGraphNode - */ -public interface ReactiveGraphResultSet extends Publisher { - - /** - * Returns {@linkplain ExecutionInfo information about the execution} of all requests that have - * been performed so far to assemble this result set. - * - *

If the query is not paged, this publisher will emit exactly one item as soon as the response - * arrives, then complete. If the query is paged, it will emit multiple items, one per page; then - * it will complete when the last page arrives. If the query execution fails, then this publisher - * will fail with the same error. - * - *

By default, publishers returned by this method do not support multiple subscriptions. - * - * @see ReactiveGraphNode#getExecutionInfo() - */ - @NonNull - Publisher getExecutionInfos(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java deleted file mode 100644 index 88f0e5def61..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.internal.core.graph.reactive.ReactiveGraphRequestProcessor; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * A {@link Session} that offers utility methods to issue graph queries using reactive-style - * programming. 
- */ -public interface ReactiveGraphSession extends Session { - - /** - * Returns a {@link ReactiveGraphResultSet} that, once subscribed to, executes the given query and - * emits all the results. - * - *

See the javadocs of {@link ReactiveGraphResultSet} for important remarks anc caveats - * regarding the subscription to and consumption of reactive graph result sets. - * - * @param statement the statement to execute. - * @return The {@link ReactiveGraphResultSet} that will publish the returned results. - * @see ReactiveGraphResultSet - * @see ReactiveGraphNode - */ - @NonNull - default ReactiveGraphResultSet executeReactive(@NonNull GraphStatement statement) { - return Objects.requireNonNull( - execute(statement, ReactiveGraphRequestProcessor.REACTIVE_GRAPH_RESULT_SET)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java deleted file mode 100644 index 88dbc164588..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; - -/** The keys for the additional DSE-specific properties stored in {@link Node#getExtras()}. */ -public class DseNodeProperties { - - /** - * The DSE version that the node is running. - * - *

The associated value in {@link Node#getExtras()} is a {@link Version}). - */ - public static final String DSE_VERSION = "DSE_VERSION"; - - /** - * The value of the {@code server_id} field in the {@code peers} system table for this node. - * - *

This is the single identifier of the machine running a DSE instance. If DSE has been - * configured with Multi-Instance, the {@code server_id} helps identifying the single physical - * machine that runs the multiple DSE instances. If DSE is not configured with DSE Multi-Instance, - * the {@code server_id} will be automatically set and be unique for each node. - * - *

This information is only available if connecting to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is a {@code String}). - * - * @see DSE - * Multi-Instance (DSE Administrator Guide) - * @see - * server_id (DSE Administrator Guide) - */ - public static final String SERVER_ID = "SERVER_ID"; - - /** - * The DSE workloads that the node is running. - * - *

This is based on the {@code workload} or {@code workloads} columns in {@code system.local} - * and {@code system.peers}. - * - *

Workload labels may vary depending on the DSE version in use; e.g. DSE 5.1 may report two - * distinct workloads: {@code Search} and {@code Analytics}, while DSE 5.0 would report a single - * {@code SearchAnalytics} workload instead. It is up to users to deal with such discrepancies; - * the driver simply returns the workload labels as reported by DSE, without any form of - * pre-processing (with the exception of Graph in DSE 5.0, which is stored in a separate column, - * but will be reported as {@code Graph} here). - * - *

The associated value in {@link Node#getExtras()} is an immutable {@code Set}. - */ - public static final String DSE_WORKLOADS = "DSE_WORKLOADS"; - - /** - * The port for the native transport connections on the DSE node. - * - *

The native transport port is {@code 9042} by default but can be changed on instances - * requiring specific firewall configurations. This can be configured in the {@code - * cassandra.yaml} configuration file under the {@code native_transport_port} property. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String NATIVE_TRANSPORT_PORT = "NATIVE_TRANSPORT_PORT"; - - /** - * The port for the encrypted native transport connections on the DSE node. - * - *

In most scenarios enabling client communications in DSE will result in using a single port - * that will only accept encrypted connections (by default the port {@code 9042} is reused since - * unencrypted connections are not allowed). - * - *

However, it is possible to configure DSE to use both encrypted and a non-encrypted - * communication ports with clients. In that case the port accepting encrypted connections will - * differ from the non-encrypted one (see {@link #NATIVE_TRANSPORT_PORT}) and will be exposed via - * this method. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String NATIVE_TRANSPORT_PORT_SSL = "NATIVE_TRANSPORT_PORT_SSL"; - - /** - * The storage port used by the DSE node. - * - *

The storage port is used for internal communication between the DSE server nodes. This port - * is never used by the driver. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String STORAGE_PORT = "STORAGE_PORT"; - - /** - * The encrypted storage port used by the DSE node. - * - *

If inter-node encryption is enabled on the DSE cluster, nodes will communicate securely - * between each other via this port. This port is never used by the driver. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String STORAGE_PORT_SSL = "STORAGE_PORT_SSL"; - - /** - * The JMX port used by this node. - * - *

The JMX port can be configured in the {@code cassandra-env.sh} configuration file separately - * on each node. - * - *

This information is only available if connecting the driver to a DSE 6.0+ node. - * - *

The associated value in {@link Node#getExtras()} is an {@code Integer}. - */ - public static final String JMX_PORT = "JMX_PORT"; -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java deleted file mode 100644 index 609c64f7c15..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; - -/** - * Specialized aggregate metadata for DSE. - * - *

It adds support for the DSE-specific {@link #getDeterministic() DETERMINISTIC} keyword. - */ -public interface DseAggregateMetadata extends AggregateMetadata { - - /** @deprecated Use {@link #getDeterministic()} instead. */ - @Deprecated - boolean isDeterministic(); - - /** - * Indicates if this aggregate is deterministic. A deterministic aggregate means that given a - * particular input, the aggregate will always produce the same output. - * - *

This method returns {@linkplain Optional#empty() empty} if this information was not found in - * the system tables, regardless of the actual aggregate characteristics; this is the case for all - * versions of DSE older than 6.0.0. - * - * @return Whether or not this aggregate is deterministic; or {@linkplain Optional#empty() empty} - * if such information is not available in the system tables. - */ - default Optional getDeterministic() { - return Optional.of(isDeterministic()); - } - - @NonNull - @Override - default String describe(boolean pretty) { - // Easiest to just copy the OSS describe() method and add in DETERMINISTIC - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE AGGREGATE ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - builder.append(type.asCql(false, pretty)); - } - builder - .increaseIndent() - .append(")") - .newLine() - .append("SFUNC ") - .append(getStateFuncSignature().getName()) - .newLine() - .append("STYPE ") - .append(getStateType().asCql(false, pretty)); - - if (getFinalFuncSignature().isPresent()) { - builder.newLine().append("FINALFUNC ").append(getFinalFuncSignature().get().getName()); - } - if (getInitCond().isPresent()) { - Optional formatInitCond = formatInitCond(); - assert formatInitCond.isPresent(); - builder.newLine().append("INITCOND ").append(formatInitCond.get()); - } - // add DETERMINISTIC if present - if (getDeterministic().orElse(false)) { - builder.newLine().append("DETERMINISTIC"); - } - return builder.append(";").build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java 
deleted file mode 100644 index 62b5650697e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; - -/** - * Specialized column metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * ColumnMetadata}. - */ -public interface DseColumnMetadata extends ColumnMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java deleted file mode 100644 index 59ee8a277ff..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** Edge metadata, for a table that was created with CREATE TABLE ... WITH EDGE LABEL. */ -public interface DseEdgeMetadata { - - /** The label of the edge in graph. */ - @NonNull - CqlIdentifier getLabelName(); - - /** The identifier of the table representing the incoming vertex. */ - @NonNull - CqlIdentifier getFromTable(); - - /** The label of the incoming vertex in graph. 
*/ - @NonNull - CqlIdentifier getFromLabel(); - - /** The columns in this table that match the partition key of the incoming vertex table. */ - @NonNull - List getFromPartitionKeyColumns(); - - /** The columns in this table that match the clustering columns of the incoming vertex table. */ - @NonNull - List getFromClusteringColumns(); - - /** The identifier of the table representing the outgoing vertex. */ - @NonNull - CqlIdentifier getToTable(); - - /** The label of the outgoing vertex in graph. */ - @NonNull - CqlIdentifier getToLabel(); - - /** The columns in this table that match the partition key of the outgoing vertex table. */ - @NonNull - List getToPartitionKeyColumns(); - - /** The columns in this table that match the clustering columns of the outgoing vertex table. */ - @NonNull - List getToClusteringColumns(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java deleted file mode 100644 index 91298795959..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Optional; - -/** - * Specialized function metadata for DSE. - * - *

It adds support for the DSE-specific {@link #getDeterministic() DETERMINISTIC} and {@link - * #getMonotonicity() MONOTONIC} keywords. - */ -public interface DseFunctionMetadata extends FunctionMetadata { - - /** The monotonicity of a function. */ - enum Monotonicity { - - /** - * Indicates that the function is fully monotonic on all of its arguments. This means that it is - * either entirely non-increasing or non-decreasing. Full monotonicity is required to use the - * function in a GROUP BY clause. - */ - FULLY_MONOTONIC, - - /** - * Indicates that the function is partially monotonic, meaning that partial application over - * some of the its arguments is monotonic. Currently (DSE 6.0.0), CQL only allows partial - * monotonicity on exactly one argument. This may change in a future CQL version. - */ - PARTIALLY_MONOTONIC, - - /** Indicates that the function is not monotonic. */ - NOT_MONOTONIC, - } - - /** @deprecated Use {@link #getDeterministic()} instead. */ - @Deprecated - boolean isDeterministic(); - - /** - * Indicates if this function is deterministic. A deterministic function means that given a - * particular input, the function will always produce the same output. - * - *

This method returns {@linkplain Optional#empty() empty} if this information was not found in - * the system tables, regardless of the actual function characteristics; this is the case for all - * versions of DSE older than 6.0.0. - * - * @return Whether or not this function is deterministic; or {@linkplain Optional#empty() empty} - * if such information is not available in the system tables. - */ - default Optional getDeterministic() { - return Optional.of(isDeterministic()); - } - - /** @deprecated use {@link #getMonotonicity()} instead. */ - @Deprecated - boolean isMonotonic(); - - /** - * Returns this function's {@link Monotonicity}. - * - *

A function can be either: - * - *

    - *
  • fully monotonic. In that case, this method returns {@link Monotonicity#FULLY_MONOTONIC}, - * and {@link #getMonotonicArgumentNames()} returns all the arguments; - *
  • partially monotonic, meaning that partial application over some of the arguments is - * monotonic. Currently (DSE 6.0.0), CQL only allows partial monotonicity on exactly one - * argument. This may change in a future CQL version. In that case, this method returns - * {@link Monotonicity#PARTIALLY_MONOTONIC}, and {@link #getMonotonicArgumentNames()} - * returns a singleton list; - *
  • not monotonic. In that case, this method return {@link Monotonicity#NOT_MONOTONIC} and - * {@link #getMonotonicArgumentNames()} returns an empty list. - *
- * - *

Full monotonicity is required to use the function in a GROUP BY clause. - * - *

This method returns {@linkplain Optional#empty() empty} if this information was not found in - * the system tables, regardless of the actual function characteristics; this is the case for all - * versions of DSE older than 6.0.0. - * - * @return this function's {@link Monotonicity}; or {@linkplain Optional#empty() empty} if such - * information is not available in the system tables. - */ - default Optional getMonotonicity() { - return Optional.of( - isMonotonic() - ? Monotonicity.FULLY_MONOTONIC - : getMonotonicArgumentNames().isEmpty() - ? Monotonicity.NOT_MONOTONIC - : Monotonicity.PARTIALLY_MONOTONIC); - } - - /** - * Returns a list of argument names that are monotonic. - * - *

See {@link #getMonotonicity()} for explanations on monotonicity, and the possible values - * returned by this method. - * - *

NOTE: For versions of DSE older than 6.0.0, this method will always return an empty list, - * regardless of the actual function characteristics. - * - * @return the argument names that the function is monotonic on. - */ - @NonNull - List getMonotonicArgumentNames(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE FUNCTION ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - CqlIdentifier name = getParameterNames().get(i); - builder.append(name).append(" ").append(type.asCql(false, pretty)); - } - builder - .append(")") - .increaseIndent() - .newLine() - .append(isCalledOnNullInput() ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT") - .newLine() - .append("RETURNS ") - .append(getReturnType().asCql(false, true)) - .newLine(); - // handle deterministic and monotonic - if (getDeterministic().orElse(false)) { - builder.append("DETERMINISTIC").newLine(); - } - if (getMonotonicity().isPresent()) { - switch (getMonotonicity().get()) { - case FULLY_MONOTONIC: - builder.append("MONOTONIC").newLine(); - break; - case PARTIALLY_MONOTONIC: - builder.append("MONOTONIC ON ").append(getMonotonicArgumentNames().get(0)).newLine(); - break; - default: - break; - } - } - builder - .append("LANGUAGE ") - .append(getLanguage()) - .newLine() - .append("AS '") - .append(getBody()) - .append("';"); - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java deleted file mode 100644 index 8978a8858f9..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** - * Specialized keyspace metadata, that handles the graph-specific properties introduced in DSE 6.8. - * - *

This type only exists to avoid breaking binary compatibility. When the driver is connected to - * a DSE cluster, all the {@link KeyspaceMetadata} instances it returns can be safely downcast to - * this interface. - */ -public interface DseGraphKeyspaceMetadata extends DseKeyspaceMetadata { - - /** The graph engine that will be used to interpret this keyspace. */ - @NonNull - Optional getGraphEngine(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - builder - .append("KEYSPACE ") - .append(getName()) - .append(" WITH replication = { 'class' : '") - .append(getReplication().get("class")) - .append("'"); - for (Map.Entry entry : getReplication().entrySet()) { - if (!entry.getKey().equals("class")) { - builder - .append(", '") - .append(entry.getKey()) - .append("': '") - .append(entry.getValue()) - .append("'"); - } - } - builder.append(" } AND durable_writes = ").append(Boolean.toString(isDurableWrites())); - getGraphEngine() - .ifPresent( - graphEngine -> builder.append(" AND graph_engine ='").append(graphEngine).append("'")); - builder.append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java deleted file mode 100644 index 8f340b3b447..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.dse.driver.internal.core.metadata.schema.ScriptHelper; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** - * Specialized table metadata, that handles the graph-specific properties introduced in DSE 6.8. - * - *

This type only exists to avoid breaking binary compatibility. When the driver is connected to - * a DSE cluster, all the {@link TableMetadata} instances it returns can be safely downcast to this - * interface. - */ -public interface DseGraphTableMetadata extends DseTableMetadata { - /** - * The vertex metadata if this table represents a vertex in graph, otherwise empty. - * - *

This is mutually exclusive with {@link #getEdge()}. - */ - @NonNull - Optional getVertex(); - - /** - * The edge metadata if this table represents an edge in graph, otherwise empty. - * - *

This is mutually exclusive with {@link #getVertex()}. - */ - @NonNull - Optional getEdge(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - - builder - .append("TABLE ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" (") - .newLine() - .increaseIndent(); - - for (ColumnMetadata column : getColumns().values()) { - builder.append(column.getName()).append(" ").append(column.getType().asCql(true, pretty)); - if (column.isStatic()) { - builder.append(" static"); - } - builder.append(",").newLine(); - } - - // PK - builder.append("PRIMARY KEY ("); - if (getPartitionKey().size() == 1) { // PRIMARY KEY (k - builder.append(getPartitionKey().get(0).getName()); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (ColumnMetadata pkColumn : getPartitionKey()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(pkColumn.getName()); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { - builder.append(", ").append(clusteringColumn.getName()); - } - builder.append(")"); - - builder.newLine().decreaseIndent().append(")"); - - builder.increaseIndent(); - if (isCompactStorage()) { - builder.andWith().append("COMPACT STORAGE"); - } - if (getClusteringColumns().containsValue(ClusteringOrder.DESC)) { - builder.andWith().append("CLUSTERING ORDER BY ("); - boolean first = true; - for (Map.Entry entry : - getClusteringColumns().entrySet()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(entry.getKey().getName()).append(" ").append(entry.getValue().name()); - } - builder.append(")"); - } - getVertex() - .ifPresent( - vertex -> { - builder.andWith().append("VERTEX LABEL").append(" 
").append(vertex.getLabelName()); - }); - getEdge() - .ifPresent( - edge -> { - builder.andWith().append("EDGE LABEL").append(" ").append(edge.getLabelName()); - ScriptHelper.appendEdgeSide( - builder, - edge.getFromTable(), - edge.getFromLabel(), - edge.getFromPartitionKeyColumns(), - edge.getFromClusteringColumns(), - "FROM"); - ScriptHelper.appendEdgeSide( - builder, - edge.getToTable(), - edge.getToLabel(), - edge.getToPartitionKeyColumns(), - edge.getToClusteringColumns(), - "TO"); - }); - Map options = getOptions(); - RelationParser.appendOptions(options, builder); - builder.append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java deleted file mode 100644 index ac4c1057fbf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; - -/** - * Specialized index metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * IndexMetadata}. - */ -public interface DseIndexMetadata extends IndexMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java deleted file mode 100644 index bc5cb002802..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; - -/** - * Specialized keyspace metadata for DSE. - * - *

Notes: - * - *

    - *
  • this type can always be safely downcast to {@link DseGraphKeyspaceMetadata} (the only - * reason the two interfaces are separate is for backward compatibility). - *
  • all returned elements can be cast to their DSE counterparts, for example {@link - * TableMetadata} to {@link DseTableMetadata}. - *
- */ -public interface DseKeyspaceMetadata extends KeyspaceMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java deleted file mode 100644 index 55b36cb7fe5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; - -/** - * Specialized table or materialized view metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * RelationMetadata}. - * - *

Note that all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}. - */ -public interface DseRelationMetadata extends RelationMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java deleted file mode 100644 index a140f93bc2e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; - -/** - * Specialized table metadata for DSE. - * - *

Notes: - * - *

    - *
  • this type can always be safely downcast to {@link DseGraphTableMetadata} (the only reason - * the two interfaces are separate is for backward compatibility). - *
  • all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}, and all - * {@link IndexMetadata} to {@link DseIndexMetadata}. - *
- */ -public interface DseTableMetadata extends DseRelationMetadata, TableMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java deleted file mode 100644 index c08a7eb1d60..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** Vertex metadata, for a table that was created with CREATE TABLE ... WITH VERTEX LABEL. */ -public interface DseVertexMetadata { - - /** The label of the vertex in graph. 
*/ - @NonNull - CqlIdentifier getLabelName(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java deleted file mode 100644 index 0f68ea7e456..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; - -/** - * Specialized materialized view metadata for DSE. - * - *

This type exists only for future extensibility; currently, it is identical to {@link - * ViewMetadata}. - * - *

Note that all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}. - */ -public interface DseViewMetadata extends DseRelationMetadata, ViewMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java deleted file mode 100644 index cf4b4d0aa18..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. 
*/ -public enum DseNodeMetric implements NodeMetric { - GRAPH_MESSAGES("graph-messages"); - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DseNodeMetric(String path) { - this.path = path; - } - - @Override - @NonNull - public String getPath() { - return path; - } - - @NonNull - public static DseNodeMetric fromPath(@NonNull String path) { - DseNodeMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown node metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DseNodeMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java deleted file mode 100644 index 79584f3c44a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. */ -public enum DseSessionMetric implements SessionMetric { - CONTINUOUS_CQL_REQUESTS("continuous-cql-requests"), - GRAPH_REQUESTS("graph-requests"), - GRAPH_CLIENT_TIMEOUTS("graph-client-timeouts"), - ; - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DseSessionMetric(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } - - @NonNull - public static DseSessionMetric fromPath(@NonNull String path) { - DseSessionMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown DSE session metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DseSessionMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java b/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java deleted file mode 100644 index 8bf4d80699d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A server-side error triggered when DSE can't send asynchronous results back to the client. - * - *

Currently, this is used when the client is unable to keep up with the rate during a continuous - * paging session. - * - *

Note that the protocol specification refers to this error as {@code CLIENT_WRITE_FAILURE}; we - * don't follow that terminology because it would be too misleading (this is not a client error, and - * it doesn't occur while writing data to DSE). - */ -public class UnfitClientException extends CoordinatorException { - - public UnfitClientException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private UnfitClientException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @Override - @NonNull - public UnfitClientException copy() { - return new UnfitClientException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java b/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java deleted file mode 100644 index 6003274e09a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.type; - -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataTypes; - -/** Extends {@link DataTypes} to handle DSE-specific types. */ -public class DseDataTypes extends DataTypes { - - public static final CustomType LINE_STRING = - (CustomType) custom("org.apache.cassandra.db.marshal.LineStringType"); - - public static final CustomType POINT = - (CustomType) custom("org.apache.cassandra.db.marshal.PointType"); - - public static final CustomType POLYGON = - (CustomType) custom("org.apache.cassandra.db.marshal.PolygonType"); - - public static final CustomType DATE_RANGE = - (CustomType) custom("org.apache.cassandra.db.marshal.DateRangeType"); -} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java b/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java deleted file mode 100644 index fb0225970b4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.type.codec; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.data.time.DateRange; -import com.datastax.dse.driver.internal.core.type.codec.geometry.LineStringCodec; -import com.datastax.dse.driver.internal.core.type.codec.geometry.PointCodec; -import com.datastax.dse.driver.internal.core.type.codec.geometry.PolygonCodec; -import com.datastax.dse.driver.internal.core.type.codec.time.DateRangeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; - -/** Extends {@link TypeCodecs} to handle DSE-specific types. */ -public class DseTypeCodecs extends TypeCodecs { - - public static final TypeCodec LINE_STRING = new LineStringCodec(); - - public static final TypeCodec POINT = new PointCodec(); - - public static final TypeCodec POLYGON = new PolygonCodec(); - - public static final TypeCodec DATE_RANGE = new DateRangeCodec(); -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java b/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java deleted file mode 100644 index 95f245061d2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core; - -import com.datastax.oss.driver.internal.core.ProtocolFeature; - -/** - * Features that are supported by DataStax Enterprise (DSE) protocol versions. - * - * @see com.datastax.dse.driver.api.core.DseProtocolVersion - * @see com.datastax.oss.driver.internal.core.DefaultProtocolFeature - */ -public enum DseProtocolFeature implements ProtocolFeature { - - /** - * The ability to execute continuous paging requests. - * - * @see CASSANDRA-11521 - * @see com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession - */ - CONTINUOUS_PAGING, - ; -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java b/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java deleted file mode 100644 index e4dd6f93bf7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.MONITOR_REPORTING_ENABLED; - -import com.datastax.dse.driver.internal.core.insights.InsightsClient; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.LifecycleListener; - -public class InsightsClientLifecycleListener implements LifecycleListener { - private static final boolean DEFAULT_INSIGHTS_ENABLED = true; - private static final long STATUS_EVENT_DELAY_MILLIS = 300000L; - private final InternalDriverContext context; - private final StackTraceElement[] initCallStackTrace; - private volatile InsightsClient insightsClient; - - public InsightsClientLifecycleListener( - InternalDriverContext context, StackTraceElement[] initCallStackTrace) { - this.context = context; - this.initCallStackTrace = initCallStackTrace; - } - - @Override - public void onSessionReady() { - boolean monitorReportingEnabled = - context - .getConfig() - .getDefaultProfile() - .getBoolean(MONITOR_REPORTING_ENABLED, DEFAULT_INSIGHTS_ENABLED); - - this.insightsClient = - InsightsClient.createInsightsClient( - new InsightsConfiguration( - monitorReportingEnabled, - STATUS_EVENT_DELAY_MILLIS, - context.getNettyOptions().adminEventExecutorGroup().next()), - context, - initCallStackTrace); - insightsClient.sendStartupMessage(); - insightsClient.scheduleStatusMessageSend(); - } - - @Override - 
public void close() { - if (insightsClient != null) { - insightsClient.shutdown(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java deleted file mode 100644 index 38f1644bcb7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import java.util.ArrayList; -import java.util.List; - -public class AuthUtils { - /** - * Utility function that checks for the existence of settings and throws an exception if they - * aren't present - * - * @param config Current working driver configuration - * @param authenticatorName name of authenticator for logging purposes - * @param endPoint the host we are attempting to authenticate to - * @param options a list of DriverOptions to check to see if they are present - */ - public static void validateConfigPresent( - DriverExecutionProfile config, - String authenticatorName, - EndPoint endPoint, - DriverOption... options) { - List missingOptions = new ArrayList<>(); - for (DriverOption option : options) { - - if (!config.isDefined(option)) { - missingOptions.add(option); - } - if (missingOptions.size() > 0) { - String message = - "Missing required configuration options for authenticator " + authenticatorName + ":"; - for (DriverOption missingOption : missingOptions) { - message = message + " " + missingOption.getPath(); - } - throw new AuthenticationException(endPoint, message); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java deleted file mode 100644 index 6ef6596a870..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.auth; - -import com.datastax.dse.driver.api.core.auth.DseGssApiAuthProviderBase; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -/** - * {@link AuthProvider} that provides GSSAPI authenticator instances for clients to connect to DSE - * clusters secured with {@code DseAuthenticator}. - * - *

To activate this provider an {@code auth-provider} section must be included in the driver - * configuration, for example: - * - *

- * dse-java-driver {
- *  auth-provider {
- *      class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
- *      login-configuration {
- *          principal = "user principal here ex cassandra@DATASTAX.COM"
- *          useKeyTab = "true"
- *          refreshKrb5Config = "true"
- *          keyTab = "Path to keytab file here"
- *      }
- *   }
- * }
- * 
- * - *

Kerberos Authentication

- * - * Keytab and ticket cache settings are specified using a standard JAAS configuration file. The - * location of the file can be set using the java.security.auth.login.config system - * property or by adding a login.config.url.n entry in the java.security - * properties file. Alternatively a login-configuration section can be included in the driver - * configuration. - * - *

See the following documents for further details: - * - *

    - *
  1. JAAS - * Login Configuration File; - *
  2. Krb5LoginModule - * options; - *
  3. JAAS - * Authentication Tutorial for more on JAAS in general. - *
- * - *

Authentication using ticket cache

- * - * Run kinit to obtain a ticket and populate the cache before connecting. JAAS config: - * - *
- * DseClient {
- *   com.sun.security.auth.module.Krb5LoginModule required
- *     useTicketCache=true
- *     renewTGT=true;
- * };
- * 
- * - *

Authentication using a keytab file

- * - * To enable authentication using a keytab file, specify its location on disk. If your keytab - * contains more than one principal key, you should also specify which one to select. This - * information can also be specified in the driver config, under the login-configuration section. - * - *
- * DseClient {
- *     com.sun.security.auth.module.Krb5LoginModule required
- *       useKeyTab=true
- *       keyTab="/path/to/file.keytab"
- *       principal="user@MYDOMAIN.COM";
- * };
- * 
- * - *

Specifying SASL protocol name

- * - * The SASL protocol name used by this auth provider defaults to " - * {@value #DEFAULT_SASL_SERVICE_NAME}". - * - *

Important: the SASL protocol name should match the username of the Kerberos - * service principal used by the DSE server. This information is specified in the dse.yaml file by - * the {@code service_principal} option under the kerberos_options - * section, and may vary from one DSE installation to another – especially if you installed - * DSE with an automated package installer. - * - *

For example, if your dse.yaml file contains the following: - * - *

{@code
- * kerberos_options:
- *     ...
- *     service_principal: cassandra/my.host.com@MY.REALM.COM
- * }
- * - * The correct SASL protocol name to use when authenticating against this DSE server is "{@code - * cassandra}". - * - *

Should you need to change the SASL protocol name, use one of the methods below: - * - *

    - *
  1. Specify the service name in the driver config. - *
    - * dse-java-driver {
    - *   auth-provider {
    - *     class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
    - *     service = "alternate"
    - *   }
    - * }
    - * 
    - *
  2. Specify the service name with the {@code dse.sasl.service} system property when starting - * your application, e.g. {@code -Ddse.sasl.service=cassandra}. - *
- * - * If a non-null SASL service name is provided to the aforementioned config, that name takes - * precedence over the contents of the {@code dse.sasl.service} system property. - * - *

Should internal sasl properties need to be set such as qop. This can be accomplished by - * including a sasl-properties in the driver config, for example: - * - *

- * dse-java-driver {
- *   auth-provider {
- *     class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
- *     sasl-properties {
- *       javax.security.sasl.qop = "auth-conf"
- *     }
- *   }
- * }
- * 
- */ -@ThreadSafe -public class DseGssApiAuthProvider extends DseGssApiAuthProviderBase { - - private final DriverExecutionProfile config; - - public DseGssApiAuthProvider(DriverContext context) { - super(context.getSessionName()); - - this.config = context.getConfig().getDefaultProfile(); - } - - @NonNull - @Override - protected GssApiOptions getOptions( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - // A login configuration is always necessary, throw an exception if that option is missing. - AuthUtils.validateConfigPresent( - config, - DseGssApiAuthProvider.class.getName(), - endPoint, - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION); - - GssApiOptions.Builder optionsBuilder = GssApiOptions.builder(); - - if (config.isDefined(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID)) { - optionsBuilder.withAuthorizationId( - config.getString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID)); - } - if (config.isDefined(DseDriverOption.AUTH_PROVIDER_SERVICE)) { - optionsBuilder.withSaslProtocol(config.getString(DseDriverOption.AUTH_PROVIDER_SERVICE)); - } - if (config.isDefined(DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES)) { - for (Map.Entry entry : - config.getStringMap(DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES).entrySet()) { - optionsBuilder.addSaslProperty(entry.getKey(), entry.getValue()); - } - } - Map loginConfigurationMap = - config.getStringMap(DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION); - optionsBuilder.withLoginConfiguration(loginConfigurationMap); - return optionsBuilder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java deleted file mode 100644 index 6cf82aef03e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.auth; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import net.jcip.annotations.ThreadSafe; - -/** - * @deprecated The driver's default plain text providers now support both Apache Cassandra and DSE. - * This type was preserved for backward compatibility, but {@link PlainTextAuthProvider} should - * be used instead. - */ -@ThreadSafe -@Deprecated -public class DsePlainTextAuthProvider extends PlainTextAuthProvider { - - public DsePlainTextAuthProvider(DriverContext context) { - super(context); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java deleted file mode 100644 index 15aab143150..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.servererrors.UnfitClientException; -import com.datastax.dse.protocol.internal.DseProtocolConstants; -import com.datastax.dse.protocol.internal.request.query.ContinuousPagingOptions; -import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import 
com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -public class DseConversions { - - public static Message toContinuousPagingMessage( - Statement statement, DriverExecutionProfile config, InternalDriverContext context) { - ConsistencyLevelRegistry consistencyLevelRegistry = context.getConsistencyLevelRegistry(); - ConsistencyLevel consistency = statement.getConsistencyLevel(); - int consistencyCode = - (consistency == null) - ? consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - : consistency.getProtocolCode(); - int pageSize = config.getInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE); - boolean pageSizeInBytes = config.getBoolean(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES); - int maxPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES); - int maxPagesPerSecond = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND); - int maxEnqueuedPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - ContinuousPagingOptions options = - new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages); - ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); - int serialConsistencyCode = - (serialConsistency == null) - ? 
consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - : serialConsistency.getProtocolCode(); - long timestamp = statement.getQueryTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - CodecRegistry codecRegistry = context.getCodecRegistry(); - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = statement.getKeyspace(); - if (statement instanceof SimpleStatement) { - SimpleStatement simpleStatement = (SimpleStatement) statement; - List positionalValues = simpleStatement.getPositionalValues(); - Map namedValues = simpleStatement.getNamedValues(); - if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (keyspace != null - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyCode, - Conversions.encode(positionalValues, codecRegistry, protocolVersion), - Conversions.encode(namedValues, codecRegistry, protocolVersion), - false, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - (keyspace == null) ? 
null : keyspace.asInternal(), - pageSizeInBytes, - options); - return new Query(simpleStatement.getQuery(), queryOptions); - } else if (statement instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) statement; - if (!protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { - Conversions.ensureAllSet(boundStatement); - } - boolean skipMetadata = - boundStatement.getPreparedStatement().getResultSetDefinitions().size() > 0; - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyCode, - boundStatement.getValues(), - Collections.emptyMap(), - skipMetadata, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - null, - pageSizeInBytes, - options); - PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); - ByteBuffer id = preparedStatement.getId(); - ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId(); - return new Execute( - Bytes.getArray(id), - (resultMetadataId == null) ? 
null : Bytes.getArray(resultMetadataId), - queryOptions); - } else { - throw new IllegalArgumentException( - "Unsupported statement type: " + statement.getClass().getName()); - } - } - - public static CoordinatorException toThrowable( - Node node, Error errorMessage, InternalDriverContext context) { - switch (errorMessage.code) { - case DseProtocolConstants.ErrorCode.CLIENT_WRITE_FAILURE: - return new UnfitClientException(node, errorMessage.message); - default: - return Conversions.toThrowable(node, errorMessage, context); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java deleted file mode 100644 index 8a098bf2895..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ContinuousCqlRequestAsyncProcessor - implements RequestProcessor, CompletionStage> { - - public static final GenericType> - CONTINUOUS_RESULT_ASYNC = new GenericType>() {}; - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(CONTINUOUS_RESULT_ASYNC); - } - - @Override - public CompletionStage process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new ContinuousCqlRequestHandler(request, session, context, sessionLogPrefix).handle(); - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java deleted file mode 100644 index dd308c11854..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.cql.DseConversions; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.DefaultRow; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.protocol.internal.Message; -import 
com.datastax.oss.protocol.internal.response.result.Rows; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import net.jcip.annotations.ThreadSafe; - -/** - * Handles a request that supports multiple response messages (a.k.a. continuous paging request). - */ -@ThreadSafe -public class ContinuousCqlRequestHandler - extends ContinuousRequestHandlerBase, ContinuousAsyncResultSet> { - - ContinuousCqlRequestHandler( - @NonNull Statement statement, - @NonNull DefaultSession session, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix) { - super( - statement, - session, - context, - sessionLogPrefix, - ContinuousAsyncResultSet.class, - false, - DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, - DseSessionMetric.CONTINUOUS_CQL_REQUESTS, - DefaultNodeMetric.CQL_MESSAGES); - // NOTE that ordering of the following statement matters. - // We should register this request after all fields have been initialized. 
- throttler.register(this); - } - - @NonNull - @Override - protected Duration getGlobalTimeout() { - return Duration.ZERO; - } - - @NonNull - @Override - protected Duration getPageTimeout(@NonNull Statement statement, int pageNumber) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - if (pageNumber == 1) { - return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE); - } else { - return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES); - } - } - - @NonNull - @Override - protected Duration getReviseRequestTimeout(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES); - } - - @Override - protected int getMaxEnqueuedPages(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - } - - @Override - protected int getMaxPages(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES); - } - - @NonNull - @Override - protected Message getMessage(@NonNull Statement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return DseConversions.toContinuousPagingMessage(statement, executionProfile, context); - } - - @Override - protected boolean isTracingEnabled(@NonNull Statement statement) { - return false; - } - - @NonNull - @Override - protected Map createPayload(@NonNull Statement statement) { - return statement.getCustomPayload(); - } - - @NonNull - @Override - protected ContinuousAsyncResultSet 
createEmptyResultSet(@NonNull ExecutionInfo executionInfo) { - return DefaultContinuousAsyncResultSet.empty(executionInfo); - } - - @NonNull - @Override - protected DefaultContinuousAsyncResultSet createResultSet( - @NonNull Statement statement, - @NonNull Rows rows, - @NonNull ExecutionInfo executionInfo, - @NonNull ColumnDefinitions columnDefinitions) { - Queue> data = rows.getData(); - CountingIterator iterator = - new CountingIterator(data.size()) { - @Override - protected Row computeNext() { - List rowData = data.poll(); - return (rowData == null) - ? endOfData() - : new DefaultRow(columnDefinitions, rowData, context); - } - }; - DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); - return new DefaultContinuousAsyncResultSet( - iterator, - columnDefinitions, - metadata.continuousPageNumber, - !metadata.isLastContinuousPage, - executionInfo, - this); - } - - @Override - protected int pageNumber(@NonNull ContinuousAsyncResultSet resultSet) { - return resultSet.pageNumber(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java deleted file mode 100644 index f151eb7eae2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ContinuousCqlRequestSyncProcessor - implements RequestProcessor, ContinuousResultSet> { - - public static final GenericType CONTINUOUS_RESULT_SYNC = - GenericType.of(ContinuousResultSet.class); - - private final ContinuousCqlRequestAsyncProcessor asyncProcessor; - - public ContinuousCqlRequestSyncProcessor(ContinuousCqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(CONTINUOUS_RESULT_SYNC); - } - - @Override - public ContinuousResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - 
String sessionLogPrefix) { - BlockingOperation.checkNotDriverThread(); - ContinuousAsyncResultSet firstPage = - CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - return new DefaultContinuousResultSet(firstPage); - } - - @Override - public ContinuousResultSet newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java deleted file mode 100644 index 0453022cb6a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java +++ /dev/null @@ -1,1645 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.dse.driver.internal.core.cql.DseConversions; -import com.datastax.dse.protocol.internal.request.Revise; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import 
com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.DefaultExecutionInfo; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import 
com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.Void; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.handler.codec.EncoderException; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles a request that supports multiple response messages (a.k.a. continuous paging request). 
- */ -@ThreadSafe -public abstract class ContinuousRequestHandlerBase - implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(ContinuousRequestHandlerBase.class); - - protected final String logPrefix; - protected final StatementT initialStatement; - protected final DefaultSession session; - private final CqlIdentifier keyspace; - protected final InternalDriverContext context; - private final Queue queryPlan; - protected final RequestThrottler throttler; - private final boolean protocolBackpressureAvailable; - private final Timer timer; - private final SessionMetricUpdater sessionMetricUpdater; - private final boolean specExecEnabled; - private final SessionMetric clientTimeoutsMetric; - private final SessionMetric continuousRequestsMetric; - private final NodeMetric messagesMetric; - private final List scheduledExecutions; - - // The errors on the nodes that were already tried. - // We don't use a map because nodes can appear multiple times. - protected final List> errors = new CopyOnWriteArrayList<>(); - - /** - * The list of in-flight executions, one per node. Executions may be triggered by speculative - * executions or retries. An execution is added to this list when the write operation completes. - * It is removed from this list when the callback has done reading responses. - */ - private final List inFlightCallbacks = new CopyOnWriteArrayList<>(); - - /** The callback selected to stream results back to the client. */ - private final CompletableFuture chosenCallback = new CompletableFuture<>(); - - /** - * How many speculative executions are currently running (including the initial execution). We - * track this in order to know when to fail the request if all executions have reached the end of - * the query plan. - */ - private final AtomicInteger activeExecutionsCount = new AtomicInteger(0); - - /** - * How many speculative executions have started (excluding the initial execution), whether they - * have completed or not. 
We track this in order to fill execution info objects with this - * information. - */ - protected final AtomicInteger startedSpeculativeExecutionsCount = new AtomicInteger(0); - - // Set when the execution starts, and is never modified after. - private final long startTimeNanos; - private volatile Timeout globalTimeout; - - private final Class resultSetClass; - - public ContinuousRequestHandlerBase( - @NonNull StatementT statement, - @NonNull DefaultSession session, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix, - @NonNull Class resultSetClass, - boolean specExecEnabled, - SessionMetric clientTimeoutsMetric, - SessionMetric continuousRequestsMetric, - NodeMetric messagesMetric) { - this.resultSetClass = resultSetClass; - - ProtocolVersion protocolVersion = context.getProtocolVersion(); - if (!context - .getProtocolVersionRegistry() - .supports(protocolVersion, DseProtocolFeature.CONTINUOUS_PAGING)) { - throw new IllegalStateException( - "Cannot execute continuous paging requests with protocol version " + protocolVersion); - } - this.clientTimeoutsMetric = clientTimeoutsMetric; - this.continuousRequestsMetric = continuousRequestsMetric; - this.messagesMetric = messagesMetric; - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new continuous handler for request {}", logPrefix, statement); - this.initialStatement = statement; - this.session = session; - this.keyspace = session.getKeyspace().orElse(null); - this.context = context; - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - this.queryPlan = - statement.getNode() != null - ? 
new SimpleQueryPlan(statement.getNode()) - : context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(statement, executionProfile.getName(), session); - this.timer = context.getNettyOptions().getTimer(); - - this.protocolBackpressureAvailable = - protocolVersion.getCode() >= DseProtocolVersion.DSE_V2.getCode(); - this.throttler = context.getRequestThrottler(); - this.sessionMetricUpdater = session.getMetricUpdater(); - this.startTimeNanos = System.nanoTime(); - this.specExecEnabled = specExecEnabled; - this.scheduledExecutions = this.specExecEnabled ? new CopyOnWriteArrayList<>() : null; - } - - @NonNull - protected abstract Duration getGlobalTimeout(); - - @NonNull - protected abstract Duration getPageTimeout(@NonNull StatementT statement, int pageNumber); - - @NonNull - protected abstract Duration getReviseRequestTimeout(@NonNull StatementT statement); - - protected abstract int getMaxEnqueuedPages(@NonNull StatementT statement); - - protected abstract int getMaxPages(@NonNull StatementT statement); - - @NonNull - protected abstract Message getMessage(@NonNull StatementT statement); - - protected abstract boolean isTracingEnabled(@NonNull StatementT statement); - - @NonNull - protected abstract Map createPayload(@NonNull StatementT statement); - - @NonNull - protected abstract ResultSetT createEmptyResultSet(@NonNull ExecutionInfo executionInfo); - - protected abstract int pageNumber(@NonNull ResultSetT resultSet); - - @NonNull - protected abstract ResultSetT createResultSet( - @NonNull StatementT statement, - @NonNull Rows rows, - @NonNull ExecutionInfo executionInfo, - @NonNull ColumnDefinitions columnDefinitions) - throws IOException; - - // MAIN LIFECYCLE - - @Override - public void onThrottleReady(boolean wasDelayed) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - if (wasDelayed - // avoid call to nanoTime() if metric is disabled: - && sessionMetricUpdater.isEnabled( - 
DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { - session - .getMetricUpdater() - .updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - activeExecutionsCount.incrementAndGet(); - sendRequest(initialStatement, null, 0, 0, specExecEnabled); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - session - .getMetricUpdater() - .incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - abortGlobalRequestOrChosenCallback(error); - } - - private void abortGlobalRequestOrChosenCallback(@NonNull Throwable error) { - if (!chosenCallback.completeExceptionally(error)) { - chosenCallback.thenAccept(callback -> callback.abort(error, false)); - } - } - - public CompletionStage handle() { - globalTimeout = scheduleGlobalTimeout(); - return fetchNextPage(); - } - - /** - * Builds the future that will get returned to the user from the initial execute call or a - * fetchNextPage() on the async API. - */ - public CompletionStage fetchNextPage() { - CompletableFuture result = new CompletableFuture<>(); - - // This is equivalent to - // `chosenCallback.thenCompose(NodeResponseCallback::dequeueOrCreatePending)`, except - // that we need to cancel `result` if `resultSetError` is a CancellationException. 
- chosenCallback.whenComplete( - (callback, callbackError) -> { - if (callbackError != null) { - result.completeExceptionally(callbackError); - } else { - callback - .dequeueOrCreatePending() - .whenComplete( - (resultSet, resultSetError) -> { - if (resultSetError != null) { - result.completeExceptionally(resultSetError); - } else { - result.complete(resultSet); - } - }); - } - }); - - // If the user cancels the future, propagate to our internal components - result.whenComplete( - (rs, t) -> { - if (t instanceof CancellationException) { - cancel(); - } - }); - - return result; - } - - /** - * Sends the initial request to the next available node. - * - * @param node if not null, it will be attempted first before the rest of the query plan. It - * happens only when we retry on the same host. - * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. - * @param retryCount the number of times that the retry policy was invoked for this execution - * already (note that some internal retries don't go through the policy, and therefore don't - * increment this counter) - * @param scheduleSpeculativeExecution whether to schedule the next speculative execution - */ - private void sendRequest( - StatementT statement, - @Nullable Node node, - int currentExecutionIndex, - int retryCount, - boolean scheduleSpeculativeExecution) { - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { - while ((node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - // We've reached the end of the query plan without finding any node to write to; abort the - // continuous paging session. 
- if (activeExecutionsCount.decrementAndGet() == 0) { - abortGlobalRequestOrChosenCallback(AllNodesFailedException.fromErrors(errors)); - } - } else if (!chosenCallback.isDone()) { - NodeResponseCallback nodeResponseCallback = - new NodeResponseCallback( - statement, - node, - channel, - currentExecutionIndex, - retryCount, - scheduleSpeculativeExecution, - logPrefix); - inFlightCallbacks.add(nodeResponseCallback); - channel - .write( - getMessage(statement), - isTracingEnabled(statement), - createPayload(statement), - nodeResponseCallback) - .addListener(nodeResponseCallback); - } - } - - private Timeout scheduleGlobalTimeout() { - Duration globalTimeout = getGlobalTimeout(); - if (globalTimeout.toNanos() <= 0) { - return null; - } - LOG.trace("[{}] Scheduling global timeout for pages in {}", logPrefix, globalTimeout); - return timer.newTimeout( - timeout -> - abortGlobalRequestOrChosenCallback( - new DriverTimeoutException("Query timed out after " + globalTimeout)), - globalTimeout.toNanos(), - TimeUnit.NANOSECONDS); - } - - /** - * Cancels the continuous paging request. - * - *

Called from user code, see {@link DefaultContinuousAsyncResultSet#cancel()}, or from a - * driver I/O thread. - */ - public void cancel() { - // If chosenCallback is already set, this is a no-op and the chosen callback will be handled by - // cancelScheduledTasks - chosenCallback.cancel(true); - - cancelScheduledTasks(null); - cancelGlobalTimeout(); - throttler.signalCancel(this); - } - - private void cancelGlobalTimeout() { - if (globalTimeout != null) { - globalTimeout.cancel(); - } - } - - /** - * Cancel all pending and scheduled executions, except the one passed as an argument to the - * method. - * - * @param toIgnore An optional execution to ignore (will not be cancelled). - */ - private void cancelScheduledTasks(@Nullable NodeResponseCallback toIgnore) { - if (scheduledExecutions != null) { - for (Timeout scheduledExecution : scheduledExecutions) { - scheduledExecution.cancel(); - } - } - for (NodeResponseCallback callback : inFlightCallbacks) { - if (toIgnore == null || toIgnore != callback) { - callback.cancel(); - } - } - } - - @VisibleForTesting - int getState() { - try { - return chosenCallback.get().getState(); - } catch (CancellationException e) { - // Happens if the test cancels before the callback was chosen - return NodeResponseCallback.STATE_FAILED; - } catch (InterruptedException | ExecutionException e) { - // We never interrupt or fail chosenCallback (other than canceling) - throw new AssertionError("Unexpected error", e); - } - } - - @VisibleForTesting - CompletableFuture getPendingResult() { - try { - return chosenCallback.get().getPendingResult(); - } catch (Exception e) { - // chosenCallback should always be complete by the time tests call this - throw new AssertionError("Expected callback to be chosen at this point"); - } - } - - private void recordError(@NonNull Node node, @NonNull Throwable error) { - errors.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - /** - * Handles the interaction with a single node in the query plan. 
- * - *

An instance of this class is created each time we (re)try a node. The first callback that - * has something ready to enqueue will be allowed to stream results back to the client; the others - * will be cancelled. - */ - private class NodeResponseCallback - implements ResponseCallback, GenericFutureListener> { - - private final long messageStartTimeNanos = System.nanoTime(); - private final StatementT statement; - private final Node node; - private final DriverChannel channel; - // The identifier of the current execution (0 for the initial execution, 1 for the first - // speculative execution, etc.) - private final int executionIndex; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final String logPrefix; - private final boolean scheduleSpeculativeExecution; - - private final DriverExecutionProfile executionProfile; - - // Coordinates concurrent accesses between the client and I/O threads - private final ReentrantLock lock = new ReentrantLock(); - - // The page queue, storing responses that we have received and have not been consumed by the - // client yet. We instantiate it lazily to avoid unnecessary allocation; this is also used to - // check if the callback ever tried to enqueue something. - @GuardedBy("lock") - private Queue queue; - - // If the client requests a page and we can't serve it immediately (empty queue), then we create - // this future and have the client wait on it. Otherwise this field is null. - @GuardedBy("lock") - private CompletableFuture pendingResult; - - // How many pages were requested. This is the total number of pages requested from the - // beginning. 
- // It will be zero if the protocol does not support numPagesRequested (DSE_V1) - @GuardedBy("lock") - private int numPagesRequested; - - // An integer that represents the state of the continuous paging request: - // - if positive, it is the sequence number of the next expected page; - // - if negative, it is a terminal state, identified by the constants below. - @GuardedBy("lock") - private int state = 1; - - // Whether isLastResponse has returned true already - @GuardedBy("lock") - private boolean sawLastResponse; - - @GuardedBy("lock") - private boolean sentCancelRequest; - - private static final int STATE_FINISHED = -1; - private static final int STATE_FAILED = -2; - - @GuardedBy("lock") - private int streamId = -1; - - // These are set when the first page arrives, and are never modified after. - private volatile ColumnDefinitions columnDefinitions; - - private volatile Timeout pageTimeout; - - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt, 1 for the first retry, etc.). 
- private final int retryCount; - - // SpeculativeExecution node metrics should be executed only for the first page (first - // invocation) - private final AtomicBoolean stopNodeMessageTimerReported = new AtomicBoolean(false); - private final AtomicBoolean nodeErrorReported = new AtomicBoolean(false); - private final AtomicBoolean nodeSuccessReported = new AtomicBoolean(false); - - public NodeResponseCallback( - StatementT statement, - Node node, - DriverChannel channel, - int executionIndex, - int retryCount, - boolean scheduleSpeculativeExecution, - String logPrefix) { - this.statement = statement; - this.node = node; - this.channel = channel; - this.executionIndex = executionIndex; - this.retryCount = retryCount; - this.scheduleSpeculativeExecution = scheduleSpeculativeExecution; - this.logPrefix = logPrefix + "|" + executionIndex; - this.executionProfile = Conversions.resolveExecutionProfile(statement, context); - } - - @Override - public void onStreamIdAssigned(int streamId) { - LOG.trace("[{}] Assigned streamId {} on node {}", logPrefix, streamId, node); - lock.lock(); - try { - this.streamId = streamId; - if (state < 0) { - // This happens if we were cancelled before getting the stream id, we have a request in - // flight that needs to be cancelled - releaseStreamId(); - } - } finally { - lock.unlock(); - } - } - - @Override - public boolean isLastResponse(@NonNull Frame responseFrame) { - lock.lock(); - try { - Message message = responseFrame.message; - boolean isLastResponse; - - if (sentCancelRequest) { - // The only response we accept is the SERVER_ERROR triggered by a successful cancellation. - // Otherwise we risk releasing and reusing the stream id while the cancel request is still - // in flight, and it might end up cancelling an unrelated request. - // Note that there is a chance that the request ends normally right after we send the - // cancel request. In that case this method never returns true and the stream id will - // remain orphaned forever. 
This should be very rare so this is acceptable. - if (message instanceof Error) { - Error error = (Error) message; - isLastResponse = - (error.code == ProtocolConstants.ErrorCode.SERVER_ERROR) - && error.message.contains("Session cancelled by the user"); - } else { - isLastResponse = false; - } - } else if (message instanceof Rows) { - Rows rows = (Rows) message; - DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); - isLastResponse = metadata.isLastContinuousPage; - } else { - isLastResponse = message instanceof Error; - } - - if (isLastResponse) { - sawLastResponse = true; - } - return isLastResponse; - } finally { - lock.unlock(); - } - } - - /** - * Invoked when the write from {@link #sendRequest} completes. - * - * @param future The future representing the outcome of the write operation. - */ - @Override - public void operationComplete(@NonNull Future future) { - if (!future.isSuccess()) { - Throwable error = future.cause(); - if (error instanceof EncoderException - && error.getCause() instanceof FrameTooLongException) { - trackNodeError(node, error.getCause()); - lock.lock(); - try { - abort(error.getCause(), false); - } finally { - lock.unlock(); - } - } else { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - channel, - error); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); - recordError(node, error); - trackNodeError(node, error.getCause()); - sendRequest(statement, null, executionIndex, retryCount, scheduleSpeculativeExecution); - } - } else { - LOG.trace("[{}] Request sent on {}", logPrefix, channel); - if (scheduleSpeculativeExecution - && Conversions.resolveIdempotence(statement, executionProfile)) { - int nextExecution = executionIndex + 1; - // Note that `node` is the first node of the execution, it might not be the "slow" one - // if there were retries, but in practice retries are rare. 
- long nextDelay = - Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) - .nextExecution(node, keyspace, statement, nextExecution); - if (nextDelay >= 0) { - scheduleSpeculativeExecution(nextExecution, nextDelay); - } else { - LOG.trace( - "[{}] Speculative execution policy returned {}, no next execution", - logPrefix, - nextDelay); - } - } - pageTimeout = schedulePageTimeout(1); - } - } - - private void scheduleSpeculativeExecution(int nextExecutionIndex, long delay) { - LOG.trace( - "[{}] Scheduling speculative execution {} in {} ms", - logPrefix, - nextExecutionIndex, - delay); - try { - scheduledExecutions.add( - timer.newTimeout( - (Timeout timeout) -> { - if (!chosenCallback.isDone()) { - LOG.trace( - "[{}] Starting speculative execution {}", logPrefix, nextExecutionIndex); - activeExecutionsCount.incrementAndGet(); - startedSpeculativeExecutionsCount.incrementAndGet(); - NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (nodeMetricUpdater.isEnabled( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName())) { - nodeMetricUpdater.incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - } - sendRequest(statement, null, nextExecutionIndex, 0, true); - } - }, - delay, - TimeUnit.MILLISECONDS)); - } catch (IllegalStateException e) { - logTimeoutSchedulingError(e); - } - } - - private Timeout schedulePageTimeout(int expectedPage) { - if (expectedPage < 0) { - return null; - } - Duration timeout = getPageTimeout(statement, expectedPage); - if (timeout.toNanos() <= 0) { - return null; - } - LOG.trace("[{}] Scheduling timeout for page {} in {}", logPrefix, expectedPage, timeout); - return timer.newTimeout( - t -> onPageTimeout(expectedPage), timeout.toNanos(), TimeUnit.NANOSECONDS); - } - - private void onPageTimeout(int expectedPage) { - lock.lock(); - try { - if (state == expectedPage) { - abort( - new DriverTimeoutException( - String.format("Timed out waiting 
for page %d", expectedPage)), - false); - } else { - // Ignore timeout if the request has moved on in the interim. - LOG.trace( - "[{}] Timeout fired for page {} but query already at state {}, skipping", - logPrefix, - expectedPage, - state); - } - } finally { - lock.unlock(); - } - } - - /** - * Invoked when a continuous paging response is received, either a successful or failed one. - * - *

Delegates further processing to appropriate methods: {@link #processResultResponse(Result, - * Frame)} if the response was successful, or {@link #processErrorResponse(Error)} if it wasn't. - * - * @param response the received {@link Frame}. - */ - @Override - public void onResponse(@NonNull Frame response) { - stopNodeMessageTimer(); - cancelTimeout(pageTimeout); - lock.lock(); - try { - if (state < 0) { - LOG.trace("[{}] Got result but the request has been cancelled, ignoring", logPrefix); - return; - } - try { - Message responseMessage = response.message; - if (responseMessage instanceof Result) { - LOG.trace("[{}] Got result", logPrefix); - processResultResponse((Result) responseMessage, response); - } else if (responseMessage instanceof Error) { - LOG.trace("[{}] Got error response", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - IllegalStateException error = - new IllegalStateException("Unexpected response " + responseMessage); - trackNodeError(node, error); - abort(error, false); - } - } catch (Throwable t) { - trackNodeError(node, t); - abort(t, false); - } - } finally { - lock.unlock(); - } - } - - /** - * Invoked when a continuous paging request hits an unexpected error. - * - *

Delegates further processing to to the retry policy ({@link - * #processRetryVerdict(RetryVerdict, Throwable)}. - * - * @param error the error encountered, usually a network problem. - */ - @Override - public void onFailure(@NonNull Throwable error) { - cancelTimeout(pageTimeout); - LOG.trace(String.format("[%s] Request failure", logPrefix), error); - RetryVerdict verdict; - if (!Conversions.resolveIdempotence(statement, executionProfile) - || error instanceof FrameTooLongException) { - verdict = RetryVerdict.RETHROW; - } else { - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); - } catch (Throwable cause) { - abort( - new IllegalStateException("Unexpected error while invoking the retry policy", cause), - false); - return; - } - } - updateErrorMetrics( - ((DefaultNode) node).getMetricUpdater(), - verdict, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED); - lock.lock(); - try { - processRetryVerdict(verdict, error); - } finally { - lock.unlock(); - } - } - - // PROCESSING METHODS - - /** - * Processes a new result response, creating the corresponding {@link ResultSetT} object and - * then enqueuing it or serving it directly to the user if he was waiting for it. - * - * @param result the result to process. It is normally a {@link Rows} object, but may be a - * {@link Void} object if the retry policy decided to ignore an error. - * @param frame the {@link Frame} (used to create the {@link ExecutionInfo} the first time). 
- */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void processResultResponse(@NonNull Result result, @Nullable Frame frame) { - assert lock.isHeldByCurrentThread(); - try { - ExecutionInfo executionInfo = createExecutionInfo(result, frame); - if (result instanceof Rows) { - DseRowsMetadata rowsMetadata = (DseRowsMetadata) ((Rows) result).getMetadata(); - if (columnDefinitions == null) { - // Contrary to ROWS responses from regular queries, - // the first page always includes metadata so we use this - // regardless of whether or not the query was from a prepared statement. - columnDefinitions = Conversions.toColumnDefinitions(rowsMetadata, context); - } - int pageNumber = rowsMetadata.continuousPageNumber; - int currentPage = state; - if (pageNumber != currentPage) { - abort( - new IllegalStateException( - String.format( - "Received page %d but was expecting %d", pageNumber, currentPage)), - false); - } else { - int pageSize = ((Rows) result).getData().size(); - ResultSetT resultSet = - createResultSet(statement, (Rows) result, executionInfo, columnDefinitions); - if (rowsMetadata.isLastContinuousPage) { - LOG.trace("[{}] Received last page ({} - {} rows)", logPrefix, pageNumber, pageSize); - state = STATE_FINISHED; - reenableAutoReadIfNeeded(); - enqueueOrCompletePending(resultSet); - stopGlobalRequestTimer(); - cancelTimeout(globalTimeout); - } else { - LOG.trace("[{}] Received page {} ({} rows)", logPrefix, pageNumber, pageSize); - if (currentPage > 0) { - state = currentPage + 1; - } - enqueueOrCompletePending(resultSet); - } - } - } else { - // Void responses happen only when the retry decision is ignore. 
- assert result instanceof Void; - ResultSetT resultSet = createEmptyResultSet(executionInfo); - LOG.trace( - "[{}] Continuous paging interrupted by retry policy decision to ignore error", - logPrefix); - state = STATE_FINISHED; - reenableAutoReadIfNeeded(); - enqueueOrCompletePending(resultSet); - stopGlobalRequestTimer(); - cancelTimeout(globalTimeout); - } - } catch (Throwable error) { - abort(error, false); - } - } - - /** - * Processes an unsuccessful response. - * - *

Depending on the error, may trigger: - * - *

    - *
  1. a re-prepare cycle, see {@link #processUnprepared(Unprepared)}; - *
  2. an immediate retry on the next host, bypassing the retry policy, if the host was - * bootstrapping; - *
  3. an immediate abortion if the error is unrecoverable; - *
  4. further processing if the error is recoverable, see {@link - * #processRecoverableError(CoordinatorException)} - *
- * - * @param errorMessage the error message received. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void processErrorResponse(@NonNull Error errorMessage) { - assert lock.isHeldByCurrentThread(); - if (errorMessage instanceof Unprepared) { - processUnprepared((Unprepared) errorMessage); - } else { - CoordinatorException error = DseConversions.toThrowable(node, errorMessage, context); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - trackNodeError(node, error); - sendRequest(statement, null, executionIndex, retryCount, false); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError - || state > 1) { - // we only process recoverable errors for the first page, - // errors on subsequent pages will always trigger an immediate abortion - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - metricUpdater.incrementCounter( - DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); - trackNodeError(node, error); - abort(error, true); - } else { - try { - processRecoverableError(error); - } catch (Throwable cause) { - abort(cause, false); - } - } - } - } - - /** - * Processes a recoverable error. - * - *

In most cases, delegates to the retry policy and its decision, see {@link - * #processRetryVerdict(RetryVerdict, Throwable)}. - * - * @param error the recoverable error. - */ - private void processRecoverableError(@NonNull CoordinatorException error) { - assert lock.isHeldByCurrentThread(); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - RetryVerdict verdict; - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - if (error instanceof ReadTimeoutException) { - ReadTimeoutException readTimeout = (ReadTimeoutException) error; - verdict = - retryPolicy.onReadTimeoutVerdict( - statement, - readTimeout.getConsistencyLevel(), - readTimeout.getBlockFor(), - readTimeout.getReceived(), - readTimeout.wasDataPresent(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); - } else if (error instanceof WriteTimeoutException) { - WriteTimeoutException writeTimeout = (WriteTimeoutException) error; - if (Conversions.resolveIdempotence(statement, executionProfile)) { - verdict = - retryPolicy.onWriteTimeoutVerdict( - statement, - writeTimeout.getConsistencyLevel(), - writeTimeout.getWriteType(), - writeTimeout.getBlockFor(), - writeTimeout.getReceived(), - retryCount); - } else { - verdict = RetryVerdict.RETHROW; - } - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); - } else if (error instanceof UnavailableException) { - UnavailableException unavailable = (UnavailableException) error; - verdict = - retryPolicy.onUnavailableVerdict( - statement, - unavailable.getConsistencyLevel(), - unavailable.getRequired(), - unavailable.getAlive(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.UNAVAILABLES, - 
DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); - } else { - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? retryPolicy.onErrorResponseVerdict(statement, error, retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); - } - processRetryVerdict(verdict, error); - } - - /** - * Processes an {@link Unprepared} error by re-preparing then retrying on the same host. - * - * @param errorMessage the unprepared error message. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void processUnprepared(@NonNull Unprepared errorMessage) { - assert lock.isHeldByCurrentThread(); - ByteBuffer idToReprepare = ByteBuffer.wrap(errorMessage.id); - LOG.trace( - "[{}] Statement {} is not prepared on {}, re-preparing", - logPrefix, - Bytes.toHexString(idToReprepare), - node); - RepreparePayload repreparePayload = session.getRepreparePayloads().get(idToReprepare); - if (repreparePayload == null) { - throw new IllegalStateException( - String.format( - "Tried to execute unprepared query %s but we don't have the data to re-prepare it", - Bytes.toHexString(idToReprepare))); - } - Prepare prepare = repreparePayload.toMessage(); - Duration timeout = executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - ThrottledAdminRequestHandler.prepare( - channel, - true, - prepare, - repreparePayload.customPayload, - timeout, - throttler, - sessionMetricUpdater, - logPrefix) - .start() - .whenComplete( - (repreparedId, exception) -> { - // If we run into an unrecoverable error, surface it to the client instead of - // retrying - Throwable fatalError = null; - if (exception == null) { - if (!repreparedId.equals(idToReprepare)) { - IllegalStateException illegalStateException = - new IllegalStateException( - String.format( - "ID 
mismatch while trying to reprepare (expected %s, got %s). " - + "This prepared statement won't work anymore. " - + "This usually happens when you run a 'USE...' query after " - + "the statement was prepared.", - Bytes.toHexString(idToReprepare), Bytes.toHexString(repreparedId))); - trackNodeError(node, illegalStateException); - fatalError = illegalStateException; - } else { - LOG.trace( - "[{}] Re-prepare successful, retrying on the same node ({})", - logPrefix, - node); - sendRequest(statement, node, executionIndex, retryCount, false); - } - } else { - if (exception instanceof UnexpectedResponseException) { - Message prepareErrorMessage = ((UnexpectedResponseException) exception).message; - if (prepareErrorMessage instanceof Error) { - CoordinatorException prepareError = - DseConversions.toThrowable(node, (Error) prepareErrorMessage, context); - if (prepareError instanceof QueryValidationException - || prepareError instanceof FunctionFailureException - || prepareError instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error on re-prepare, rethrowing", logPrefix); - trackNodeError(node, prepareError); - fatalError = prepareError; - } - } - } else if (exception instanceof RequestThrottlingException) { - trackNodeError(node, exception); - fatalError = exception; - } - if (fatalError == null) { - LOG.trace("[{}] Re-prepare failed, trying next node", logPrefix); - recordError(node, exception); - trackNodeError(node, exception); - sendRequest(statement, null, executionIndex, retryCount, false); - } - } - if (fatalError != null) { - lock.lock(); - try { - abort(fatalError, true); - } finally { - lock.unlock(); - } - } - }); - } - - /** - * Processes the retry decision by triggering a retry, aborting or ignoring; also records the - * failures for further access. - * - * @param verdict the verdict to process. - * @param error the original error. 
- */ - private void processRetryVerdict(@NonNull RetryVerdict verdict, @NonNull Throwable error) { - assert lock.isHeldByCurrentThread(); - LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - recordError(node, error); - trackNodeError(node, error); - sendRequest( - verdict.getRetryRequest(statement), node, executionIndex, retryCount + 1, false); - break; - case RETRY_NEXT: - recordError(node, error); - trackNodeError(node, error); - sendRequest( - verdict.getRetryRequest(statement), null, executionIndex, retryCount + 1, false); - break; - case RETHROW: - trackNodeError(node, error); - abort(error, true); - break; - case IGNORE: - processResultResponse(Void.INSTANCE, null); - break; - } - } - - // PAGE HANDLING - - /** - * Enqueues a response or, if the client was already waiting for it, completes the pending - * future. - * - *

Guarded by {@link #lock}. - * - * @param pageOrError the next page, or an error. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void enqueueOrCompletePending(@NonNull Object pageOrError) { - assert lock.isHeldByCurrentThread(); - - if (queue == null) { - // This is the first time this callback tries to stream something back to the client, check - // if it can be selected - if (!chosenCallback.complete(this)) { - if (LOG.isTraceEnabled()) { - LOG.trace( - "[{}] Trying to enqueue {} but another callback was already chosen, aborting", - logPrefix, - asTraceString(pageOrError)); - } - // Discard the data, this callback will be canceled shortly since the chosen callback - // invoked cancelScheduledTasks - return; - } - - queue = new ArrayDeque<>(getMaxEnqueuedPages(statement)); - numPagesRequested = protocolBackpressureAvailable ? getMaxEnqueuedPages(statement) : 0; - cancelScheduledTasks(this); - } - - if (pendingResult != null) { - if (LOG.isTraceEnabled()) { - LOG.trace( - "[{}] Client was waiting on empty queue, completing with {}", - logPrefix, - asTraceString(pageOrError)); - } - CompletableFuture tmp = pendingResult; - // null out pendingResult before completing it because its completion - // may trigger a call to fetchNextPage -> dequeueOrCreatePending, - // which expects pendingResult to be null. 
- pendingResult = null; - completeResultSetFuture(tmp, pageOrError); - } else { - if (LOG.isTraceEnabled()) { - LOG.trace("[{}] Enqueuing {}", logPrefix, asTraceString(pageOrError)); - } - queue.add(pageOrError); - // Backpressure without protocol support: if the queue grows too large, - // disable auto-read so that the channel eventually becomes - // non-writable on the server side (causing it to back off for a while) - if (!protocolBackpressureAvailable - && queue.size() == getMaxEnqueuedPages(statement) - && state > 0) { - LOG.trace( - "[{}] Exceeded {} queued response pages, disabling auto-read", - logPrefix, - queue.size()); - channel.config().setAutoRead(false); - } - } - } - - /** - * Dequeue a response or, if the queue is empty, create the future that will get notified of the - * next response, when it arrives. - * - *

Called from user code, see {@link ContinuousAsyncResultSet#fetchNextPage()}. - * - * @return the next page's future; never null. - */ - @NonNull - public CompletableFuture dequeueOrCreatePending() { - lock.lock(); - try { - // If the client was already waiting for a page, there's no way it can call this method - // again - // (this is guaranteed by our public API because in order to ask for the next page, - // you need the reference to the previous page). - assert pendingResult == null; - - Object head = null; - if (queue != null) { - head = queue.poll(); - if (!protocolBackpressureAvailable - && head != null - && queue.size() == getMaxEnqueuedPages(statement) - 1) { - LOG.trace( - "[{}] Back to {} queued response pages, re-enabling auto-read", - logPrefix, - queue.size()); - channel.config().setAutoRead(true); - } - maybeRequestMore(); - } - - if (head != null) { - if (state == STATE_FAILED && !(head instanceof Throwable)) { - LOG.trace( - "[{}] Client requested next page on cancelled queue, discarding page and returning cancelled future", - logPrefix); - return cancelledResultSetFuture(); - } else { - if (LOG.isTraceEnabled()) { - LOG.trace( - "[{}] Client requested next page on non-empty queue, returning immediate future of {}", - logPrefix, - asTraceString(head)); - } - return immediateResultSetFuture(head); - } - } else { - if (state == STATE_FAILED) { - LOG.trace( - "[{}] Client requested next page on cancelled empty queue, returning cancelled future", - logPrefix); - return cancelledResultSetFuture(); - } else { - LOG.trace( - "[{}] Client requested next page but queue is empty, installing future", logPrefix); - pendingResult = new CompletableFuture<>(); - // Only schedule a timeout if we're past the first page (the first page's timeout is - // handled in sendRequest). - if (state > 1) { - pageTimeout = schedulePageTimeout(state); - // Note: each new timeout is cancelled when the next response arrives, see - // onResponse(Frame). 
- } - return pendingResult; - } - } - } finally { - lock.unlock(); - } - } - - /** - * If the total number of results in the queue and in-flight (requested - received) is less than - * half the queue size, then request more pages, unless the {@link #state} is failed, we're - * still waiting for the first page (so maybe still throttled or in the middle of a retry), or - * we don't support backpressure at the protocol level. - */ - @SuppressWarnings("GuardedBy") - private void maybeRequestMore() { - assert lock.isHeldByCurrentThread(); - if (state < 2 || streamId == -1 || !protocolBackpressureAvailable) { - return; - } - // if we have already requested more than the client needs, then no need to request some more - int maxPages = getMaxPages(statement); - if (maxPages > 0 && numPagesRequested >= maxPages) { - return; - } - // the pages received so far, which is the state minus one - int received = state - 1; - int requested = numPagesRequested; - // the pages that fit in the queue, which is the queue free space minus the requests in flight - int freeSpace = getMaxEnqueuedPages(statement) - queue.size(); - int inFlight = requested - received; - int numPagesFittingInQueue = freeSpace - inFlight; - if (numPagesFittingInQueue > 0 - && numPagesFittingInQueue >= getMaxEnqueuedPages(statement) / 2) { - LOG.trace("[{}] Requesting more {} pages", logPrefix, numPagesFittingInQueue); - numPagesRequested = requested + numPagesFittingInQueue; - sendMorePagesRequest(numPagesFittingInQueue); - } - } - - /** - * Sends a request for more pages (a.k.a. backpressure request). - * - * @param nextPages the number of extra pages to request. 
- */ - @SuppressWarnings("GuardedBy") - private void sendMorePagesRequest(int nextPages) { - assert lock.isHeldByCurrentThread(); - assert channel != null : "expected valid connection in order to request more pages"; - assert protocolBackpressureAvailable; - assert streamId != -1; - - LOG.trace("[{}] Sending request for more pages", logPrefix); - ThrottledAdminRequestHandler.query( - channel, - true, - Revise.requestMoreContinuousPages(streamId, nextPages), - statement.getCustomPayload(), - getReviseRequestTimeout(statement), - throttler, - session.getMetricUpdater(), - logPrefix, - "request " + nextPages + " more pages for id " + streamId) - .start() - .handle( - (result, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, "[{}] Error requesting more pages, aborting.", logPrefix, error); - lock.lock(); - try { - // Set fromServer to false because we want the callback to still cancel the - // session if possible or else the server will wait on a timeout. - abort(error, false); - } finally { - lock.unlock(); - } - } - return null; - }); - } - - /** Cancels the given timeout, if non null. 
*/ - private void cancelTimeout(Timeout timeout) { - if (timeout != null) { - LOG.trace("[{}] Cancelling timeout", logPrefix); - timeout.cancel(); - } - } - - // CANCELLATION - - public void cancel() { - lock.lock(); - try { - if (state < 0) { - return; - } else { - LOG.trace( - "[{}] Cancelling continuous paging session with state {} on node {}", - logPrefix, - state, - node); - state = STATE_FAILED; - if (pendingResult != null) { - pendingResult.cancel(true); - } - releaseStreamId(); - } - } finally { - lock.unlock(); - } - reenableAutoReadIfNeeded(); - } - - @SuppressWarnings("GuardedBy") - private void releaseStreamId() { - assert lock.isHeldByCurrentThread(); - // If we saw the last response already, InFlightHandler will release the id so no need to - // cancel explicitly - if (streamId >= 0 && !sawLastResponse && !channel.closeFuture().isDone()) { - // This orphans the stream id, but it will still be held until we see the last response: - channel.cancel(this); - // This tells the server to stop streaming, and send a terminal response: - sendCancelRequest(); - } - } - - @SuppressWarnings("GuardedBy") - private void sendCancelRequest() { - assert lock.isHeldByCurrentThread(); - LOG.trace("[{}] Sending cancel request", logPrefix); - ThrottledAdminRequestHandler.query( - channel, - true, - Revise.cancelContinuousPaging(streamId), - statement.getCustomPayload(), - getReviseRequestTimeout(statement), - throttler, - session.getMetricUpdater(), - logPrefix, - "cancel request") - .start() - .handle( - (result, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Error sending cancel request. 
" - + "This is not critical (the request will eventually time out server-side).", - logPrefix, - error); - } else { - LOG.trace("[{}] Continuous paging session cancelled successfully", logPrefix); - } - return null; - }); - sentCancelRequest = true; - } - - // TERMINATION - - private void reenableAutoReadIfNeeded() { - // Make sure we don't leave the channel unreadable - LOG.trace("[{}] Re-enabling auto-read", logPrefix); - if (!protocolBackpressureAvailable) { - channel.config().setAutoRead(true); - } - } - - // ERROR HANDLING - - private void trackNodeError(@NonNull Node node, @NonNull Throwable error) { - if (nodeErrorReported.compareAndSet(false, true)) { - long latencyNanos = System.nanoTime() - this.messageStartTimeNanos; - context - .getRequestTracker() - .onNodeError(this.statement, error, latencyNanos, executionProfile, node, logPrefix); - } - } - - /** - * Aborts the continuous paging session due to an error that can be either from the server or - * the client. - * - * @param error the error that causes the abortion. - * @param fromServer whether the error was triggered by the coordinator or by the driver. - */ - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void abort(@NonNull Throwable error, boolean fromServer) { - assert lock.isHeldByCurrentThread(); - LOG.trace( - "[{}] Aborting due to {} ({})", - logPrefix, - error.getClass().getSimpleName(), - error.getMessage()); - if (channel == null) { - // This only happens when sending the initial request, if no host was available - // or if the iterator returned by the LBP threw an exception. - // In either case the write was not even attempted, and - // we set the state right now. - enqueueOrCompletePending(error); - state = STATE_FAILED; - } else if (state > 0) { - enqueueOrCompletePending(error); - if (fromServer) { - // We can safely assume the server won't send any more responses, - // so set the state and call release() right now. 
- state = STATE_FAILED; - reenableAutoReadIfNeeded(); - } else { - // attempt to cancel first, i.e. ask server to stop sending responses, - // and only then release. - cancel(); - } - } - stopGlobalRequestTimer(); - cancelTimeout(globalTimeout); - } - - // METRICS - - private void stopNodeMessageTimer() { - if (stopNodeMessageTimerReported.compareAndSet(false, true)) { - ((DefaultNode) node) - .getMetricUpdater() - .updateTimer( - messagesMetric, - executionProfile.getName(), - System.nanoTime() - messageStartTimeNanos, - TimeUnit.NANOSECONDS); - } - } - - private void stopGlobalRequestTimer() { - session - .getMetricUpdater() - .updateTimer( - continuousRequestsMetric, - null, - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - - private void updateErrorMetrics( - @NonNull NodeMetricUpdater metricUpdater, - @NonNull RetryVerdict verdict, - @NonNull DefaultNodeMetric error, - @NonNull DefaultNodeMetric retriesOnError, - @NonNull DefaultNodeMetric ignoresOnError) { - metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - case RETRY_NEXT: - metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); - metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); - break; - case IGNORE: - metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); - metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); - break; - case RETHROW: - // nothing do do - } - } - - // UTILITY METHODS - - @NonNull - private CompletableFuture immediateResultSetFuture(@NonNull Object pageOrError) { - CompletableFuture future = new CompletableFuture<>(); - completeResultSetFuture(future, pageOrError); - return future; - } - - @NonNull - private CompletableFuture cancelledResultSetFuture() { - return immediateResultSetFuture( - new CancellationException( - "Can't get more results because the continuous query has failed 
already. " - + "Most likely this is because the query was cancelled")); - } - - private void completeResultSetFuture( - @NonNull CompletableFuture future, @NonNull Object pageOrError) { - long now = System.nanoTime(); - long totalLatencyNanos = now - startTimeNanos; - long nodeLatencyNanos = now - messageStartTimeNanos; - if (resultSetClass.isInstance(pageOrError)) { - if (future.complete(resultSetClass.cast(pageOrError))) { - throttler.signalSuccess(ContinuousRequestHandlerBase.this); - if (nodeSuccessReported.compareAndSet(false, true)) { - context - .getRequestTracker() - .onNodeSuccess(statement, nodeLatencyNanos, executionProfile, node, logPrefix); - } - context - .getRequestTracker() - .onSuccess(statement, totalLatencyNanos, executionProfile, node, logPrefix); - } - } else { - Throwable error = (Throwable) pageOrError; - if (future.completeExceptionally(error)) { - context - .getRequestTracker() - .onError(statement, error, totalLatencyNanos, executionProfile, node, logPrefix); - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(ContinuousRequestHandlerBase.this); - session - .getMetricUpdater() - .incrementCounter(clientTimeoutsMetric, executionProfile.getName()); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(ContinuousRequestHandlerBase.this, error); - } - } - } - } - - @NonNull - private ExecutionInfo createExecutionInfo(@NonNull Result result, @Nullable Frame response) { - ByteBuffer pagingState = - result instanceof Rows ? ((Rows) result).getMetadata().pagingState : null; - return new DefaultExecutionInfo( - statement, - node, - startedSpeculativeExecutionsCount.get(), - executionIndex, - errors, - pagingState, - response, - true, - session, - context, - executionProfile); - } - - private void logTimeoutSchedulingError(IllegalStateException timeoutError) { - // If we're racing with session shutdown, the timer might be stopped already. 
We don't want - // to schedule more executions anyway, so swallow the error. - if (!"cannot be started once stopped".equals(timeoutError.getMessage())) { - Loggers.warnWithException( - LOG, "[{}] Error while scheduling timeout", logPrefix, timeoutError); - } - } - - @NonNull - private String asTraceString(@NonNull Object pageOrError) { - return resultSetClass.isInstance(pageOrError) - ? "page " + pageNumber(resultSetClass.cast(pageOrError)) - : ((Exception) pageOrError).getClass().getSimpleName(); - } - - private int getState() { - lock.lock(); - try { - return state; - } finally { - lock.unlock(); - } - } - - private CompletableFuture getPendingResult() { - lock.lock(); - try { - return pendingResult; - } finally { - lock.unlock(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java deleted file mode 100644 index 8562fde5905..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class DefaultContinuousAsyncResultSet implements ContinuousAsyncResultSet { - - private final Iterable currentPage; - private final ColumnDefinitions columnDefinitions; - private final int pageNumber; - private final boolean hasMorePages; - private final ExecutionInfo executionInfo; - private final ContinuousCqlRequestHandler handler; - private final CountingIterator iterator; - - public DefaultContinuousAsyncResultSet( - CountingIterator iterator, - ColumnDefinitions columnDefinitions, - int pageNumber, - boolean hasMorePages, - ExecutionInfo executionInfo, - ContinuousCqlRequestHandler handler) { - this.columnDefinitions = columnDefinitions; - this.pageNumber = pageNumber; - this.hasMorePages = hasMorePages; - this.executionInfo = executionInfo; - this.handler = handler; - this.iterator = iterator; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @Override - public boolean wasApplied() { - // always return true for non-conditional updates - return true; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public int 
pageNumber() { - return pageNumber; - } - - @Override - public boolean hasMorePages() { - return hasMorePages; - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - if (!hasMorePages()) { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - return handler.fetchNextPage(); - } - - @Override - public void cancel() { - handler.cancel(); - } - - public static ContinuousAsyncResultSet empty(ExecutionInfo executionInfo) { - - return new ContinuousAsyncResultSet() { - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public Iterable currentPage() { - return Collections.emptyList(); - } - - @Override - public int remaining() { - return 0; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @Override - public int pageNumber() { - return 1; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() - throws IllegalStateException { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - - @Override - public void cancel() { - // noop - } - - @Override - public boolean wasApplied() { - // always true - return true; - } - }; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java deleted file mode 100644 index 929400bc7a6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java +++ 
/dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; - -/** - * This class is roughly equivalent to {@link - * com.datastax.oss.driver.internal.core.cql.MultiPageResultSet}, except that {@link - * RowIterator#maybeMoveToNextPage()} needs to check for cancellation before fetching the next page. 
- */ -@NotThreadSafe -public class DefaultContinuousResultSet implements ContinuousResultSet { - - private final RowIterator iterator; - private final List executionInfos = new ArrayList<>(); - private final ColumnDefinitions columnDefinitions; - - public DefaultContinuousResultSet(ContinuousAsyncResultSet firstPage) { - iterator = new RowIterator(firstPage); - columnDefinitions = firstPage.getColumnDefinitions(); - executionInfos.add(firstPage.getExecutionInfo()); - } - - @Override - public void cancel() { - iterator.cancel(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public List getExecutionInfos() { - return executionInfos; - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean isFullyFetched() { - return iterator.isFullyFetched(); - } - - @Override - public int getAvailableWithoutFetching() { - return iterator.remaining(); - } - - @Override - public boolean wasApplied() { - return iterator.wasApplied(); - } - - private class RowIterator extends CountingIterator { - private ContinuousAsyncResultSet currentPage; - private Iterator currentRows; - private boolean cancelled = false; - - private RowIterator(ContinuousAsyncResultSet firstPage) { - super(firstPage.remaining()); - currentPage = firstPage; - currentRows = firstPage.currentPage().iterator(); - } - - @Override - protected Row computeNext() { - maybeMoveToNextPage(); - return currentRows.hasNext() ? 
currentRows.next() : endOfData(); - } - - private void maybeMoveToNextPage() { - if (!cancelled && !currentRows.hasNext() && currentPage.hasMorePages()) { - BlockingOperation.checkNotDriverThread(); - ContinuousAsyncResultSet nextPage = - CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); - currentPage = nextPage; - remaining += currentPage.remaining(); - currentRows = nextPage.currentPage().iterator(); - executionInfos.add(nextPage.getExecutionInfo()); - } - } - - private boolean isFullyFetched() { - return !currentPage.hasMorePages(); - } - - private boolean wasApplied() { - return currentPage.wasApplied(); - } - - private void cancel() { - currentPage.cancel(); - cancelled = true; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java deleted file mode 100644 index afe0e864181..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.reactive.FailedReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ContinuousCqlRequestReactiveProcessor - implements RequestProcessor, ContinuousReactiveResultSet> { - - public static final GenericType CONTINUOUS_REACTIVE_RESULT_SET = - GenericType.of(ContinuousReactiveResultSet.class); - - private final ContinuousCqlRequestAsyncProcessor asyncProcessor; - - public ContinuousCqlRequestReactiveProcessor(ContinuousCqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(CONTINUOUS_REACTIVE_RESULT_SET); - } - - @Override - public ContinuousReactiveResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new DefaultContinuousReactiveResultSet( - () -> asyncProcessor.process(request, session, context, sessionLogPrefix)); - } - - @Override - public ContinuousReactiveResultSet newFailure(RuntimeException error) { - return new FailedReactiveResultSet(error); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java deleted file mode 100644 index b3f301edea6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.ReactiveResultSetBase; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultContinuousReactiveResultSet - extends ReactiveResultSetBase implements ContinuousReactiveResultSet { - - public DefaultContinuousReactiveResultSet( - Callable> firstPage) { - super(firstPage); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java deleted file mode 100644 index 3539c2e698c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlRequestReactiveProcessor - implements RequestProcessor, ReactiveResultSet> { - - public static final GenericType REACTIVE_RESULT_SET = - GenericType.of(ReactiveResultSet.class); - - private final CqlRequestAsyncProcessor asyncProcessor; - - public CqlRequestReactiveProcessor(CqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(REACTIVE_RESULT_SET); - } - - @Override - public ReactiveResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new DefaultReactiveResultSet( - () -> asyncProcessor.process(request, session, context, sessionLogPrefix)); - } - - @Override - public ReactiveResultSet newFailure(RuntimeException error) { - return new FailedReactiveResultSet(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java deleted file mode 100644 index 33b6dc02f48..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultReactiveResultSet extends ReactiveResultSetBase { - - public DefaultReactiveResultSet(Callable> firstPage) { - super(firstPage); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java deleted file mode 100644 index ca3b93e7f6b..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java +++ /dev/null @@ -1,580 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -class 
DefaultReactiveRow implements ReactiveRow { - - private final Row row; - private final ExecutionInfo executionInfo; - - DefaultReactiveRow(@NonNull Row row, @NonNull ExecutionInfo executionInfo) { - this.row = row; - this.executionInfo = executionInfo; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return row.getColumnDefinitions(); - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return row.getBytesUnsafe(i); - } - - @Override - public boolean isNull(int i) { - return row.isNull(i); - } - - @Override - public T get(int i, TypeCodec codec) { - return row.get(i, codec); - } - - @Override - public T get(int i, GenericType targetType) { - return row.get(i, targetType); - } - - @Override - public T get(int i, Class targetClass) { - return row.get(i, targetClass); - } - - @Override - public Object getObject(int i) { - return row.getObject(i); - } - - @Override - public boolean getBoolean(int i) { - return row.getBoolean(i); - } - - @Override - public byte getByte(int i) { - return row.getByte(i); - } - - @Override - public double getDouble(int i) { - return row.getDouble(i); - } - - @Override - public float getFloat(int i) { - return row.getFloat(i); - } - - @Override - public int getInt(int i) { - return row.getInt(i); - } - - @Override - public long getLong(int i) { - return row.getLong(i); - } - - @Override - public short getShort(int i) { - return row.getShort(i); - } - - @Override - public Instant getInstant(int i) { - return row.getInstant(i); - } - - @Override - public LocalDate getLocalDate(int i) { - return row.getLocalDate(i); - } - - @Override - public LocalTime getLocalTime(int i) { - return row.getLocalTime(i); - } - - @Override - public ByteBuffer getByteBuffer(int i) { - return row.getByteBuffer(i); - } - - @Override - public String getString(int i) { - return row.getString(i); - } - - @Override - public 
BigInteger getBigInteger(int i) { - return row.getBigInteger(i); - } - - @Override - public BigDecimal getBigDecimal(int i) { - return row.getBigDecimal(i); - } - - @Override - public UUID getUuid(int i) { - return row.getUuid(i); - } - - @Override - public InetAddress getInetAddress(int i) { - return row.getInetAddress(i); - } - - @Override - public CqlDuration getCqlDuration(int i) { - return row.getCqlDuration(i); - } - - @Override - public Token getToken(int i) { - return row.getToken(i); - } - - @Override - public List getList(int i, @NonNull Class elementsClass) { - return row.getList(i, elementsClass); - } - - @Override - public Set getSet(int i, @NonNull Class elementsClass) { - return row.getSet(i, elementsClass); - } - - @Override - public Map getMap(int i, @NonNull Class keyClass, @NonNull Class valueClass) { - return row.getMap(i, keyClass, valueClass); - } - - @Override - public UdtValue getUdtValue(int i) { - return row.getUdtValue(i); - } - - @Override - public TupleValue getTupleValue(int i) { - return row.getTupleValue(i); - } - - @Override - public int size() { - return row.size(); - } - - @NonNull - @Override - public DataType getType(int i) { - return row.getType(i); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return row.codecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return row.protocolVersion(); - } - - @Override - public ByteBuffer getBytesUnsafe(@NonNull String name) { - return row.getBytesUnsafe(name); - } - - @Override - public boolean isNull(@NonNull String name) { - return row.isNull(name); - } - - @Override - public T get(@NonNull String name, @NonNull TypeCodec codec) { - return row.get(name, codec); - } - - @Override - public T get(@NonNull String name, @NonNull GenericType targetType) { - return row.get(name, targetType); - } - - @Override - public T get(@NonNull String name, @NonNull Class targetClass) { - return row.get(name, targetClass); - } - - @Override - 
public Object getObject(@NonNull String name) { - return row.getObject(name); - } - - @Override - public boolean getBoolean(@NonNull String name) { - return row.getBoolean(name); - } - - @Override - public byte getByte(@NonNull String name) { - return row.getByte(name); - } - - @Override - public double getDouble(@NonNull String name) { - return row.getDouble(name); - } - - @Override - public float getFloat(@NonNull String name) { - return row.getFloat(name); - } - - @Override - public int getInt(@NonNull String name) { - return row.getInt(name); - } - - @Override - public long getLong(@NonNull String name) { - return row.getLong(name); - } - - @Override - public short getShort(@NonNull String name) { - return row.getShort(name); - } - - @Override - public Instant getInstant(@NonNull String name) { - return row.getInstant(name); - } - - @Override - public LocalDate getLocalDate(@NonNull String name) { - return row.getLocalDate(name); - } - - @Override - public LocalTime getLocalTime(@NonNull String name) { - return row.getLocalTime(name); - } - - @Override - public ByteBuffer getByteBuffer(@NonNull String name) { - return row.getByteBuffer(name); - } - - @Override - public String getString(@NonNull String name) { - return row.getString(name); - } - - @Override - public BigInteger getBigInteger(@NonNull String name) { - return row.getBigInteger(name); - } - - @Override - public BigDecimal getBigDecimal(@NonNull String name) { - return row.getBigDecimal(name); - } - - @Override - public UUID getUuid(@NonNull String name) { - return row.getUuid(name); - } - - @Override - public InetAddress getInetAddress(@NonNull String name) { - return row.getInetAddress(name); - } - - @Override - public CqlDuration getCqlDuration(@NonNull String name) { - return row.getCqlDuration(name); - } - - @Override - public Token getToken(@NonNull String name) { - return row.getToken(name); - } - - @Override - public List getList(@NonNull String name, @NonNull Class elementsClass) { - return 
row.getList(name, elementsClass); - } - - @Override - public Set getSet(@NonNull String name, @NonNull Class elementsClass) { - return row.getSet(name, elementsClass); - } - - @Override - public Map getMap( - @NonNull String name, @NonNull Class keyClass, @NonNull Class valueClass) { - return row.getMap(name, keyClass, valueClass); - } - - @Override - public UdtValue getUdtValue(@NonNull String name) { - return row.getUdtValue(name); - } - - @Override - public TupleValue getTupleValue(@NonNull String name) { - return row.getTupleValue(name); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return row.allIndicesOf(name); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return row.firstIndexOf(name); - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return row.getType(name); - } - - @Override - public ByteBuffer getBytesUnsafe(@NonNull CqlIdentifier id) { - return row.getBytesUnsafe(id); - } - - @Override - public boolean isNull(@NonNull CqlIdentifier id) { - return row.isNull(id); - } - - @Override - public T get(@NonNull CqlIdentifier id, @NonNull TypeCodec codec) { - return row.get(id, codec); - } - - @Override - public T get(@NonNull CqlIdentifier id, @NonNull GenericType targetType) { - return row.get(id, targetType); - } - - @Override - public T get(@NonNull CqlIdentifier id, @NonNull Class targetClass) { - return row.get(id, targetClass); - } - - @Override - public Object getObject(@NonNull CqlIdentifier id) { - return row.getObject(id); - } - - @Override - public boolean getBoolean(@NonNull CqlIdentifier id) { - return row.getBoolean(id); - } - - @Override - public byte getByte(@NonNull CqlIdentifier id) { - return row.getByte(id); - } - - @Override - public double getDouble(@NonNull CqlIdentifier id) { - return row.getDouble(id); - } - - @Override - public float getFloat(@NonNull CqlIdentifier id) { - return row.getFloat(id); - } - - @Override - public int getInt(@NonNull 
CqlIdentifier id) { - return row.getInt(id); - } - - @Override - public long getLong(@NonNull CqlIdentifier id) { - return row.getLong(id); - } - - @Override - public short getShort(@NonNull CqlIdentifier id) { - return row.getShort(id); - } - - @Override - public Instant getInstant(@NonNull CqlIdentifier id) { - return row.getInstant(id); - } - - @Override - public LocalDate getLocalDate(@NonNull CqlIdentifier id) { - return row.getLocalDate(id); - } - - @Override - public LocalTime getLocalTime(@NonNull CqlIdentifier id) { - return row.getLocalTime(id); - } - - @Override - public ByteBuffer getByteBuffer(@NonNull CqlIdentifier id) { - return row.getByteBuffer(id); - } - - @Override - public String getString(@NonNull CqlIdentifier id) { - return row.getString(id); - } - - @Override - public BigInteger getBigInteger(@NonNull CqlIdentifier id) { - return row.getBigInteger(id); - } - - @Override - public BigDecimal getBigDecimal(@NonNull CqlIdentifier id) { - return row.getBigDecimal(id); - } - - @Override - public UUID getUuid(@NonNull CqlIdentifier id) { - return row.getUuid(id); - } - - @Override - public InetAddress getInetAddress(@NonNull CqlIdentifier id) { - return row.getInetAddress(id); - } - - @Override - public CqlDuration getCqlDuration(@NonNull CqlIdentifier id) { - return row.getCqlDuration(id); - } - - @Override - public Token getToken(@NonNull CqlIdentifier id) { - return row.getToken(id); - } - - @Override - public List getList(@NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return row.getList(id, elementsClass); - } - - @Override - public Set getSet(@NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return row.getSet(id, elementsClass); - } - - @Override - public Map getMap( - @NonNull CqlIdentifier id, @NonNull Class keyClass, @NonNull Class valueClass) { - return row.getMap(id, keyClass, valueClass); - } - - @Override - public UdtValue getUdtValue(@NonNull CqlIdentifier id) { - return row.getUdtValue(id); - } - - @Override - 
public TupleValue getTupleValue(@NonNull CqlIdentifier id) { - return row.getTupleValue(id); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return row.allIndicesOf(id); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return row.firstIndexOf(id); - } - - @NonNull - @Override - public DataType getType(@NonNull CqlIdentifier id) { - return row.getType(id); - } - - @Override - public boolean isDetached() { - return row.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - row.attach(attachmentPoint); - } - - @Override - public String toString() { - return "DefaultReactiveRow{row=" + row + ", executionInfo=" + executionInfo + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java deleted file mode 100644 index f760ecc395e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import org.reactivestreams.Subscription; - -public class EmptySubscription implements Subscription { - - public static final EmptySubscription INSTANCE = new EmptySubscription(); - - private EmptySubscription() {} - - @Override - public void request(long n) {} - - @Override - public void cancel() {} -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java deleted file mode 100644 index 638434bb2d0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import java.util.Objects; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; - -/** - * A {@link Publisher} that immediately signals the error passed at instantiation to all its - * subscribers. 
- */ -public class FailedPublisher implements Publisher { - - protected final Throwable error; - - public FailedPublisher(Throwable error) { - this.error = error; - } - - @Override - public void subscribe(Subscriber subscriber) { - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // Per rule 1.9, we need to call onSubscribe before any other signal. Pass a dummy - // subscription since we know it will never be used. - subscriber.onSubscribe(EmptySubscription.INSTANCE); - // Signal the error to the subscriber right away. This is safe to do because per rule 2.10, - // a Subscriber MUST be prepared to receive an onError signal without a preceding - // Subscription.request(long n) call. - // Also, per rule 2.13: onError MUST return normally except when any provided parameter - // is null (which is not the case here); so we don't need care about catching errors here. - subscriber.onError(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java deleted file mode 100644 index 31c34d649aa..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * A {@link ReactiveResultSet} that immediately signals the error passed at instantiation to all its - * subscribers. 
- * - * @see CqlRequestReactiveProcessor#newFailure(java.lang.RuntimeException) - * @see ContinuousCqlRequestReactiveProcessor#newFailure(java.lang.RuntimeException) - */ -public class FailedReactiveResultSet extends FailedPublisher - implements ReactiveResultSet, ContinuousReactiveResultSet { - - public FailedReactiveResultSet(Throwable error) { - super(error); - } - - @NonNull - @Override - public Publisher getColumnDefinitions() { - return new FailedPublisher<>(error); - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return new FailedPublisher<>(error); - } - - @NonNull - @Override - public Publisher wasApplied() { - return new FailedPublisher<>(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java deleted file mode 100644 index f058149f570..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.atomic.AtomicLong; - -public final class ReactiveOperators { - - /** - * Atomically adds the given value to the given AtomicLong, bound to Long.MAX_VALUE. - * - * @param current the current value. - * @param toAdd the delta to add. - */ - public static void addCap(@NonNull AtomicLong current, long toAdd) { - long r, u; - do { - r = current.get(); - if (r == Long.MAX_VALUE) { - return; - } - u = r + toAdd; - if (u < 0L) { - u = Long.MAX_VALUE; - } - } while (!current.compareAndSet(r, u)); - } - - /** - * Atomically subtracts the given value from the given AtomicLong, bound to 0. - * - * @param current the current value. - * @param toSub the delta to subtract. - */ - public static void subCap(@NonNull AtomicLong current, long toSub) { - long r, u; - do { - r = current.get(); - if (r == 0 || r == Long.MAX_VALUE) { - return; - } - u = Math.max(r - toSub, 0); - } while (!current.compareAndSet(r, u)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java deleted file mode 100644 index 5ba00e22298..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; - -@ThreadSafe -public abstract class ReactiveResultSetBase> - implements ReactiveResultSet { - - private final Callable> firstPage; - - private final AtomicBoolean alreadySubscribed = new AtomicBoolean(false); - - private final SimpleUnicastProcessor columnDefinitionsPublisher = - new SimpleUnicastProcessor<>(); - - private final SimpleUnicastProcessor executionInfosPublisher = - new SimpleUnicastProcessor<>(); - - private final SimpleUnicastProcessor wasAppliedPublisher = - new SimpleUnicastProcessor<>(); - - protected ReactiveResultSetBase(Callable> firstPage) { - this.firstPage = firstPage; - } - - @Override - public void subscribe(@NonNull Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot 
be null"); - // As per rule 1.11, this publisher is allowed to support only one subscriber. - if (alreadySubscribed.compareAndSet(false, true)) { - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - subscriber, columnDefinitionsPublisher, executionInfosPublisher, wasAppliedPublisher); - try { - subscriber.onSubscribe(subscription); - // must be done after onSubscribe - subscription.start(firstPage); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled, and the caller MUST raise this error condition in a fashion - // that is adequate for the runtime environment. - subscription.doOnError( - new IllegalStateException( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", - t)); - } - } else { - subscriber.onSubscribe(EmptySubscription.INSTANCE); - subscriber.onError( - new IllegalStateException("This publisher does not support multiple subscriptions")); - } - // As per 2.13, this method must return normally (i.e. not throw) - } - - @NonNull - @Override - public Publisher getColumnDefinitions() { - return columnDefinitionsPublisher; - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return executionInfosPublisher; - } - - @NonNull - @Override - public Publisher wasApplied() { - return wasAppliedPublisher; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java deleted file mode 100644 index 500a291e9d2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java +++ /dev/null @@ -1,493 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue; -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A single-subscriber 
subscription that executes one single query and emits all the returned rows. - * - *

This class can handle both continuous and non-continuous result sets. - */ -@ThreadSafe -public class ReactiveResultSetSubscription> - implements Subscription { - - private static final Logger LOG = LoggerFactory.getLogger(ReactiveResultSetSubscription.class); - - private static final int MAX_ENQUEUED_PAGES = 4; - - /** Tracks the number of items requested by the subscriber. */ - private final AtomicLong requested = new AtomicLong(0); - - /** The pages received so far, with a maximum of MAX_ENQUEUED_PAGES elements. */ - private final BoundedConcurrentQueue> pages = - new BoundedConcurrentQueue<>(MAX_ENQUEUED_PAGES); - - /** - * Used to signal that a thread is currently draining, i.e., emitting items to the subscriber. - * When it is zero, that means there is no ongoing emission. This mechanism effectively serializes - * access to the drain() method, and also keeps track of missed attempts to enter it, since each - * thread that attempts to drain will increment this counter. - * - * @see #drain() - */ - private final AtomicInteger draining = new AtomicInteger(0); - - /** - * Waited upon by the driver and completed when the subscriber requests its first item. - * - *

Used to hold off emitting results until the subscriber issues its first request for items. - * Since this future is only completed from {@link #request(long)}, this effectively conditions - * the enqueueing of the first page to the reception of the subscriber's first request. - * - *

This mechanism avoids sending terminal signals before a request is made when the stream is - * empty. Note that as per 2.9, "a Subscriber MUST be prepared to receive an onComplete signal - * with or without a preceding Subscription.request(long n) call." However, the TCK considers it - * as unfair behavior. - * - * @see #start(Callable) - * @see #request(long) - */ - private final CompletableFuture firstSubscriberRequestArrived = new CompletableFuture<>(); - - /** non-final because it has to be de-referenced, see {@link #clear()}. */ - private volatile Subscriber mainSubscriber; - - private volatile Subscriber columnDefinitionsSubscriber; - - private volatile Subscriber executionInfosSubscriber; - - private volatile Subscriber wasAppliedSubscriber; - - /** - * Set to true when the subscription is cancelled, which happens when an error is encountered, - * when the result set is fully consumed and the subscription terminates, or when the subscriber - * manually calls {@link #cancel()}. - */ - private volatile boolean cancelled = false; - - ReactiveResultSetSubscription( - @NonNull Subscriber mainSubscriber, - @NonNull Subscriber columnDefinitionsSubscriber, - @NonNull Subscriber executionInfosSubscriber, - @NonNull Subscriber wasAppliedSubscriber) { - this.mainSubscriber = mainSubscriber; - this.columnDefinitionsSubscriber = columnDefinitionsSubscriber; - this.executionInfosSubscriber = executionInfosSubscriber; - this.wasAppliedSubscriber = wasAppliedSubscriber; - } - - /** - * Starts the query execution. - * - *

Must be called immediately after creating the subscription, but after {@link - * Subscriber#onSubscribe(Subscription)}. - * - * @param firstPage The future that, when complete, will produce the first page. - */ - void start(@NonNull Callable> firstPage) { - firstSubscriberRequestArrived.thenAccept( - (aVoid) -> fetchNextPageAndEnqueue(new Page<>(firstPage), true)); - } - - @Override - public void request(long n) { - // As per 3.6: after the Subscription is cancelled, additional - // calls to request() MUST be NOPs. - if (!cancelled) { - if (n < 1) { - // Validate request as per rule 3.9 - doOnError( - new IllegalArgumentException( - mainSubscriber - + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); - } else { - // As per rule 3.17, when demand overflows Long.MAX_VALUE - // it can be treated as "effectively unbounded" - ReactiveOperators.addCap(requested, n); - // Set the first future to true if not done yet. - // This will make the first page of results ready for consumption, - // see start(). - // As per 2.7 it is the subscriber's responsibility to provide - // external synchronization when calling request(), - // so the check-then-act idiom below is good enough - // (and besides, complete() is idempotent). - if (!firstSubscriberRequestArrived.isDone()) { - firstSubscriberRequestArrived.complete(null); - } - drain(); - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of - // its caller by returning in a timely manner, MUST be idempotent and - // MUST be thread-safe. - if (!cancelled) { - cancelled = true; - if (draining.getAndIncrement() == 0) { - // If nobody is draining, clear now; - // otherwise, the draining thread will notice - // that the cancelled flag was set - // and will clear for us. - clear(); - } - } - } - - /** - * Attempts to drain available items, i.e. emit them to the subscriber. - * - *

Access to this method is serialized by the field {@link #draining}: only one thread at a - * time can drain, but threads that attempt to drain while other thread is already draining - * increment that field; the draining thread, before finishing its work, checks for such failed - * attempts and triggers another round of draining if that was the case. - * - *

The loop is interrupted when 1) the requested amount has been met or 2) when there are no - * more items readily available or 3) the subscription has been cancelled. - * - *

The loop also checks for stream exhaustion and emits a terminal {@code onComplete} signal in - * this case. - * - *

This method may run on a driver IO thread when invoked from {@link - * #fetchNextPageAndEnqueue(Page, boolean)}, or on a subscriber thread, when invoked from {@link - * #request(long)}. - */ - @SuppressWarnings("ConditionalBreakInInfiniteLoop") - private void drain() { - // As per 3.4: this method SHOULD respect the responsiveness - // of its caller by returning in a timely manner. - // We accomplish this by a wait-free implementation. - if (draining.getAndIncrement() != 0) { - // Someone else is already draining, so do nothing, - // the other thread will notice that we attempted to drain. - // This also allows to abide by rule 3.3 and avoid - // cycles such as request() -> onNext() -> request() etc. - return; - } - int missed = 1; - // Note: when termination is detected inside this loop, - // we MUST call clear() manually. - for (; ; ) { - // The requested number of items at this point - long r = requested.get(); - // The number of items emitted thus far - long emitted = 0L; - while (emitted != r) { - if (cancelled) { - clear(); - return; - } - Object result; - try { - result = tryNext(); - } catch (Throwable t) { - doOnError(t); - clear(); - return; - } - if (result == null) { - break; - } - if (result instanceof Throwable) { - doOnError((Throwable) result); - clear(); - return; - } - doOnNext((ReactiveRow) result); - emitted++; - } - if (isExhausted()) { - doOnComplete(); - clear(); - return; - } - if (cancelled) { - clear(); - return; - } - if (emitted != 0) { - // if any item was emitted, adjust the requested field - ReactiveOperators.subCap(requested, emitted); - } - // if another thread tried to call drain() while we were busy, - // then we should do another drain round. - missed = draining.addAndGet(-missed); - if (missed == 0) { - break; - } - } - } - - /** - * Tries to return the next item, if one is readily available, and returns {@code null} otherwise. - * - *

Cannot run concurrently due to the {@link #draining} field. - */ - @Nullable - private Object tryNext() { - Page current = pages.peek(); - if (current != null) { - if (current.hasMoreRows()) { - return current.nextRow(); - } else if (current.hasMorePages()) { - // Discard current page as it is consumed. - // Don't discard the last page though as we need it - // to test isExhausted(). It will be GC'ed when a terminal signal - // is issued anyway, so that's no big deal. - if (pages.poll() == null) { - throw new AssertionError("Queue is empty, this should not happen"); - } - // if the next page is readily available, - // serve its first row now, no need to wait - // for the next drain. - return tryNext(); - } - } - // No items available right now. - return null; - } - - /** - * Returns {@code true} when the entire stream has been consumed and no more items can be emitted. - * When that is the case, a terminal signal is sent. - * - *

Cannot run concurrently due to the draining field. - */ - private boolean isExhausted() { - Page current = pages.peek(); - // Note: current can only be null when: - // 1) we are waiting for the first page and it hasn't arrived yet; - // 2) we just discarded the current page, but the next page hasn't arrived yet. - // In any case, a null here means it is not the last page, since the last page - // stays in the queue until the very end of the operation. - return current != null && !current.hasMoreRows() && !current.hasMorePages(); - } - - /** - * Runs on a subscriber thread initially, see {@link #start(Callable)}. Subsequent executions run - * on the thread that completes the pair of futures [current.fetchNextPage, pages.offer] and - * enqueues. This can be a driver IO thread or a subscriber thread; in both cases, cannot run - * concurrently due to the fact that one can only fetch the next page when the current one is - * arrived and enqueued. - */ - private void fetchNextPageAndEnqueue(@NonNull Page current, boolean firstPage) { - current - .fetchNextPage() - // as soon as the response arrives, - // create the new page - .handle( - (rs, t) -> { - Page page; - if (t == null) { - page = toPage(rs); - executionInfosSubscriber.onNext(rs.getExecutionInfo()); - if (!page.hasMorePages()) { - executionInfosSubscriber.onComplete(); - } - if (firstPage) { - columnDefinitionsSubscriber.onNext(rs.getColumnDefinitions()); - columnDefinitionsSubscriber.onComplete(); - // Avoid calling wasApplied on empty pages as some implementations may throw - // IllegalStateException; if the page is empty, this wasn't a CAS query, in which - // case, as per the method's contract, wasApplied should be true. 
- boolean wasApplied = rs.remaining() == 0 || rs.wasApplied(); - wasAppliedSubscriber.onNext(wasApplied); - wasAppliedSubscriber.onComplete(); - } - } else { - // Unwrap CompletionExceptions created by combined futures - if (t instanceof CompletionException) { - t = t.getCause(); - } - page = toErrorPage(t); - executionInfosSubscriber.onError(t); - if (firstPage) { - columnDefinitionsSubscriber.onError(t); - wasAppliedSubscriber.onError(t); - } - } - return page; - }) - .thenCompose(pages::offer) - .thenAccept( - page -> { - if (page.hasMorePages() && !cancelled) { - // preemptively fetch the next page, if available - fetchNextPageAndEnqueue(page, false); - } - drain(); - }); - } - - private void doOnNext(@NonNull ReactiveRow result) { - try { - mainSubscriber.onNext(result); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.", - t); - cancel(); - } - } - - private void doOnComplete() { - try { - // Then we signal onComplete as per rules 1.2 and 1.5 - mainSubscriber.onComplete(); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - // package-private because it can be invoked by the publisher if the subscription handshake - // process fails. - void doOnError(@NonNull Throwable error) { - try { - // Then we signal the error downstream, as per rules 1.2 and 1.4. - mainSubscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void clear() { - // We don't need these pages anymore and should not hold references - // to them. 
- pages.clear(); - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - // Our own publishers do not keep references to this subscription, - // but downstream processors might do so, which is why we need to - // defensively clear the subscriber reference when we are done. - mainSubscriber = null; - columnDefinitionsSubscriber = null; - executionInfosSubscriber = null; - wasAppliedSubscriber = null; - } - - /** - * Converts the received result object into a {@link Page}. - * - * @param rs the result object to convert. - * @return a new page. - */ - @NonNull - private Page toPage(@NonNull ResultSetT rs) { - ExecutionInfo executionInfo = rs.getExecutionInfo(); - Iterator results = - Iterators.transform( - rs.currentPage().iterator(), - row -> new DefaultReactiveRow(Objects.requireNonNull(row), executionInfo)); - return new Page<>(results, rs.hasMorePages() ? rs::fetchNextPage : null); - } - - /** Converts the given error into a {@link Page}, containing the error as its only element. */ - @NonNull - private Page toErrorPage(@NonNull Throwable t) { - return new Page<>(Iterators.singletonIterator(t), null); - } - - /** - * A page object comprises an iterator over the page's results, and a future pointing to the next - * page (or {@code null}, if it's the last page). - */ - static class Page> { - - @NonNull final Iterator iterator; - - // A pointer to the next page, or null if this is the last page. 
- @Nullable final Callable> nextPage; - - /** called only from start() */ - Page(@NonNull Callable> nextPage) { - this.iterator = Collections.emptyIterator(); - this.nextPage = nextPage; - } - - Page(@NonNull Iterator iterator, @Nullable Callable> nextPage) { - this.iterator = iterator; - this.nextPage = nextPage; - } - - boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - CompletionStage fetchNextPage() { - try { - return Objects.requireNonNull(nextPage).call(); - } catch (Exception e) { - // This is a synchronous failure in the driver. - // It can happen in rare cases when the driver throws an exception instead of returning a - // failed future; e.g. if someone tries to execute a continuous paging request but the - // protocol version in use does not support it. - // We treat it as a failed future. - return CompletableFutures.failedFuture(e); - } - } - - boolean hasMoreRows() { - return iterator.hasNext(); - } - - @NonNull - Object nextRow() { - return iterator.next(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java deleted file mode 100644 index 845cbe2349b..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.Queue; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import org.reactivestreams.Processor; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A simple {@link Processor} that receives items form an upstream publisher, stores them in an - * internal queue, then serves them to one single downstream subscriber. It does not support - * multiple subscriptions. - * - *

Implementation note: this class is intended to serve as the common implementation for all - * secondary publishers exposed by the driver's reactive API, and in particular, for publishers of - * query metadata objects. Since such publishers are not critical, and usually only publish a - * handful of items, this implementation favors simplicity over efficiency (in particular, it uses - * an unbounded linked queue, but in practice there is no risk that this queue could grow - * uncontrollably). - * - * @param The type of elements received and emitted by this processor. - */ -public class SimpleUnicastProcessor - implements Processor, Subscription { - - private static final Logger LOG = LoggerFactory.getLogger(SimpleUnicastProcessor.class); - - private static final Object ON_COMPLETE = new Object(); - - private final Queue queue = new ConcurrentLinkedDeque<>(); - - private final AtomicBoolean once = new AtomicBoolean(false); - - private final AtomicInteger draining = new AtomicInteger(0); - - private final AtomicLong requested = new AtomicLong(0); - - private volatile Subscriber subscriber; - - private volatile boolean cancelled; - - @Override - public void subscribe(Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // As per rule 1.11, this publisher supports only one subscriber. - if (once.compareAndSet(false, true)) { - this.subscriber = subscriber; - try { - subscriber.onSubscribe(this); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled, and the caller MUST raise this error condition in a fashion - // that is adequate for the runtime environment. 
- doOnError( - new IllegalStateException( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", - t)); - } - } else { - subscriber.onSubscribe(EmptySubscription.INSTANCE); - subscriber.onError( - new IllegalStateException("This publisher does not support multiple subscriptions")); - } - // As per 2.13, this method must return normally (i.e. not throw) - } - - @Override - public void onSubscribe(Subscription s) { - // no-op - } - - @Override - public void onNext(ElementT value) { - if (!cancelled) { - queue.offer(value); - drain(); - } - } - - @Override - public void onError(Throwable error) { - if (!cancelled) { - queue.offer(error); - drain(); - } - } - - @Override - public void onComplete() { - if (!cancelled) { - queue.offer(ON_COMPLETE); - drain(); - } - } - - @Override - public void request(long n) { - // As per 3.6: after the Subscription is cancelled, additional - // calls to request() MUST be NOPs. - if (!cancelled) { - if (n < 1) { - // Validate request as per rule 3.9 - doOnError( - new IllegalArgumentException( - subscriber - + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); - } else { - // As per rule 3.17, when demand overflows Long.MAX_VALUE - // it can be treated as "effectively unbounded" - ReactiveOperators.addCap(requested, n); - drain(); - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of - // its caller by returning in a timely manner, MUST be idempotent and - // MUST be thread-safe. - if (!cancelled) { - cancelled = true; - if (draining.getAndIncrement() == 0) { - // If nobody is draining, clear now; - // otherwise, the draining thread will notice - // that the cancelled flag was set - // and will clear for us. 
- clear(); - } - } - } - - @SuppressWarnings("ConditionalBreakInInfiniteLoop") - private void drain() { - if (draining.getAndIncrement() != 0) { - return; - } - int missed = 1; - for (; ; ) { - // Note: when termination is detected inside this loop, - // we MUST call clear() manually. - long requested = this.requested.get(); - long emitted = 0L; - while (requested != emitted) { - if (cancelled) { - clear(); - return; - } - Object t = queue.poll(); - if (t == null) { - break; - } - if (t instanceof Throwable) { - Throwable error = (Throwable) t; - doOnError(error); - clear(); - return; - } else if (t == ON_COMPLETE) { - doOnComplete(); - clear(); - return; - } else { - @SuppressWarnings("unchecked") - ElementT item = (ElementT) t; - doOnNext(item); - emitted++; - } - } - if (cancelled) { - clear(); - return; - } - if (emitted != 0) { - // if any item was emitted, adjust the requested field - ReactiveOperators.subCap(this.requested, emitted); - } - // if another thread tried to call drain() while we were busy, - // then we should do another drain round. - missed = draining.addAndGet(-missed); - if (missed == 0) { - break; - } - } - } - - private void doOnNext(@NonNull ElementT result) { - try { - subscriber.onNext(result); - } catch (Throwable t) { - LOG.error( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.", - t); - cancel(); - } - } - - private void doOnComplete() { - try { - // Then we signal onComplete as per rules 1.2 and 1.5 - subscriber.onComplete(); - } catch (Throwable t) { - LOG.error( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void doOnError(@NonNull Throwable error) { - try { - // Then we signal the error downstream, as per rules 1.2 and 1.4. 
- subscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void clear() { - // We don't need the elements anymore and should not hold references - // to them. - queue.clear(); - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - // Our own publishers do not keep references to this subscription, - // but downstream processors might do so, which is why we need to - // defensively clear the subscriber reference when we are done. - subscriber = null; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java deleted file mode 100644 index 885d9bd48b7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.esri.core.geometry.GeometryException; -import com.esri.core.geometry.SpatialReference; -import com.esri.core.geometry.ogc.OGCGeometry; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class DefaultGeometry implements Geometry, Serializable { - - private static final long serialVersionUID = 1L; - - /** - * Default spatial reference for Well Known Text / Well Known Binary. - * - *

4326 is the EPSG identifier of the World Geodetic System (WGS) in - * its later revision, WGS 84. - */ - public static final SpatialReference SPATIAL_REFERENCE_4326 = SpatialReference.create(4326); - - @NonNull - public static T fromOgcWellKnownText( - @NonNull String source, @NonNull Class klass) { - OGCGeometry geometry; - try { - geometry = OGCGeometry.fromText(source); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException(e.getMessage()); - } - validateType(geometry, klass); - return klass.cast(geometry); - } - - @NonNull - public static T fromOgcWellKnownBinary( - @NonNull ByteBuffer source, @NonNull Class klass) { - OGCGeometry geometry; - try { - geometry = OGCGeometry.fromBinary(source); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException(e.getMessage()); - } - validateType(geometry, klass); - return klass.cast(geometry); - } - - @NonNull - public static T fromOgcGeoJson( - @NonNull String source, @NonNull Class klass) { - OGCGeometry geometry; - try { - geometry = OGCGeometry.fromGeoJson(source); - } catch (Exception e) { - throw new IllegalArgumentException(e.getMessage()); - } - validateType(geometry, klass); - return klass.cast(geometry); - } - - private static void validateType(OGCGeometry geometry, Class klass) { - if (!geometry.getClass().equals(klass)) { - throw new IllegalArgumentException( - String.format( - "%s is not of type %s", geometry.getClass().getSimpleName(), klass.getSimpleName())); - } - } - - private final OGCGeometry ogcGeometry; - - protected DefaultGeometry(@NonNull OGCGeometry ogcGeometry) { - this.ogcGeometry = ogcGeometry; - Preconditions.checkNotNull(ogcGeometry); - validateOgcGeometry(ogcGeometry); - } - - private static void validateOgcGeometry(OGCGeometry geometry) { - try { - if (geometry.is3D()) { - throw new IllegalArgumentException(String.format("'%s' is not 2D", geometry.asText())); - } - if (!geometry.isSimple()) { - throw new IllegalArgumentException( - String.format( - 
"'%s' is not simple. Points and edges cannot self-intersect.", geometry.asText())); - } - } catch (GeometryException e) { - throw new IllegalArgumentException("Invalid geometry" + e.getMessage()); - } - } - - @NonNull - public static ImmutableList getPoints(@NonNull OGCLineString lineString) { - ImmutableList.Builder builder = ImmutableList.builder(); - for (int i = 0; i < lineString.numPoints(); i++) { - builder.add(new DefaultPoint(lineString.pointN(i))); - } - return builder.build(); - } - - protected static com.esri.core.geometry.Point toEsri(Point p) { - return new com.esri.core.geometry.Point(p.X(), p.Y()); - } - - @NonNull - public OGCGeometry getOgcGeometry() { - return ogcGeometry; - } - - @NonNull - public com.esri.core.geometry.Geometry getEsriGeometry() { - return ogcGeometry.getEsriGeometry(); - } - - @NonNull - @Override - public String asWellKnownText() { - return ogcGeometry.asText(); - } - - @NonNull - @Override - public ByteBuffer asWellKnownBinary() { - return WkbUtil.asLittleEndianBinary(ogcGeometry); - } - - @NonNull - @Override - public String asGeoJson() { - return ogcGeometry.asGeoJson(); - } - - @Override - public boolean contains(@NonNull Geometry other) { - Preconditions.checkNotNull(other); - if (other instanceof DefaultGeometry) { - DefaultGeometry defautlOther = (DefaultGeometry) other; - return getOgcGeometry().contains(defautlOther.getOgcGeometry()); - } - return false; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DefaultGeometry)) { - return false; - } - DefaultGeometry that = (DefaultGeometry) o; - return this.getOgcGeometry().equals(that.getOgcGeometry()); - } - - @Override - public int hashCode() { - // OGCGeometry subclasses do not overwrite Object.hashCode() - // while com.esri.core.geometry.Geometry subclasses usually do, - // so use these instead; this is consistent with equals - // because OGCGeometry.equals() actually compare between - // 
com.esri.core.geometry.Geometry objects - return getEsriGeometry().hashCode(); - } - - // Should never be called since we serialize a proxy (see subclasses) - @SuppressWarnings("UnusedVariable") - private void readObject(ObjectInputStream stream) throws InvalidObjectException { - throw new InvalidObjectException("Proxy required"); - } - - @Override - public String toString() { - return asWellKnownText(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java deleted file mode 100644 index 1cf64bb366d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.esri.core.geometry.Polyline; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultLineString extends DefaultGeometry implements LineString { - - private static final long serialVersionUID = 1280189361978382248L; - - private static OGCLineString fromPoints(Point p1, Point p2, Point... pn) { - Polyline polyline = new Polyline(toEsri(p1), toEsri(p2)); - for (Point p : pn) { - polyline.lineTo(toEsri(p)); - } - return new OGCLineString(polyline, 0, DefaultGeometry.SPATIAL_REFERENCE_4326); - } - - private final List points; - - public DefaultLineString(@NonNull Point p1, @NonNull Point p2, @NonNull Point... pn) { - super(fromPoints(p1, p2, pn)); - this.points = ImmutableList.builder().add(p1).add(p2).add(pn).build(); - } - - public DefaultLineString(@NonNull OGCLineString lineString) { - super(lineString); - this.points = getPoints(lineString); - } - - @NonNull - @Override - public List getPoints() { - return points; - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData a single byte array containing the Well-Known Binary representation. 
- */ - private Object writeReplace() { - return new WkbSerializationProxy(this.asWellKnownBinary()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java deleted file mode 100644 index c9540b10d8a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultPoint extends DefaultGeometry implements Point { - - private static final long serialVersionUID = -8337622213980781285L; - - public DefaultPoint(double x, double y) { - this( - new OGCPoint( - new com.esri.core.geometry.Point(x, y), DefaultGeometry.SPATIAL_REFERENCE_4326)); - } - - public DefaultPoint(@NonNull OGCPoint point) { - super(point); - } - - @NonNull - @Override - public OGCPoint getOgcGeometry() { - return (OGCPoint) super.getOgcGeometry(); - } - - @Override - public double X() { - return getOgcGeometry().X(); - } - - @Override - public double Y() { - return getOgcGeometry().Y(); - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData a single byte array containing the Well-Known Binary representation. - */ - private Object writeReplace() { - return new WkbSerializationProxy(this.asWellKnownBinary()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java deleted file mode 100644 index 27d375d42b1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.esri.core.geometry.Operator; -import com.esri.core.geometry.OperatorFactoryLocal; -import com.esri.core.geometry.OperatorSimplifyOGC; -import com.esri.core.geometry.ogc.OGCPolygon; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultPolygon extends DefaultGeometry implements Polygon { - - private static final long serialVersionUID = 3694196802962890314L; - - private final List exteriorRing; - private final List> interiorRings; - - public DefaultPolygon( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... 
pn) { - super(fromPoints(p1, p2, p3, pn)); - this.exteriorRing = ImmutableList.builder().add(p1).add(p2).add(p3).add(pn).build(); - this.interiorRings = Collections.emptyList(); - } - - public DefaultPolygon(@NonNull OGCPolygon polygon) { - super(polygon); - if (polygon.isEmpty()) { - this.exteriorRing = ImmutableList.of(); - } else { - this.exteriorRing = getPoints(polygon.exteriorRing()); - } - - ImmutableList.Builder> builder = ImmutableList.builder(); - for (int i = 0; i < polygon.numInteriorRing(); i++) { - builder.add(getPoints(polygon.interiorRingN(i))); - } - this.interiorRings = builder.build(); - } - - @NonNull - @Override - public List getExteriorRing() { - return exteriorRing; - } - - @NonNull - @Override - public List> getInteriorRings() { - return interiorRings; - } - - private static OGCPolygon fromPoints(Point p1, Point p2, Point p3, Point... pn) { - com.esri.core.geometry.Polygon polygon = new com.esri.core.geometry.Polygon(); - addPath(polygon, p1, p2, p3, pn); - return new OGCPolygon(simplify(polygon), DefaultGeometry.SPATIAL_REFERENCE_4326); - } - - private static void addPath( - com.esri.core.geometry.Polygon polygon, Point p1, Point p2, Point p3, Point[] pn) { - - polygon.startPath(toEsri(p1)); - polygon.lineTo(toEsri(p2)); - polygon.lineTo(toEsri(p3)); - for (Point p : pn) { - polygon.lineTo(toEsri(p)); - } - } - - private static com.esri.core.geometry.Polygon simplify(com.esri.core.geometry.Polygon polygon) { - OperatorSimplifyOGC op = - (OperatorSimplifyOGC) - OperatorFactoryLocal.getInstance().getOperator(Operator.Type.SimplifyOGC); - return (com.esri.core.geometry.Polygon) - op.execute(polygon, DefaultGeometry.SPATIAL_REFERENCE_4326, true, null); - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData a single byte array containing the Well-Known Binary representation. 
- */ - private Object writeReplace() { - return new WkbSerializationProxy(this.asWellKnownBinary()); - } - - public static class Builder implements Polygon.Builder { - private final com.esri.core.geometry.Polygon polygon = new com.esri.core.geometry.Polygon(); - - @NonNull - @Override - public Builder addRing( - @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn) { - addPath(polygon, p1, p2, p3, pn); - return this; - } - - /** - * Builds the polygon. - * - * @return the polygon. - */ - @NonNull - @Override - public Polygon build() { - return new DefaultPolygon( - new OGCPolygon(simplify(polygon), DefaultGeometry.SPATIAL_REFERENCE_4326)); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java deleted file mode 100644 index 518f6aa1346..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static java.util.regex.Pattern.CASE_INSENSITIVE; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.esri.core.geometry.MultiPath; -import com.esri.core.geometry.ogc.OGCGeometry; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import net.jcip.annotations.Immutable; - -/** - * The driver-side representation of DSE's {@code Geo.distance}. - * - *

This is a circle in a two-dimensional XY plane represented by its center point and radius. It - * is used as a search criteria to determine whether or not another geospatial object lies within a - * circular area. - * - *

Note that this shape has no equivalent in the OGC and GeoJSON standards: as a consequence, - * {@link #asWellKnownText()} returns a custom format, and {@link #getOgcGeometry()}, {@link - * #asWellKnownBinary()}, and {@link #asGeoJson()} throw {@link UnsupportedOperationException}. - * - *

Unlike other geo types, this class is never exposed directly to driver clients: it is used - * internally by {@linkplain Geo#inside(Point, double) geo predicates}, but cannot be a column type, - * nor appear in CQL or graph results. Therefore it doesn't have a public-facing interface, nor a - * built-in codec. - */ -@Immutable -public class Distance extends DefaultGeometry { - - private static final Pattern WKT_PATTERN = - Pattern.compile( - "distance *\\( *\\( *([\\d\\.-]+) *([\\d+\\.-]+) *\\) *([\\d+\\.-]+) *\\)", - CASE_INSENSITIVE); - - /** - * Creates a distance from its Well-known - * Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. - * @return the point represented by the WKT. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - * @see Distance#asWellKnownText() - */ - @NonNull - public static Distance fromWellKnownText(@NonNull String source) { - Matcher matcher = WKT_PATTERN.matcher(source.trim()); - if (matcher.matches() && matcher.groupCount() == 3) { - try { - return new Distance( - new DefaultPoint( - Double.parseDouble(matcher.group(1)), Double.parseDouble(matcher.group(2))), - Double.parseDouble(matcher.group(3))); - } catch (NumberFormatException var3) { - throw new IllegalArgumentException(String.format("Unable to parse %s", source)); - } - } else { - throw new IllegalArgumentException(String.format("Unable to parse %s", source)); - } - } - - private final DefaultPoint center; - - private final double radius; - - /** - * Creates a new distance with the given center and radius. - * - * @param center The center point. - * @param radius The radius of the circle representing distance. 
- */ - public Distance(@NonNull Point center, double radius) { - super(((DefaultPoint) center).getOgcGeometry()); - Preconditions.checkNotNull(center); - Preconditions.checkArgument(radius >= 0.0D, "Radius must be >= 0 (got %s)", radius); - this.center = ((DefaultPoint) center); - this.radius = radius; - } - - /** @return The center point of the circle representing this distance. */ - @NonNull - public Point getCenter() { - return center; - } - - /** @return The radius of the circle representing this distance. */ - public double getRadius() { - return radius; - } - - /** - * Returns a Well-known Text (WKT) - * representation of this geospatial type. - * - *

Since there is no Well-known Text specification for Distance, this returns a custom format - * of: DISTANCE((center.x center.y) radius) - * - * @return a Well-known Text representation of this object. - */ - @NonNull - @Override - public String asWellKnownText() { - return String.format("DISTANCE((%s %s) %s)", this.center.X(), this.center.Y(), this.radius); - } - - /** - * The distance type has no equivalent in the OGC standard: this method throws an {@link - * UnsupportedOperationException}. - */ - @NonNull - @Override - public OGCGeometry getOgcGeometry() { - throw new UnsupportedOperationException(); - } - - /** - * The distance type has no equivalent in the OGC standard: this method throws an {@link - * UnsupportedOperationException}. - */ - @NonNull - @Override - public ByteBuffer asWellKnownBinary() { - throw new UnsupportedOperationException(); - } - - /** - * The distance type has no equivalent in the GeoJSON standard: this method throws an {@link - * UnsupportedOperationException}. - */ - @Override - @NonNull - public String asGeoJson() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Distance) { - Distance that = (Distance) other; - return Objects.equals(this.center, that.center) && this.radius == that.radius; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(center, radius); - } - - @SuppressWarnings("SimplifiableConditionalExpression") - @Override - public boolean contains(@NonNull Geometry geometry) { - return geometry instanceof Distance - ? this.containsDistance((Distance) geometry) - : geometry instanceof Point - ? this.containsPoint((Point) geometry) - : geometry instanceof LineString - ? this.containsLineString((LineString) geometry) - : geometry instanceof Polygon ? 
this.containsPolygon((Polygon) geometry) : false; - } - - private boolean containsDistance(Distance distance) { - return this.center.getOgcGeometry().distance(distance.center.getOgcGeometry()) + distance.radius - <= this.radius; - } - - private boolean containsPoint(Point point) { - return this.containsOGCPoint(((DefaultPoint) point).getOgcGeometry()); - } - - private boolean containsLineString(LineString lineString) { - MultiPath multiPath = - (MultiPath) ((DefaultLineString) lineString).getOgcGeometry().getEsriGeometry(); - return containsMultiPath(multiPath); - } - - private boolean containsPolygon(Polygon polygon) { - MultiPath multiPath = - (com.esri.core.geometry.Polygon) - ((DefaultPolygon) polygon).getOgcGeometry().getEsriGeometry(); - return containsMultiPath(multiPath); - } - - private boolean containsMultiPath(MultiPath multiPath) { - int numPoints = multiPath.getPointCount(); - for (int i = 0; i < numPoints; ++i) { - OGCPoint point = new OGCPoint(multiPath.getPoint(i), DefaultGeometry.SPATIAL_REFERENCE_4326); - if (!this.containsOGCPoint(point)) { - return false; - } - } - return true; - } - - private boolean containsOGCPoint(OGCPoint point) { - return this.center.getOgcGeometry().distance(point) <= this.radius; - } - - /** - * This object gets replaced by an internal proxy for serialization. 
- * - * @serialData Point (wkb) for center followed by double for radius - */ - private Object writeReplace() { - return new DistanceSerializationProxy(this); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java deleted file mode 100644 index 515af121980..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import java.io.Serializable; - -/** - * A thin wrapper around {@link Distance}, that gets substituted during the serialization / - * deserialization process. This allows {@link Distance} to be immutable and reference centers' OGC - * counterpart. 
- */ -public class DistanceSerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final Point center; - private final double radius; - - public DistanceSerializationProxy(Distance distance) { - this.center = distance.getCenter(); - this.radius = distance.getRadius(); - } - - private Object readResolve() { - return new Distance(center, radius); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java deleted file mode 100644 index 92c0f6de2d5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import net.jcip.annotations.Immutable; - -/** - * A thin wrapper around a Well-Known Binary byte sequence, that gets substituted for {@link - * DefaultGeometry} instances during the serialization / deserialization process. This allows - * immutable geometry classes. - */ -@Immutable -class WkbSerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final byte[] wkb; - - WkbSerializationProxy(ByteBuffer wkb) { - this.wkb = Bytes.getArray(wkb); - } - - private Object readResolve() { - ByteBuffer buffer = ByteBuffer.wrap(wkb).order(ByteOrder.nativeOrder()); - int type = buffer.getInt(1); - - if (type == 1) { - return Point.fromWellKnownBinary(buffer); - } else if (type == 2) { - return LineString.fromWellKnownBinary(buffer); - } else if (type == 3) { - return Polygon.fromWellKnownBinary(buffer); - } else { - throw new IllegalArgumentException( - "Unknown geospatial type code in serialized form: " + type); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java deleted file mode 100644 index 3f18b32fda2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import com.esri.core.geometry.Geometry; -import com.esri.core.geometry.Operator; -import com.esri.core.geometry.OperatorExportToWkb; -import com.esri.core.geometry.OperatorFactoryLocal; -import com.esri.core.geometry.WkbExportFlags; -import com.esri.core.geometry.ogc.OGCGeometry; -import com.esri.core.geometry.ogc.OGCLineString; -import com.esri.core.geometry.ogc.OGCPoint; -import com.esri.core.geometry.ogc.OGCPolygon; -import java.lang.reflect.Method; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; - -/** - * Helper class to serialize OGC geometries to Well-Known Binary, forcing the byte order to little - * endian. - * - *

WKB encodes the byte order, so in theory we could send the buffer in any order, even if it is - * different from the server. However DSE server performs an additional validation step server-side: - * it deserializes to Java, serializes back to WKB, and then compares the original buffer to the - * "re-serialized" one. If they don't match, a MarshalException is thrown. So with a client in - * big-endian and a server in little-endian, we would get: - * - *

- * incoming buffer (big endian) --> Java --> reserialized buffer (little endian)
- * 
- * - * Since the two buffers have a different endian-ness, they don't match. - * - *

The ESRI library defaults to the native byte order and doesn't let us change it. Therefore: - * - *

    - *
  • if the native order is little endian (vast majority of cases), this class simply delegates - * to the appropriate public API method; - *
  • if the native order is big endian, it re-implements the serialization code, using - * reflection to get access to a private method. If reflection fails for any reason (updated - * ESRI library, security manager...), a runtime exception will be thrown. - *
- */ -class WkbUtil { - - private static final boolean IS_NATIVE_LITTLE_ENDIAN = - ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN) - && System.getProperty("com.datastax.driver.dse.geometry.FORCE_REFLECTION_WKB") - == null; // only for tests - - static ByteBuffer asLittleEndianBinary(OGCGeometry ogcGeometry) { - if (IS_NATIVE_LITTLE_ENDIAN) { - return ogcGeometry.asBinary(); // the default implementation does what we want - } else { - int exportFlags; - if (ogcGeometry instanceof OGCPoint) { - exportFlags = 0; - } else if (ogcGeometry instanceof OGCLineString) { - exportFlags = WkbExportFlags.wkbExportLineString; - } else if (ogcGeometry instanceof OGCPolygon) { - exportFlags = WkbExportFlags.wkbExportPolygon; - } else { - throw new AssertionError("Unsupported type: " + ogcGeometry.getClass()); - } - - // Copy-pasted from OperatorExportToWkbLocal#execute, except for the flags and order - int size = exportToWKB(exportFlags, ogcGeometry.getEsriGeometry(), null); - ByteBuffer wkbBuffer = ByteBuffer.allocate(size).order(ByteOrder.LITTLE_ENDIAN); - exportToWKB(exportFlags, ogcGeometry.getEsriGeometry(), wkbBuffer); - return wkbBuffer; - } - } - - // Provides reflective access to the private static method OperatorExportToWkbLocal#exportToWKB - private static int exportToWKB(int exportFlags, Geometry geometry, ByteBuffer wkbBuffer) { - assert !IS_NATIVE_LITTLE_ENDIAN; - try { - return (Integer) exportToWKB.invoke(null, exportFlags, geometry, wkbBuffer); - } catch (Exception e) { - throw new RuntimeException( - "Couldn't invoke private method OperatorExportToWkbLocal#exportToWKB", e); - } - } - - private static final Method exportToWKB; - - static { - if (IS_NATIVE_LITTLE_ENDIAN) { - exportToWKB = null; // won't be used - } else { - try { - OperatorExportToWkb op = - (OperatorExportToWkb) - OperatorFactoryLocal.getInstance().getOperator(Operator.Type.ExportToWkb); - exportToWKB = - op.getClass() - .getDeclaredMethod("exportToWKB", int.class, Geometry.class, 
ByteBuffer.class); - exportToWKB.setAccessible(true); - } catch (NoSuchMethodException e) { - throw new RuntimeException( - "Couldn't get access to private method OperatorExportToWkbLocal#exportToWKB", e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java deleted file mode 100644 index 333ba6099d3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.nio.ByteBuffer; - -public class ByteBufUtil { - - // Does not move the reader index of the ByteBuf parameter - public static ByteBuffer toByteBuffer(ByteBuf buffer) { - if (buffer.isDirect()) { - return buffer.nioBuffer(); - } - final byte[] bytes = new byte[buffer.readableBytes()]; - buffer.getBytes(buffer.readerIndex(), bytes); - return ByteBuffer.wrap(bytes); - } - - static ByteBuf toByteBuf(ByteBuffer buffer) { - return Unpooled.wrappedBuffer(buffer); - } - - // read a predefined amount of bytes from the netty buffer and move its readerIndex - public static ByteBuffer readBytes(ByteBuf nettyBuf, int size) { - ByteBuffer res = ByteBuffer.allocate(size); - nettyBuf.readBytes(res); - res.flip(); - return res; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java deleted file mode 100644 index b6fe05a987c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.Bytecode; - -/** - * A dedicated statement implementation for implicit traversal execution via a {@link - * DseGraphRemoteConnection}. - * - *

This is a simplified version of {@link FluentGraphStatement} that exposes the bytecode - * directly instead of the traversal. - * - *

This class is for internal use only. - */ -public class BytecodeGraphStatement extends GraphStatementBase { - - private final Bytecode bytecode; - - public BytecodeGraphStatement( - Bytecode bytecode, DriverExecutionProfile executionProfile, String executionProfileName) { - this( - bytecode, - null, - null, - null, - Statement.NO_DEFAULT_TIMESTAMP, - executionProfile, - executionProfileName, - Collections.emptyMap(), - null, - null, - null, - null, - null, - null); - } - - private BytecodeGraphStatement( - Bytecode bytecode, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.bytecode = bytecode; - } - - public Bytecode getBytecode() { - return bytecode; - } - - @Override - protected BytecodeGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new BytecodeGraphStatement( - bytecode, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java deleted file mode 100644 index 9c7f773c3a2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class ContinuousAsyncGraphResultSet implements AsyncGraphResultSet { - - private final CountingIterator iterator; - private final int pageNumber; - private final boolean hasMorePages; - private final ExecutionInfo executionInfo; - private final ContinuousGraphRequestHandler continuousGraphRequestHandler; - private final Iterable currentPage; - - public ContinuousAsyncGraphResultSet( - ExecutionInfo executionInfo, - Queue data, - int pageNumber, - boolean hasMorePages, - ContinuousGraphRequestHandler continuousGraphRequestHandler, - GraphProtocol graphProtocol) { - - this.iterator = new GraphResultIterator(data, graphProtocol); - this.pageNumber = pageNumber; - this.hasMorePages = hasMorePages; - this.executionInfo = executionInfo; - this.continuousGraphRequestHandler = continuousGraphRequestHandler; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(executionInfo); - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public boolean hasMorePages() { - return hasMorePages; - } - - @NonNull - @Override - public CompletionStage 
fetchNextPage() throws IllegalStateException { - if (!hasMorePages()) { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - return continuousGraphRequestHandler.fetchNextPage(); - } - - @Override - public void cancel() { - continuousGraphRequestHandler.cancel(); - } - - /** Returns the current page's number. Pages are numbered starting from 1. */ - public int pageNumber() { - return pageNumber; - } - - static AsyncGraphResultSet empty(ExecutionInfo executionInfo) { - - return new AsyncGraphResultSet() { - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(executionInfo); - } - - @NonNull - @Override - public Iterable currentPage() { - return Collections.emptyList(); - } - - @Override - public int remaining() { - return 0; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - throw new IllegalStateException( - "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); - } - - @Override - public void cancel() { - // noop - } - }; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java deleted file mode 100644 index 07d9e4c84a3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.result.Rows; -import 
edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import net.jcip.annotations.ThreadSafe; - -/** - * Handles a Graph request that supports multiple response messages (a.k.a. continuous paging - * request). - */ -@ThreadSafe -public class ContinuousGraphRequestHandler - extends ContinuousRequestHandlerBase, AsyncGraphResultSet> { - - private final GraphBinaryModule graphBinaryModule; - private final GraphSupportChecker graphSupportChecker; - private final Duration globalTimeout; - - ContinuousGraphRequestHandler( - @NonNull GraphStatement statement, - @NonNull DefaultSession session, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix, - @NonNull GraphBinaryModule graphBinaryModule, - @NonNull GraphSupportChecker graphSupportChecker) { - super( - statement, - session, - context, - sessionLogPrefix, - AsyncGraphResultSet.class, - true, - DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, - DseSessionMetric.GRAPH_REQUESTS, - DseNodeMetric.GRAPH_MESSAGES); - this.graphBinaryModule = graphBinaryModule; - this.graphSupportChecker = graphSupportChecker; - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - globalTimeout = - MoreObjects.firstNonNull( - statement.getTimeout(), - executionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)); - // NOTE that ordering of the following statement matters. - // We should register this request after all fields have been initialized. 
- throttler.register(this); - } - - @NonNull - @Override - protected Duration getGlobalTimeout() { - return globalTimeout; - } - - @NonNull - @Override - protected Duration getPageTimeout(@NonNull GraphStatement statement, int pageNumber) { - return Duration.ZERO; - } - - @NonNull - @Override - protected Duration getReviseRequestTimeout(@NonNull GraphStatement statement) { - return Duration.ZERO; - } - - @Override - protected int getMaxEnqueuedPages(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - } - - @Override - protected int getMaxPages(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return executionProfile.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES); - } - - @NonNull - @Override - protected Message getMessage(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol subProtocol = - graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - return GraphConversions.createContinuousMessageFromGraphStatement( - statement, subProtocol, executionProfile, context, graphBinaryModule); - } - - @Override - protected boolean isTracingEnabled(@NonNull GraphStatement statement) { - return statement.isTracing(); - } - - @NonNull - @Override - protected Map createPayload(@NonNull GraphStatement statement) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol subProtocol = - graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - return GraphConversions.createCustomPayload( - statement, subProtocol, executionProfile, context, graphBinaryModule); - } - - @NonNull - @Override - 
protected AsyncGraphResultSet createEmptyResultSet(@NonNull ExecutionInfo executionInfo) { - return ContinuousAsyncGraphResultSet.empty(executionInfo); - } - - @NonNull - @Override - protected ContinuousAsyncGraphResultSet createResultSet( - @NonNull GraphStatement statement, - @NonNull Rows rows, - @NonNull ExecutionInfo executionInfo, - @NonNull ColumnDefinitions columnDefinitions) - throws IOException { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol subProtocol = - graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - - Queue graphNodes = new ArrayDeque<>(); - for (List row : rows.getData()) { - if (subProtocol.isGraphBinary()) { - graphNodes.offer(GraphConversions.createGraphBinaryGraphNode(row, this.graphBinaryModule)); - } else { - graphNodes.offer(GraphSONUtils.createGraphNode(row, subProtocol)); - } - } - - DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); - return new ContinuousAsyncGraphResultSet( - executionInfo, - graphNodes, - metadata.continuousPageNumber, - !metadata.isLastContinuousPage, - this, - subProtocol); - } - - @Override - protected int pageNumber(@NonNull AsyncGraphResultSet resultSet) { - return ((ContinuousAsyncGraphResultSet) resultSet).pageNumber(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java deleted file mode 100644 index 349321da0cf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Collection; -import java.util.Map; -import java.util.Objects; -import org.javatuples.Pair; - -/** Predicates that can be used on CQL Collections. */ -public enum CqlCollectionPredicate implements DsePredicate { - contains { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(value instanceof Collection); - return ((Collection) value).contains(condition); - } - }, - - containsKey { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(value instanceof Map); - return ((Map) value).containsKey(condition); - } - }, - - containsValue { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(value instanceof Map); - return ((Map) value).containsValue(condition); - } - }, - - entryEq { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - Preconditions.checkArgument(condition instanceof Pair); - Preconditions.checkArgument(value instanceof Map); - Pair pair = (Pair) condition; - Map map = (Map) value; - return Objects.equals(map.get(pair.getValue0()), pair.getValue1()); - } - }; - - @Override - public 
boolean isValidCondition(Object condition) { - if (condition instanceof Pair) { - Pair pair = (Pair) condition; - return pair.getValue0() != null && pair.getValue1() != null; - } - return condition != null; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java deleted file mode 100644 index abc7cc9514e..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class DefaultAsyncGraphResultSet implements AsyncGraphResultSet { - - private final ExecutionInfo executionInfo; - private final CountingIterator iterator; - private final Iterable currentPage; - - public DefaultAsyncGraphResultSet( - ExecutionInfo executionInfo, Queue data, GraphProtocol graphProtocol) { - this.executionInfo = executionInfo; - this.iterator = new GraphResultIterator(data, graphProtocol); - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(executionInfo); - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - throw new IllegalStateException( - "No next page. 
Use #hasMorePages before calling this method to avoid this error."); - } - - @Override - public void cancel() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java deleted file mode 100644 index e16287c415d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -@Immutable -public class DefaultBatchGraphStatement extends GraphStatementBase - implements BatchGraphStatement { - - private final List traversals; - - public DefaultBatchGraphStatement( - Iterable traversals, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.traversals = ImmutableList.copyOf(traversals); - } - - @NonNull - @Override - public DefaultBatchGraphStatement addTraversal(@NonNull GraphTraversal newTraversal) { - return new DefaultBatchGraphStatement( - ImmutableList.builder().addAll(traversals).add(newTraversal).build(), - isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - 
getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @NonNull - @Override - public DefaultBatchGraphStatement addTraversals(@NonNull Iterable newTraversals) { - return new DefaultBatchGraphStatement( - ImmutableList.builder().addAll(traversals).addAll(newTraversals).build(), - isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @Override - public int size() { - return this.traversals.size(); - } - - @Override - protected BatchGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new DefaultBatchGraphStatement( - traversals, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @NonNull - @Override - public Iterator iterator() { - return this.traversals.iterator(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java deleted file mode 100644 index 146e8e17ea2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.DseGraphRemoteConnectionBuilder; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; - -@NotThreadSafe -public class DefaultDseRemoteConnectionBuilder implements DseGraphRemoteConnectionBuilder { - - private final CqlSession session; - private DriverExecutionProfile executionProfile; - private String executionProfileName; - - public DefaultDseRemoteConnectionBuilder(CqlSession session) { - this.session = session; - } - - @Override - public RemoteConnection build() { - return new DseGraphRemoteConnection(session, executionProfile, executionProfileName); - } - - @Override - public DseGraphRemoteConnectionBuilder withExecutionProfile( - DriverExecutionProfile executionProfile) { - this.executionProfile = executionProfile; - return this; - } - - @Override - public DseGraphRemoteConnectionBuilder withExecutionProfileName(String executionProfileName) { - this.executionProfileName = executionProfileName; - return this; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java deleted file mode 100644 index 0f6f1faabbf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; - -@Immutable -public class DefaultFluentGraphStatement extends GraphStatementBase - implements FluentGraphStatement { - - private final GraphTraversal traversal; - - public DefaultFluentGraphStatement( - GraphTraversal traversal, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.traversal = traversal; - } - - @Override - protected FluentGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new DefaultFluentGraphStatement( - traversal, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - 
customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @NonNull - @Override - public GraphTraversal getTraversal() { - return traversal; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java deleted file mode 100644 index 71f79134237..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultScriptGraphStatement extends GraphStatementBase - implements ScriptGraphStatement { - - private final String script; - private final Boolean isSystemQuery; - private final NullAllowingImmutableMap queryParams; - - public DefaultScriptGraphStatement( - String script, - Map queryParams, - Boolean isSystemQuery, - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - super( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - this.script = script; - this.isSystemQuery = isSystemQuery; - this.queryParams = NullAllowingImmutableMap.copyOf(queryParams); - } - - //// Script GraphStatement level options - - @NonNull - @Override - public String getScript() { - return script; - } - - @NonNull - @Override - public ScriptGraphStatement setSystemQuery(@Nullable Boolean newValue) { - return new DefaultScriptGraphStatement( - script, - queryParams, - newValue, - 
isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @Nullable - @Override - public Boolean isSystemQuery() { - return isSystemQuery; - } - - @NonNull - @Override - public Map getQueryParams() { - return this.queryParams; - } - - @NonNull - @Override - public ScriptGraphStatement setQueryParam(@NonNull String name, @Nullable Object value) { - NullAllowingImmutableMap.Builder newQueryParamsBuilder = - NullAllowingImmutableMap.builder(); - for (Map.Entry entry : queryParams.entrySet()) { - if (!entry.getKey().equals(name)) { - newQueryParamsBuilder.put(entry.getKey(), entry.getValue()); - } - } - newQueryParamsBuilder.put(name, value); - return setQueryParams(newQueryParamsBuilder.build()); - } - - @NonNull - @Override - public ScriptGraphStatement removeQueryParam(@NonNull String name) { - if (!queryParams.containsKey(name)) { - return this; - } else { - NullAllowingImmutableMap.Builder newQueryParamsBuilder = - NullAllowingImmutableMap.builder(); - for (Map.Entry entry : queryParams.entrySet()) { - if (!entry.getKey().equals(name)) { - newQueryParamsBuilder.put(entry.getKey(), entry.getValue()); - } - } - return setQueryParams(newQueryParamsBuilder.build()); - } - } - - private ScriptGraphStatement setQueryParams(Map newQueryParams) { - return new DefaultScriptGraphStatement( - script, - newQueryParams, - isSystemQuery, - isIdempotent(), - getTimeout(), - getNode(), - getTimestamp(), - getExecutionProfile(), - getExecutionProfileName(), - getCustomPayload(), - getGraphName(), - getTraversalSource(), - getSubProtocol(), - getConsistencyLevel(), - getReadConsistencyLevel(), - getWriteConsistencyLevel()); - } - - @Override - protected ScriptGraphStatement newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long 
timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - return new DefaultScriptGraphStatement( - script, - queryParams, - isSystemQuery, - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Override - public String toString() { - return String.format("ScriptGraphStatement['%s', params: %s]", this.script, this.queryParams); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java deleted file mode 100644 index a5ec0a1d115..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import java.util.concurrent.CompletableFuture; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; -import org.apache.tinkerpop.gremlin.process.remote.traversal.RemoteTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.Bytecode; - -@Immutable -public class DseGraphRemoteConnection implements RemoteConnection { - - private final CqlSession session; - private final DriverExecutionProfile executionProfile; - private final String executionProfileName; - - public DseGraphRemoteConnection( - CqlSession session, DriverExecutionProfile executionProfile, String executionProfileName) { - this.session = session; - this.executionProfile = executionProfile; - this.executionProfileName = executionProfileName; - } - - @Override - public CompletableFuture> submitAsync(Bytecode bytecode) { - return session - .executeAsync(new BytecodeGraphStatement(bytecode, executionProfile, executionProfileName)) - .toCompletableFuture() - .thenApply(DseGraphTraversal::new); - } - - @Override - public void close() throws Exception { - // do not close the DseSession here. - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java deleted file mode 100644 index e0a5cf2d675..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import java.util.Iterator; -import java.util.NoSuchElementException; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.remote.traversal.AbstractRemoteTraversal; -import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; - -@NotThreadSafe -class DseGraphTraversal extends AbstractRemoteTraversal { - - private final Iterator graphNodeIterator; - - public DseGraphTraversal(AsyncGraphResultSet firstPage) { - this.graphNodeIterator = GraphResultSets.toSync(firstPage).iterator(); - } - - @Override - public boolean hasNext() { - return graphNodeIterator.hasNext(); - } - - @Override - public E next() { - return nextTraverser().get(); - } - - @Override - @SuppressWarnings("unchecked") - public Traverser.Admin nextTraverser() { - if (hasNext()) { - GraphNode nextGraphNode = graphNodeIterator.next(); - - // get the Raw object from the ObjectGraphNode, create a new remote Traverser - // with bulk = 1 because bulk is not supported yet. Casting should be ok - // because we have been able to deserialize into the right type. 
- return new DefaultRemoteTraverser<>((E) nextGraphNode.as(Object.class), 1); - } else { - // finished iterating/nothing to iterate. Normal behaviour. - throw new NoSuchElementException(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java deleted file mode 100644 index b5f8c30fd8c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.function.BiPredicate; - -/** - * An extension of TinkerPop's {@link BiPredicate} adding simple pre-condition checking methods that - * have to be written in the implementations. 
- */ -public interface DsePredicate extends BiPredicate { - - default void preEvaluate(Object condition) { - Preconditions.checkArgument( - this.isValidCondition(condition), "Invalid condition provided: %s", condition); - } - - boolean isValidCondition(Object condition); -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java deleted file mode 100644 index 5ab836babbf..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.io.Serializable; -import net.jcip.annotations.Immutable; - -/** - * A container for a term and maximum edit distance. - * - *

The context in which this is used determines the semantics of the edit distance. For instance, - * it might indicate single-character edits if used with fuzzy search queries or whole word - * movements if used with phrase proximity queries. - */ -@Immutable -public class EditDistance implements Serializable { - - private static final long serialVersionUID = 1L; - - public static final int DEFAULT_EDIT_DISTANCE = 0; - - public final String query; - public final int distance; - - public EditDistance(String query) { - this(query, DEFAULT_EDIT_DISTANCE); - } - - public EditDistance(String query, int distance) { - Preconditions.checkNotNull(query, "Query cannot be null."); - Preconditions.checkArgument(distance >= 0, "Edit distance cannot be negative."); - this.query = query; - this.distance = distance; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof EditDistance)) { - return false; - } - EditDistance that = (EditDistance) o; - return distance == that.distance && Objects.equal(query, that.query); - } - - @Override - public int hashCode() { - return Objects.hashCode(query, distance); - } - - @Override - public String toString() { - return "EditDistance{" + "query='" + query + '\'' + ", distance=" + distance + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java deleted file mode 100644 index 39949e97198..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; - -/** - * List of predicates for geolocation usage with DseGraph and Search indexes. Should not be accessed - * directly but through the {@link Geo} static methods. - */ -public enum GeoPredicate implements DsePredicate { - - /** Matches values within the distance specified by the condition over a Haversine geometry. 
*/ - inside { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - Preconditions.checkArgument(value instanceof Geometry); - Distance distance = (Distance) condition; - if (value instanceof Point) { - return haversineDistanceInDegrees(distance.getCenter(), (Point) value) - <= distance.getRadius(); - } else if (value instanceof Polygon) { - for (Point point : ((Polygon) value).getExteriorRing()) { - if (haversineDistanceInDegrees(distance.getCenter(), point) > distance.getRadius()) { - return false; - } - } - } else if (value instanceof LineString) { - for (Point point : ((LineString) value).getPoints()) { - if (haversineDistanceInDegrees(distance.getCenter(), point) > distance.getRadius()) { - return false; - } - } - } else { - throw new UnsupportedOperationException( - String.format("Value type '%s' unsupported", value.getClass().getName())); - } - - return true; - } - - @Override - public String toString() { - return "inside"; - } - }, - - /** - * Matches values contained in the geometric entity specified by the condition on a 2D Euclidean - * plane. 
- */ - insideCartesian { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - Preconditions.checkArgument(value instanceof Geometry); - return ((Geometry) condition).contains((Geometry) value); - } - - @Override - public String toString() { - return "insideCartesian"; - } - }; - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - static double haversineDistanceInDegrees(Point p1, Point p2) { - double dLat = Math.toRadians(p2.Y() - p1.Y()); - double dLon = Math.toRadians(p2.X() - p1.X()); - double lat1 = Math.toRadians(p1.Y()); - double lat2 = Math.toRadians(p2.Y()); - - double a = - Math.pow(Math.sin(dLat / 2), 2) - + Math.pow(Math.sin(dLon / 2), 2) * Math.cos(lat1) * Math.cos(lat2); - double c = 2 * Math.asin(Math.sqrt(a)); - return Math.toDegrees(c); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java deleted file mode 100644 index 80d55dac69d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -public class GeoUtils { - private static final double DEGREES_TO_RADIANS = Math.PI / 180; - private static final double EARTH_MEAN_RADIUS_KM = 6371.0087714; - private static final double DEG_TO_KM = DEGREES_TO_RADIANS * EARTH_MEAN_RADIUS_KM; - private static final double KM_TO_MILES = 0.621371192; - public static final double KM_TO_DEG = 1 / DEG_TO_KM; - public static final double MILES_TO_KM = 1 / KM_TO_MILES; -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java deleted file mode 100644 index c95b26b2e26..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static java.nio.charset.StandardCharsets.UTF_8; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.dse.protocol.internal.request.RawBytesQuery; -import com.datastax.dse.protocol.internal.request.query.ContinuousPagingOptions; -import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Query; -import 
com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; - -/** - * Utility class to move boilerplate out of {@link GraphRequestHandler}. - * - *

We extend {@link Conversions} only for methods that can be directly reused as-is; if something - * needs to be customized, it will be duplicated here instead of making the parent method - * "pluggable". - */ -public class GraphConversions extends Conversions { - - static final String GRAPH_LANG_OPTION_KEY = "graph-language"; - static final String GRAPH_NAME_OPTION_KEY = "graph-name"; - static final String GRAPH_SOURCE_OPTION_KEY = "graph-source"; - static final String GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY = "graph-read-consistency"; - static final String GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY = "graph-write-consistency"; - static final String GRAPH_RESULTS_OPTION_KEY = "graph-results"; - static final String GRAPH_TIMEOUT_OPTION_KEY = "request-timeout"; - static final String GRAPH_BINARY_QUERY_OPTION_KEY = "graph-binary-query"; - - static final String LANGUAGE_GROOVY = "gremlin-groovy"; - static final String LANGUAGE_BYTECODE = "bytecode-json"; - - private static final BufferFactory FACTORY = new DseNettyBufferFactory(); - - @VisibleForTesting static final byte[] EMPTY_STRING_QUERY = "".getBytes(UTF_8); - - public static Message createContinuousMessageFromGraphStatement( - GraphStatement statement, - GraphProtocol subProtocol, - DriverExecutionProfile config, - InternalDriverContext context, - GraphBinaryModule graphBinaryModule) { - - final List encodedQueryParams; - if (!(statement instanceof ScriptGraphStatement) - || ((ScriptGraphStatement) statement).getQueryParams().isEmpty()) { - encodedQueryParams = Collections.emptyList(); - } else { - try { - Map queryParams = ((ScriptGraphStatement) statement).getQueryParams(); - if (subProtocol.isGraphBinary()) { - Buffer graphBinaryParams = graphBinaryModule.serialize(queryParams); - encodedQueryParams = Collections.singletonList(graphBinaryParams.nioBuffer()); - graphBinaryParams.release(); - } else { - encodedQueryParams = - Collections.singletonList( - GraphSONUtils.serializeToByteBuffer(queryParams, 
subProtocol)); - } - } catch (IOException e) { - throw new UncheckedIOException( - "Couldn't serialize parameters for GraphStatement: " + statement, e); - } - } - - int consistencyLevel = - DefaultConsistencyLevel.valueOf(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .getProtocolCode(); - - long timestamp = statement.getTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - - int pageSize = config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE); - int maxPages = config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES); - int maxPagesPerSecond = - config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND); - int maxEnqueuedPages = - config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); - ContinuousPagingOptions options = - new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages); - - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyLevel, - encodedQueryParams, - Collections.emptyMap(), // ignored by the DSE Graph server - true, // also ignored - pageSize, - null, - ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL, // also ignored - timestamp, - null, // also ignored - false, // graph CP does not support sizeInBytes - options); - - if (statement instanceof ScriptGraphStatement) { - return new Query(((ScriptGraphStatement) statement).getScript(), queryOptions); - } else { - return new RawBytesQuery(getQueryBytes(statement, subProtocol), queryOptions); - } - } - - static Message createMessageFromGraphStatement( - GraphStatement statement, - GraphProtocol subProtocol, - DriverExecutionProfile config, - InternalDriverContext context, - GraphBinaryModule graphBinaryModule) { - - final List encodedQueryParams; - if (!(statement instanceof ScriptGraphStatement) - || ((ScriptGraphStatement) statement).getQueryParams().isEmpty()) { - encodedQueryParams = Collections.emptyList(); - } else { - try { - 
Map queryParams = ((ScriptGraphStatement) statement).getQueryParams(); - if (subProtocol.isGraphBinary()) { - Buffer graphBinaryParams = graphBinaryModule.serialize(queryParams); - encodedQueryParams = Collections.singletonList(graphBinaryParams.nioBuffer()); - graphBinaryParams.release(); - } else { - encodedQueryParams = - Collections.singletonList( - GraphSONUtils.serializeToByteBuffer(queryParams, subProtocol)); - } - } catch (IOException e) { - throw new UncheckedIOException( - "Couldn't serialize parameters for GraphStatement: " + statement, e); - } - } - - ConsistencyLevel consistency = statement.getConsistencyLevel(); - int consistencyLevel = - (consistency == null) - ? context - .getConsistencyLevelRegistry() - .nameToCode(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - : consistency.getProtocolCode(); - - long timestamp = statement.getTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - - DseQueryOptions queryOptions = - new DseQueryOptions( - consistencyLevel, - encodedQueryParams, - Collections.emptyMap(), // ignored by the DSE Graph server - true, // also ignored - 50, // also ignored - null, // also ignored - ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL, // also ignored - timestamp, - null, // also ignored - false, // also ignored - null // also ignored - ); - - if (statement instanceof ScriptGraphStatement) { - return new Query(((ScriptGraphStatement) statement).getScript(), queryOptions); - } else { - return new RawBytesQuery(getQueryBytes(statement, subProtocol), queryOptions); - } - } - - // This method returns either a Bytecode object, or a List if the statement is a - // BatchGraphStatement - @VisibleForTesting - public static Object bytecodeToSerialize(GraphStatement statement) { - Preconditions.checkArgument( - statement instanceof FluentGraphStatement - || statement instanceof BatchGraphStatement - || statement instanceof BytecodeGraphStatement, - "To 
serialize bytecode the query must be a fluent or batch statement, but was: %s", - statement.getClass()); - - Object toSerialize; - if (statement instanceof FluentGraphStatement) { - toSerialize = ((FluentGraphStatement) statement).getTraversal().asAdmin().getBytecode(); - } else if (statement instanceof BatchGraphStatement) { - // transform the Iterator to List - toSerialize = - ImmutableList.copyOf( - Iterators.transform( - ((BatchGraphStatement) statement).iterator(), - traversal -> traversal.asAdmin().getBytecode())); - } else { - toSerialize = ((BytecodeGraphStatement) statement).getBytecode(); - } - return toSerialize; - } - - private static byte[] getQueryBytes(GraphStatement statement, GraphProtocol graphSubProtocol) { - try { - return graphSubProtocol.isGraphBinary() - // if GraphBinary, the query is encoded in the custom payload, and not in the query field - // see GraphConversions#createCustomPayload() - ? EMPTY_STRING_QUERY - : GraphSONUtils.serializeToBytes(bytecodeToSerialize(statement), graphSubProtocol); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - public static Map createCustomPayload( - GraphStatement statement, - GraphProtocol subProtocol, - DriverExecutionProfile config, - InternalDriverContext context, - GraphBinaryModule graphBinaryModule) { - - ProtocolVersion protocolVersion = context.getProtocolVersion(); - - NullAllowingImmutableMap.Builder payload = - NullAllowingImmutableMap.builder(); - Map statementOptions = statement.getCustomPayload(); - payload.putAll(statementOptions); - - final String graphLanguage; - - // Don't override anything that's already provided at the statement level - if (!statementOptions.containsKey(GRAPH_LANG_OPTION_KEY)) { - graphLanguage = - statement instanceof ScriptGraphStatement ? 
LANGUAGE_GROOVY : LANGUAGE_BYTECODE; - payload.put(GRAPH_LANG_OPTION_KEY, TypeCodecs.TEXT.encode(graphLanguage, protocolVersion)); - } else { - graphLanguage = - TypeCodecs.TEXT.decode(statementOptions.get(GRAPH_LANG_OPTION_KEY), protocolVersion); - Preconditions.checkNotNull( - graphLanguage, "A null value was set for the graph-language custom payload key."); - } - - if (!isSystemQuery(statement, config)) { - if (!statementOptions.containsKey(GRAPH_NAME_OPTION_KEY)) { - String graphName = statement.getGraphName(); - if (graphName == null) { - graphName = config.getString(DseDriverOption.GRAPH_NAME, null); - } - if (graphName != null) { - payload.put(GRAPH_NAME_OPTION_KEY, TypeCodecs.TEXT.encode(graphName, protocolVersion)); - } - } - if (!statementOptions.containsKey(GRAPH_SOURCE_OPTION_KEY)) { - String traversalSource = statement.getTraversalSource(); - if (traversalSource == null) { - traversalSource = config.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); - } - if (traversalSource != null) { - payload.put( - GRAPH_SOURCE_OPTION_KEY, TypeCodecs.TEXT.encode(traversalSource, protocolVersion)); - } - } - } - - // the payload allows null entry values so doing a get directly here and checking for null - final ByteBuffer payloadInitialProtocol = statementOptions.get(GRAPH_RESULTS_OPTION_KEY); - if (payloadInitialProtocol == null) { - Preconditions.checkNotNull(subProtocol); - payload.put( - GRAPH_RESULTS_OPTION_KEY, - TypeCodecs.TEXT.encode(subProtocol.toInternalCode(), protocolVersion)); - } else { - subProtocol = - GraphProtocol.fromString(TypeCodecs.TEXT.decode(payloadInitialProtocol, protocolVersion)); - } - - if (subProtocol.isGraphBinary() && graphLanguage.equals(LANGUAGE_BYTECODE)) { - Object bytecodeQuery = bytecodeToSerialize(statement); - try { - Buffer bytecodeByteBuf = graphBinaryModule.serialize(bytecodeQuery); - payload.put(GRAPH_BINARY_QUERY_OPTION_KEY, bytecodeByteBuf.nioBuffer()); - bytecodeByteBuf.release(); - } catch (IOException e) { - 
throw new UncheckedIOException(e); - } - } - - if (!statementOptions.containsKey(GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) { - ConsistencyLevel readCl = statement.getReadConsistencyLevel(); - String readClString = - readCl != null - ? readCl.name() - : config.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); - if (readClString != null) { - payload.put( - GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY, - TypeCodecs.TEXT.encode(readClString, protocolVersion)); - } - } - - if (!statementOptions.containsKey(GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) { - ConsistencyLevel writeCl = statement.getWriteConsistencyLevel(); - String writeClString = - writeCl != null - ? writeCl.name() - : config.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); - if (writeClString != null) { - payload.put( - GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY, - TypeCodecs.TEXT.encode(writeClString, protocolVersion)); - } - } - - if (!statementOptions.containsKey(GRAPH_TIMEOUT_OPTION_KEY)) { - Duration timeout = statement.getTimeout(); - if (timeout == null) { - timeout = config.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); - } - if (timeout != null && !timeout.isZero()) { - payload.put( - GRAPH_TIMEOUT_OPTION_KEY, - TypeCodecs.BIGINT.encode(timeout.toMillis(), protocolVersion)); - } - } - return payload.build(); - } - - private static boolean isSystemQuery(GraphStatement statement, DriverExecutionProfile config) { - if (statement instanceof ScriptGraphStatement) { - Boolean statementValue = ((ScriptGraphStatement) statement).isSystemQuery(); - if (statementValue != null) { - return statementValue; - } - } - return config.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); - } - - public static GraphNode createGraphBinaryGraphNode( - List data, GraphBinaryModule graphBinaryModule) throws IOException { - // there should be only one column in the given row - Preconditions.checkArgument(data.size() == 1, "Invalid row given to deserialize"); - - Buffer toDeserialize = 
FACTORY.wrap(data.get(0)); - Object deserializedObject = graphBinaryModule.deserialize(toDeserialize); - toDeserialize.release(); - assert deserializedObject instanceof Traverser - : "Graph protocol error. Received object should be a Traverser but it is not."; - return new ObjectGraphNode(deserializedObject); - } - - public static Duration resolveGraphRequestTimeout( - GraphStatement statement, InternalDriverContext context) { - DriverExecutionProfile executionProfile = resolveExecutionProfile(statement, context); - return statement.getTimeout() != null - ? statement.getTimeout() - : executionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT); - } - - public static GraphProtocol resolveGraphSubProtocol( - GraphStatement statement, - GraphSupportChecker graphSupportChecker, - InternalDriverContext context) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - return graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java deleted file mode 100644 index b6472f690d3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.UUID; -import java.util.concurrent.CompletionStage; - -/** - * Handles conversions from / to GraphExecutionInfo and ExecutionInfo since GraphExecutionInfo has - * been deprecated by JAVA-2556. - */ -public class GraphExecutionInfoConverter { - - /** - * Called exclusively from default methods in API interfaces {@link - * com.datastax.dse.driver.api.core.graph.GraphResultSet} and {@link - * com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet}. Graph result set implementations - * do not use this method but rather the other one below. 
- */ - @SuppressWarnings("deprecation") - public static ExecutionInfo convert( - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo) { - return new ExecutionInfo() { - - @NonNull - @Override - public Request getRequest() { - return graphExecutionInfo.getStatement(); - } - - @NonNull - @Override - public Statement getStatement() { - throw new ClassCastException("GraphStatement cannot be cast to Statement"); - } - - @Nullable - @Override - public Node getCoordinator() { - return graphExecutionInfo.getCoordinator(); - } - - @Override - public int getSpeculativeExecutionCount() { - return graphExecutionInfo.getSpeculativeExecutionCount(); - } - - @Override - public int getSuccessfulExecutionIndex() { - return graphExecutionInfo.getSuccessfulExecutionIndex(); - } - - @NonNull - @Override - public List> getErrors() { - return graphExecutionInfo.getErrors(); - } - - @Nullable - @Override - public ByteBuffer getPagingState() { - return null; - } - - @NonNull - @Override - public List getWarnings() { - return graphExecutionInfo.getWarnings(); - } - - @NonNull - @Override - public Map getIncomingPayload() { - return graphExecutionInfo.getIncomingPayload(); - } - - @Override - public boolean isSchemaInAgreement() { - return true; - } - - @Nullable - @Override - public UUID getTracingId() { - return null; - } - - @NonNull - @Override - public CompletionStage getQueryTraceAsync() { - return CompletableFutures.failedFuture( - new IllegalStateException("Tracing was disabled for this request")); - } - - @Override - public int getResponseSizeInBytes() { - return -1; - } - - @Override - public int getCompressedResponseSizeInBytes() { - return -1; - } - }; - } - - /** - * Called from graph result set implementations, to convert the original {@link ExecutionInfo} - * produced by request handlers into the (deprecated) type GraphExecutionInfo. 
- */ - @SuppressWarnings("deprecation") - public static com.datastax.dse.driver.api.core.graph.GraphExecutionInfo convert( - ExecutionInfo executionInfo) { - return new com.datastax.dse.driver.api.core.graph.GraphExecutionInfo() { - - @Override - public GraphStatement getStatement() { - return (GraphStatement) executionInfo.getRequest(); - } - - @Override - public Node getCoordinator() { - return executionInfo.getCoordinator(); - } - - @Override - public int getSpeculativeExecutionCount() { - return executionInfo.getSpeculativeExecutionCount(); - } - - @Override - public int getSuccessfulExecutionIndex() { - return executionInfo.getSuccessfulExecutionIndex(); - } - - @Override - public List> getErrors() { - return executionInfo.getErrors(); - } - - @Override - public List getWarnings() { - return executionInfo.getWarnings(); - } - - @Override - public Map getIncomingPayload() { - return executionInfo.getIncomingPayload(); - } - }; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java deleted file mode 100644 index 6b7a9f4c430..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -public enum GraphProtocol { - GRAPHSON_1_0("graphson-1.0"), - GRAPHSON_2_0("graphson-2.0"), - GRAPH_BINARY_1_0("graph-binary-1.0"), - ; - - private static final Map BY_CODE; - - static { - Map tmp = new HashMap<>(); - for (GraphProtocol value : values()) { - tmp.put(value.stringRepresentation, value); - } - BY_CODE = Collections.unmodifiableMap(tmp); - } - - private final String stringRepresentation; - - GraphProtocol(String stringRepresentation) { - this.stringRepresentation = stringRepresentation; - } - - @NonNull - public String toInternalCode() { - return stringRepresentation; - } - - @NonNull - public static GraphProtocol fromString(@Nullable String stringRepresentation) { - if (stringRepresentation == null || !BY_CODE.containsKey(stringRepresentation)) { - StringBuilder sb = - new StringBuilder( - String.format( - "Graph protocol used [\"%s\"] unknown. 
Possible values are: [ \"%s\"", - stringRepresentation, GraphProtocol.values()[0].toInternalCode())); - for (int i = 1; i < GraphProtocol.values().length; i++) { - sb.append(String.format(", \"%s\"", GraphProtocol.values()[i].toInternalCode())); - } - sb.append("]"); - throw new IllegalArgumentException(sb.toString()); - } - return BY_CODE.get(stringRepresentation); - } - - public boolean isGraphBinary() { - return this == GRAPH_BINARY_1_0; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java deleted file mode 100644 index 050b03c66f4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; - -@ThreadSafe -public class GraphRequestAsyncProcessor - implements RequestProcessor, CompletionStage> { - - private final GraphBinaryModule graphBinaryModule; - private final GraphSupportChecker graphSupportChecker; - - public GraphRequestAsyncProcessor( - DefaultDriverContext context, GraphSupportChecker graphSupportChecker) { - TypeSerializerRegistry typeSerializerRegistry = - GraphBinaryModule.createDseTypeSerializerRegistry(context); - this.graphBinaryModule = - new GraphBinaryModule( - new GraphBinaryReader(typeSerializerRegistry), - new GraphBinaryWriter(typeSerializerRegistry)); - this.graphSupportChecker = graphSupportChecker; - } - - 
@NonNull - public GraphBinaryModule getGraphBinaryModule() { - return graphBinaryModule; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return (request instanceof ScriptGraphStatement - || request instanceof FluentGraphStatement - || request instanceof BatchGraphStatement - || request instanceof BytecodeGraphStatement) - && resultType.equals(GraphStatement.ASYNC); - } - - @Override - public CompletionStage process( - GraphStatement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - if (graphSupportChecker.isPagingEnabled(request, context)) { - return new ContinuousGraphRequestHandler( - request, - session, - context, - sessionLogPrefix, - getGraphBinaryModule(), - graphSupportChecker) - .handle(); - } else { - return new GraphRequestHandler( - request, - session, - context, - sessionLogPrefix, - getGraphBinaryModule(), - graphSupportChecker) - .handle(); - } - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java deleted file mode 100644 index 5c9ceb00df2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java +++ /dev/null @@ -1,871 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import 
com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.DefaultExecutionInfo; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.Void; -import edu.umd.cs.findbugs.annotations.NonNull; -import 
io.netty.handler.codec.EncoderException; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class GraphRequestHandler implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(GraphRequestHandler.class); - - private static final long NANOTIME_NOT_MEASURED_YET = -1; - private static final int NO_SUCCESSFUL_EXECUTION = -1; - - private final long startTimeNanos; - private final String logPrefix; - private final GraphStatement initialStatement; - private final DefaultSession session; - private final InternalDriverContext context; - protected final CompletableFuture result; - private final Timer timer; - - /** - * How many speculative executions are currently running (including the initial execution). We - * track this in order to know when to fail the request if all executions have reached the end of - * the query plan. - */ - private final AtomicInteger activeExecutionsCount; - - /** - * How many speculative executions have started (excluding the initial execution), whether they - * have completed or not. We track this in order to fill {@link - * ExecutionInfo#getSpeculativeExecutionCount()}. 
- */ - private final AtomicInteger startedSpeculativeExecutionsCount; - - private final Timeout scheduledTimeout; - private final List scheduledExecutions; - private final List inFlightCallbacks; - private final RequestThrottler throttler; - private final RequestTracker requestTracker; - private final SessionMetricUpdater sessionMetricUpdater; - private final GraphBinaryModule graphBinaryModule; - private final GraphSupportChecker graphSupportChecker; - - // The errors on the nodes that were already tried (lazily initialized on the first error). - // We don't use a map because nodes can appear multiple times. - private volatile List> errors; - - GraphRequestHandler( - @NonNull GraphStatement statement, - @NonNull DefaultSession dseSession, - @NonNull InternalDriverContext context, - @NonNull String sessionLogPrefix, - @NonNull GraphBinaryModule graphBinaryModule, - @NonNull GraphSupportChecker graphSupportChecker) { - this.startTimeNanos = System.nanoTime(); - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new Graph request handler for request {}", logPrefix, statement); - this.initialStatement = statement; - this.session = dseSession; - this.context = context; - this.graphSupportChecker = graphSupportChecker; - this.result = new CompletableFuture<>(); - this.result.exceptionally( - t -> { - try { - if (t instanceof CancellationException) { - cancelScheduledTasks(); - context.getRequestThrottler().signalCancel(this); - } - } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); - } - return null; - }); - this.graphBinaryModule = graphBinaryModule; - this.timer = context.getNettyOptions().getTimer(); - - this.activeExecutionsCount = new AtomicInteger(1); - this.startedSpeculativeExecutionsCount = new AtomicInteger(0); - this.scheduledExecutions = new CopyOnWriteArrayList<>(); - this.inFlightCallbacks = new CopyOnWriteArrayList<>(); - - this.requestTracker = 
context.getRequestTracker(); - this.sessionMetricUpdater = session.getMetricUpdater(); - - Duration timeout = GraphConversions.resolveGraphRequestTimeout(statement, context); - this.scheduledTimeout = scheduleTimeout(timeout); - - this.throttler = context.getRequestThrottler(); - this.throttler.register(this); - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - if (wasDelayed - // avoid call to nanoTime() if metric is disabled: - && sessionMetricUpdater.isEnabled( - DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { - sessionMetricUpdater.updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - Queue queryPlan = - initialStatement.getNode() != null - ? new SimpleQueryPlan(initialStatement.getNode()) - : context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(initialStatement, executionProfile.getName(), session); - sendRequest(initialStatement, null, queryPlan, 0, 0, true); - } - - public CompletionStage handle() { - return result; - } - - private Timeout scheduleTimeout(Duration timeoutDuration) { - if (timeoutDuration != null && timeoutDuration.toNanos() > 0) { - try { - return this.timer.newTimeout( - (Timeout timeout1) -> - setFinalError( - initialStatement, - new DriverTimeoutException("Query timed out after " + timeoutDuration), - null, - NO_SUCCESSFUL_EXECUTION), - timeoutDuration.toNanos(), - TimeUnit.NANOSECONDS); - } catch (IllegalStateException e) { - // If we raced with session shutdown the timer might be closed already, rethrow with a more - // explicit message - result.completeExceptionally( - "cannot be started once stopped".equals(e.getMessage()) - ? new IllegalStateException("Session is closed") - : e); - } - } - return null; - } - - /** - * Sends the request to the next available node. 
- * - * @param retriedNode if not null, it will be attempted first before the rest of the query plan. - * @param queryPlan the list of nodes to try (shared with all other executions) - * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. - * @param retryCount the number of times that the retry policy was invoked for this execution - * already (note that some internal retries don't go through the policy, and therefore don't - * increment this counter) - * @param scheduleNextExecution whether to schedule the next speculative execution - */ - private void sendRequest( - GraphStatement statement, - Node retriedNode, - Queue queryPlan, - int currentExecutionIndex, - int retryCount, - boolean scheduleNextExecution) { - if (result.isDone()) { - return; - } - Node node = retriedNode; - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { - while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - // We've reached the end of the query plan without finding any node to write to - if (!result.isDone() && activeExecutionsCount.decrementAndGet() == 0) { - // We're the last execution so fail the result - setFinalError( - statement, - AllNodesFailedException.fromErrors(this.errors), - null, - NO_SUCCESSFUL_EXECUTION); - } - } else { - NodeResponseCallback nodeResponseCallback = - new NodeResponseCallback( - statement, - node, - queryPlan, - channel, - currentExecutionIndex, - retryCount, - scheduleNextExecution, - logPrefix); - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - GraphProtocol graphSubProtocol = - GraphConversions.resolveGraphSubProtocol(statement, graphSupportChecker, context); - Message message = - 
GraphConversions.createMessageFromGraphStatement( - statement, graphSubProtocol, executionProfile, context, graphBinaryModule); - Map customPayload = - GraphConversions.createCustomPayload( - statement, graphSubProtocol, executionProfile, context, graphBinaryModule); - channel - .write(message, statement.isTracing(), customPayload, nodeResponseCallback) - .addListener(nodeResponseCallback); - } - } - - private void recordError(Node node, Throwable error) { - // Use a local variable to do only a single single volatile read in the nominal case - List> errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - synchronized (GraphRequestHandler.this) { - errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - this.errors = errorsSnapshot = new CopyOnWriteArrayList<>(); - } - } - } - errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - private void cancelScheduledTasks() { - if (this.scheduledTimeout != null) { - this.scheduledTimeout.cancel(); - } - if (scheduledExecutions != null) { - for (Timeout scheduledExecution : scheduledExecutions) { - scheduledExecution.cancel(); - } - } - for (NodeResponseCallback callback : inFlightCallbacks) { - callback.cancel(); - } - } - - private void setFinalResult( - Result resultMessage, Frame responseFrame, NodeResponseCallback callback) { - try { - ExecutionInfo executionInfo = buildExecutionInfo(callback, responseFrame); - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(callback.statement, context); - GraphProtocol subProtocol = - GraphConversions.resolveGraphSubProtocol( - callback.statement, graphSupportChecker, context); - Queue graphNodes = new ArrayDeque<>(); - for (List row : ((Rows) resultMessage).getData()) { - if (subProtocol.isGraphBinary()) { - graphNodes.offer( - GraphConversions.createGraphBinaryGraphNode( - row, GraphRequestHandler.this.graphBinaryModule)); - } else { - graphNodes.offer(GraphSONUtils.createGraphNode(row, subProtocol)); - } - } - - 
DefaultAsyncGraphResultSet resultSet = - new DefaultAsyncGraphResultSet(executionInfo, graphNodes, subProtocol); - if (result.complete(resultSet)) { - cancelScheduledTasks(); - throttler.signalSuccess(this); - - // Only call nanoTime() if we're actually going to use it - long completionTimeNanos = NANOTIME_NOT_MEASURED_YET, - totalLatencyNanos = NANOTIME_NOT_MEASURED_YET; - if (!(requestTracker instanceof NoopRequestTracker)) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - long nodeLatencyNanos = completionTimeNanos - callback.nodeStartTimeNanos; - requestTracker.onNodeSuccess( - callback.statement, nodeLatencyNanos, executionProfile, callback.node, logPrefix); - requestTracker.onSuccess( - callback.statement, totalLatencyNanos, executionProfile, callback.node, logPrefix); - } - if (sessionMetricUpdater.isEnabled( - DseSessionMetric.GRAPH_REQUESTS, executionProfile.getName())) { - if (completionTimeNanos == NANOTIME_NOT_MEASURED_YET) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - } - sessionMetricUpdater.updateTimer( - DseSessionMetric.GRAPH_REQUESTS, - executionProfile.getName(), - totalLatencyNanos, - TimeUnit.NANOSECONDS); - } - } - // log the warnings if they have NOT been disabled - if (!executionInfo.getWarnings().isEmpty() - && executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOG_WARNINGS) - && LOG.isWarnEnabled()) { - logServerWarnings(callback.statement, executionInfo.getWarnings()); - } - } catch (Throwable error) { - setFinalError(callback.statement, error, callback.node, NO_SUCCESSFUL_EXECUTION); - } - } - - private void logServerWarnings(GraphStatement statement, List warnings) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - // use the RequestLogFormatter to format the query - StringBuilder statementString = new StringBuilder(); - context - .getRequestLogFormatter() - 
.appendRequest( - statement, - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH), - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH), - statementString); - // log each warning separately - warnings.forEach( - (warning) -> - LOG.warn("Query '{}' generated server side warning(s): {}", statementString, warning)); - } - - private ExecutionInfo buildExecutionInfo(NodeResponseCallback callback, Frame responseFrame) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(callback.statement, context); - return new DefaultExecutionInfo( - callback.statement, - callback.node, - startedSpeculativeExecutionsCount.get(), - callback.execution, - errors, - null, - responseFrame, - true, - session, - context, - executionProfile); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialStatement, context); - sessionMetricUpdater.incrementCounter( - DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(initialStatement, error, null, NO_SUCCESSFUL_EXECUTION); - } - - private void setFinalError( - GraphStatement statement, Throwable error, Node node, int execution) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(statement, context); - if (error instanceof DriverException) { - ((DriverException) error) - .setExecutionInfo( - new DefaultExecutionInfo( - statement, - node, - startedSpeculativeExecutionsCount.get(), - execution, - errors, - null, 
- null, - true, - session, - context, - executionProfile)); - } - if (result.completeExceptionally(error)) { - cancelScheduledTasks(); - if (!(requestTracker instanceof NoopRequestTracker)) { - long latencyNanos = System.nanoTime() - startTimeNanos; - requestTracker.onError(statement, error, latencyNanos, executionProfile, node, logPrefix); - } - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - sessionMetricUpdater.incrementCounter( - DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, executionProfile.getName()); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - } - - /** - * Handles the interaction with a single node in the query plan. - * - *

An instance of this class is created each time we (re)try a node. - */ - private class NodeResponseCallback - implements ResponseCallback, GenericFutureListener> { - - private final long nodeStartTimeNanos = System.nanoTime(); - private final GraphStatement statement; - private final Node node; - private final Queue queryPlan; - private final DriverChannel channel; - // The identifier of the current execution (0 for the initial execution, 1 for the first - // speculative execution, etc.) - private final int execution; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final int retryCount; - private final boolean scheduleNextExecution; - private final String logPrefix; - private final DriverExecutionProfile executionProfile; - - private NodeResponseCallback( - GraphStatement statement, - Node node, - Queue queryPlan, - DriverChannel channel, - int execution, - int retryCount, - boolean scheduleNextExecution, - String logPrefix) { - this.statement = statement; - this.node = node; - this.queryPlan = queryPlan; - this.channel = channel; - this.execution = execution; - this.retryCount = retryCount; - this.scheduleNextExecution = scheduleNextExecution; - this.logPrefix = logPrefix + "|" + execution; - this.executionProfile = Conversions.resolveExecutionProfile(statement, context); - } - - // this gets invoked once the write completes. 
- @Override - public void operationComplete(Future future) { - if (!future.isSuccess()) { - Throwable error = future.cause(); - if (error instanceof EncoderException - && error.getCause() instanceof FrameTooLongException) { - trackNodeError(node, error.getCause(), NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error.getCause(), node, execution); - } else { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - channel, - error); - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); - sendRequest( - statement, - null, - queryPlan, - execution, - retryCount, - scheduleNextExecution); // try next node - } - } else { - LOG.trace("[{}] Request sent on {}", logPrefix, channel); - if (result.isDone()) { - // If the handler completed since the last time we checked, cancel directly because we - // don't know if cancelScheduledTasks() has run yet - cancel(); - } else { - inFlightCallbacks.add(this); - if (scheduleNextExecution - && Conversions.resolveIdempotence(statement, executionProfile)) { - int nextExecution = execution + 1; - long nextDelay; - try { - nextDelay = - Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) - .nextExecution(node, null, statement, nextExecution); - } catch (Throwable cause) { - // This is a bug in the policy, but not fatal since we have at least one other - // execution already running. Don't fail the whole request. 
- LOG.error( - "[{}] Unexpected error while invoking the speculative execution policy", - logPrefix, - cause); - return; - } - if (nextDelay >= 0) { - scheduleSpeculativeExecution(nextExecution, nextDelay); - } else { - LOG.trace( - "[{}] Speculative execution policy returned {}, no next execution", - logPrefix, - nextDelay); - } - } - } - } - } - - private void scheduleSpeculativeExecution(int index, long delay) { - LOG.trace("[{}] Scheduling speculative execution {} in {} ms", logPrefix, index, delay); - try { - scheduledExecutions.add( - timer.newTimeout( - (Timeout timeout1) -> { - if (!result.isDone()) { - LOG.trace( - "[{}] Starting speculative execution {}", - GraphRequestHandler.this.logPrefix, - index); - activeExecutionsCount.incrementAndGet(); - startedSpeculativeExecutionsCount.incrementAndGet(); - // Note that `node` is the first node of the execution, it might not be the - // "slow" one if there were retries, but in practice retries are rare. - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - sendRequest(statement, null, queryPlan, index, 0, true); - } - }, - delay, - TimeUnit.MILLISECONDS)); - } catch (IllegalStateException e) { - // If we're racing with session shutdown, the timer might be stopped already. We don't want - // to schedule more executions anyway, so swallow the error. 
- if (!"cannot be started once stopped".equals(e.getMessage())) { - Loggers.warnWithException( - LOG, "[{}] Error while scheduling speculative execution", logPrefix, e); - } - } - } - - @Override - public void onResponse(Frame responseFrame) { - long nodeResponseTimeNanos = NANOTIME_NOT_MEASURED_YET; - NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (nodeMetricUpdater.isEnabled(DseNodeMetric.GRAPH_MESSAGES, executionProfile.getName())) { - nodeResponseTimeNanos = System.nanoTime(); - long nodeLatency = System.nanoTime() - nodeStartTimeNanos; - nodeMetricUpdater.updateTimer( - DseNodeMetric.GRAPH_MESSAGES, - executionProfile.getName(), - nodeLatency, - TimeUnit.NANOSECONDS); - } - inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - try { - Message responseMessage = responseFrame.message; - if (responseMessage instanceof Result) { - LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult((Result) responseMessage, responseFrame, this); - } else if (responseMessage instanceof Error) { - LOG.trace("[{}] Got error response, processing", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - trackNodeError( - node, - new IllegalStateException("Unexpected response " + responseMessage), - nodeResponseTimeNanos); - setFinalError( - statement, - new IllegalStateException("Unexpected response " + responseMessage), - node, - execution); - } - } catch (Throwable t) { - trackNodeError(node, t, nodeResponseTimeNanos); - setFinalError(statement, t, node, execution); - } - } - - private void processErrorResponse(Error errorMessage) { - CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - trackNodeError(node, error, 
NANOTIME_NOT_MEASURED_YET); - sendRequest(statement, null, queryPlan, execution, retryCount, false); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - metricUpdater.incrementCounter(DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error, node, execution); - } else { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - RetryVerdict verdict; - if (error instanceof ReadTimeoutException) { - ReadTimeoutException readTimeout = (ReadTimeoutException) error; - verdict = - retryPolicy.onReadTimeoutVerdict( - statement, - readTimeout.getConsistencyLevel(), - readTimeout.getBlockFor(), - readTimeout.getReceived(), - readTimeout.wasDataPresent(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); - } else if (error instanceof WriteTimeoutException) { - WriteTimeoutException writeTimeout = (WriteTimeoutException) error; - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? 
retryPolicy.onWriteTimeoutVerdict( - statement, - writeTimeout.getConsistencyLevel(), - writeTimeout.getWriteType(), - writeTimeout.getBlockFor(), - writeTimeout.getReceived(), - retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); - } else if (error instanceof UnavailableException) { - UnavailableException unavailable = (UnavailableException) error; - verdict = - retryPolicy.onUnavailableVerdict( - statement, - unavailable.getConsistencyLevel(), - unavailable.getRequired(), - unavailable.getAlive(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); - } else { - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? retryPolicy.onErrorResponseVerdict(statement, error, retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); - } - processRetryVerdict(verdict, error); - } - } - - private void processRetryVerdict(RetryVerdict verdict, Throwable error) { - LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - node, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETRY_NEXT: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - null, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETHROW: - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - 
setFinalError(statement, error, node, execution); - break; - case IGNORE: - setFinalResult(Void.INSTANCE, null, this); - break; - } - } - - private void updateErrorMetrics( - NodeMetricUpdater metricUpdater, - RetryVerdict verdict, - DefaultNodeMetric error, - DefaultNodeMetric retriesOnError, - DefaultNodeMetric ignoresOnError) { - metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - case RETRY_NEXT: - metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); - metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); - break; - case IGNORE: - metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); - metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); - break; - case RETHROW: - // nothing do do - } - } - - @Override - public void onFailure(Throwable error) { - inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error); - RetryVerdict verdict; - if (!Conversions.resolveIdempotence(statement, executionProfile) - || error instanceof FrameTooLongException) { - verdict = RetryVerdict.RETHROW; - } else { - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); - } catch (Throwable cause) { - setFinalError( - statement, - new IllegalStateException("Unexpected error while invoking the retry policy", cause), - node, - NO_SUCCESSFUL_EXECUTION); - return; - } - } - processRetryVerdict(verdict, error); - updateErrorMetrics( - ((DefaultNode) node).getMetricUpdater(), - verdict, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED); - } - - void cancel() { - try { - if (!channel.closeFuture().isDone()) { - this.channel.cancel(this); - } - } catch 
(Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); - } - } - - /** - * @param nodeResponseTimeNanos the time we received the response, if it's already been - * measured. If {@link #NANOTIME_NOT_MEASURED_YET}, it hasn't and we need to measure it now - * (this is to avoid unnecessary calls to System.nanoTime) - */ - private void trackNodeError(Node node, Throwable error, long nodeResponseTimeNanos) { - if (requestTracker instanceof NoopRequestTracker) { - return; - } - if (nodeResponseTimeNanos == NANOTIME_NOT_MEASURED_YET) { - nodeResponseTimeNanos = System.nanoTime(); - } - long latencyNanos = nodeResponseTimeNanos - this.nodeStartTimeNanos; - requestTracker.onNodeError(statement, error, latencyNanos, executionProfile, node, logPrefix); - } - - @Override - public String toString() { - return logPrefix; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java deleted file mode 100644 index bc2381482a8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class GraphRequestSyncProcessor - implements RequestProcessor, GraphResultSet> { - - private final GraphRequestAsyncProcessor asyncProcessor; - - public GraphRequestSyncProcessor(GraphRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return (request instanceof ScriptGraphStatement - || request instanceof FluentGraphStatement - || request instanceof BatchGraphStatement - || request instanceof BytecodeGraphStatement) - && resultType.equals(GraphStatement.SYNC); - } - - @Override - public GraphResultSet process( - GraphStatement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - BlockingOperation.checkNotDriverThread(); - AsyncGraphResultSet firstPage = - 
CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - return GraphResultSets.toSync(firstPage); - } - - @Override - public GraphResultSet newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java deleted file mode 100644 index 7e9043affec..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Queue; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; - -@NotThreadSafe // wraps a mutable queue -class GraphResultIterator extends CountingIterator { - - private final Queue data; - private final GraphProtocol graphProtocol; - - // Sometimes a traversal can yield the same result multiple times consecutively. To avoid - // duplicating the data, DSE graph sends it only once with a counter indicating how many times - // it's repeated. - private long repeat = 0; - private GraphNode lastGraphNode = null; - - GraphResultIterator(Queue data, GraphProtocol graphProtocol) { - super(data.size()); - this.data = data; - this.graphProtocol = graphProtocol; - } - - @Override - protected GraphNode computeNext() { - if (repeat > 1) { - repeat -= 1; - // Note that we don't make a defensive copy, we assume the client won't mutate the node - return lastGraphNode; - } - - GraphNode container = data.poll(); - if (container == null) { - return endOfData(); - } - - if (graphProtocol.isGraphBinary()) { - // results are contained in a Traverser object and not a Map if the protocol - // is GraphBinary - Preconditions.checkState( - container.as(Object.class) instanceof Traverser, - "Graph protocol error. 
Received object should be a Traverser but it is not."); - Traverser t = container.as(Traverser.class); - this.repeat = t.bulk(); - this.lastGraphNode = new ObjectGraphNode(t.get()); - return lastGraphNode; - } else { - // The repeat counter is called "bulk" in the JSON payload - GraphNode b = container.getByKey("bulk"); - if (b != null) { - this.repeat = b.asLong(); - } - - lastGraphNode = container.getByKey("result"); - return lastGraphNode; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java deleted file mode 100644 index fb21f857cfa..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; - -public class GraphResultSets { - - public static GraphResultSet toSync(AsyncGraphResultSet firstPage) { - if (firstPage.hasMorePages()) { - return new MultiPageGraphResultSet(firstPage); - } else { - return new SinglePageGraphResultSet(firstPage); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java deleted file mode 100644 index f880bca3764..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.net.InetAddresses; -import java.io.IOException; -import java.net.Inet4Address; -import java.net.Inet6Address; -import java.net.InetAddress; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.shaded.jackson.core.JsonGenerator; -import org.apache.tinkerpop.shaded.jackson.core.JsonParseException; -import org.apache.tinkerpop.shaded.jackson.core.JsonParser; -import org.apache.tinkerpop.shaded.jackson.core.Version; -import org.apache.tinkerpop.shaded.jackson.databind.DeserializationContext; -import org.apache.tinkerpop.shaded.jackson.databind.JsonDeserializer; -import org.apache.tinkerpop.shaded.jackson.databind.JsonSerializer; -import org.apache.tinkerpop.shaded.jackson.databind.SerializerProvider; -import org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer; -import org.apache.tinkerpop.shaded.jackson.databind.module.SimpleModule; -import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; - -public class GraphSON1SerdeTP { - - //////////////////////// DESERIALIZERS //////////////////////// - - /** - * Default deserializer used by the driver for {@link InetAddress} instances. The actual subclass - * returned by this deserializer depends on the type of address: {@link Inet4Address IPV4} or - * {@link Inet6Address IPV6}. 
- */ - static class DefaultInetAddressDeserializer extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - private final Class inetClass; - - DefaultInetAddressDeserializer(Class inetClass) { - super(inetClass); - this.inetClass = inetClass; - } - - @Override - public T deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { - String ip = parser.readValueAs(String.class); - try { - InetAddress inet = InetAddresses.forString(ip); - return inetClass.cast(inet); - } catch (ClassCastException e) { - throw new JsonParseException( - parser, - String.format("Inet address cannot be cast to %s: %s", inetClass.getSimpleName(), ip), - e); - } catch (IllegalArgumentException e) { - throw new JsonParseException(parser, String.format("Expected inet address, got %s", ip), e); - } - } - } - - /** - * Default deserializer used by the driver for geospatial types. It deserializes such types into - * {@link Geometry} instances. The actual subclass depends on the type being deserialized. - */ - static class DefaultGeometryDeserializer extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - private final Class geometryClass; - - DefaultGeometryDeserializer(Class geometryClass) { - super(geometryClass); - this.geometryClass = geometryClass; - } - - @Override - public T deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { - String wkt = parser.readValueAs(String.class); - Geometry geometry; - if (wkt.startsWith("POINT")) geometry = Point.fromWellKnownText(wkt); - else if (wkt.startsWith("LINESTRING")) geometry = LineString.fromWellKnownText(wkt); - else if (wkt.startsWith("POLYGON")) geometry = Polygon.fromWellKnownText(wkt); - else throw new JsonParseException(parser, "Unknown geometry type: " + wkt); - return geometryClass.cast(geometry); - } - } - - /** Base class for serializing the {@code java.time.*} types to ISO-8061 formats. 
*/ - abstract static class AbstractJavaTimeSerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - AbstractJavaTimeSerializer(final Class clazz) { - super(clazz); - } - - @Override - public void serialize( - final T value, final JsonGenerator gen, final SerializerProvider serializerProvider) - throws IOException { - gen.writeString(value.toString()); - } - } - - /** Base class for deserializing the {@code java.time.*} types from ISO-8061 formats. */ - abstract static class AbstractJavaTimeJacksonDeserializer extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - AbstractJavaTimeJacksonDeserializer(final Class clazz) { - super(clazz); - } - - abstract T parse(final String val); - - @Override - public T deserialize( - final JsonParser jsonParser, final DeserializationContext deserializationContext) - throws IOException { - return parse(jsonParser.getText()); - } - } - - static final class DurationJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - DurationJacksonSerializer() { - super(java.time.Duration.class); - } - } - - static final class DurationJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - DurationJacksonDeserializer() { - super(java.time.Duration.class); - } - - @Override - public java.time.Duration parse(final String val) { - return java.time.Duration.parse(val); - } - } - - static final class InstantJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - InstantJacksonSerializer() { - super(java.time.Instant.class); - } - } - - static final class InstantJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - InstantJacksonDeserializer() { - super(java.time.Instant.class); - } - - @Override - public java.time.Instant parse(final 
String val) { - return java.time.Instant.parse(val); - } - } - - static final class LocalDateJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - LocalDateJacksonSerializer() { - super(java.time.LocalDate.class); - } - } - - static final class LocalDateJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - LocalDateJacksonDeserializer() { - super(java.time.LocalDate.class); - } - - @Override - public java.time.LocalDate parse(final String val) { - return java.time.LocalDate.parse(val); - } - } - - static final class LocalTimeJacksonSerializer - extends AbstractJavaTimeSerializer { - - private static final long serialVersionUID = 1L; - - LocalTimeJacksonSerializer() { - super(java.time.LocalTime.class); - } - } - - static final class LocalTimeJacksonDeserializer - extends AbstractJavaTimeJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - LocalTimeJacksonDeserializer() { - super(java.time.LocalTime.class); - } - - @Override - public java.time.LocalTime parse(final String val) { - return java.time.LocalTime.parse(val); - } - } - - //////////////////////// SERIALIZERS //////////////////////// - - /** Default serializer used by the driver for {@link LegacyGraphNode} instances. */ - static class DefaultGraphNodeSerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - DefaultGraphNodeSerializer() { - super(LegacyGraphNode.class); - } - - @Override - public void serialize( - LegacyGraphNode value, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) - throws IOException { - jsonGenerator.writeTree(value.getDelegate()); - } - } - - /** - * Default serializer used by the driver for geospatial types. It serializes {@link Geometry} - * instances into their Well-Known Text (WKT) equivalent. 
- */ - static class DefaultGeometrySerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - DefaultGeometrySerializer() { - super(Geometry.class); - } - - @Override - public void serialize( - Geometry value, JsonGenerator jsonGenerator, SerializerProvider serializers) - throws IOException { - jsonGenerator.writeString(value.asWellKnownText()); - } - } - - /** The default Jackson module used by DSE Graph. */ - static class GraphSON1DefaultModule extends SimpleModule { - - private static final long serialVersionUID = 1L; - - GraphSON1DefaultModule(String name, Version version) { - super(name, version, createDeserializers(), createSerializers()); - } - - private static Map, JsonDeserializer> createDeserializers() { - - return ImmutableMap., JsonDeserializer>builder() - - // Inet (there is no built-in deserializer for InetAddress and subclasses) - .put(InetAddress.class, new DefaultInetAddressDeserializer<>(InetAddress.class)) - .put(Inet4Address.class, new DefaultInetAddressDeserializer<>(Inet4Address.class)) - .put(Inet6Address.class, new DefaultInetAddressDeserializer<>(Inet6Address.class)) - - // Geospatial types - .put(Geometry.class, new DefaultGeometryDeserializer<>(Geometry.class)) - .put(Point.class, new DefaultGeometryDeserializer<>(Point.class)) - .put(LineString.class, new DefaultGeometryDeserializer<>(LineString.class)) - .put(Polygon.class, new DefaultGeometryDeserializer<>(Polygon.class)) - .build(); - } - - private static List> createSerializers() { - return ImmutableList.>builder() - .add(new DefaultGraphNodeSerializer()) - .add(new DefaultGeometrySerializer()) - .build(); - } - } - - /** Serializers and deserializers for JSR 310 {@code java.time.*}. 
*/ - static class GraphSON1JavaTimeModule extends SimpleModule { - - private static final long serialVersionUID = 1L; - - GraphSON1JavaTimeModule(String name, Version version) { - super(name, version, createDeserializers(), createSerializers()); - } - - private static Map, JsonDeserializer> createDeserializers() { - - return ImmutableMap., JsonDeserializer>builder() - .put(java.time.Duration.class, new DurationJacksonDeserializer()) - .put(java.time.Instant.class, new InstantJacksonDeserializer()) - .put(java.time.LocalDate.class, new LocalDateJacksonDeserializer()) - .put(java.time.LocalTime.class, new LocalTimeJacksonDeserializer()) - .build(); - } - - private static List> createSerializers() { - return ImmutableList.>builder() - .add(new DurationJacksonSerializer()) - .add(new InstantJacksonSerializer()) - .add(new LocalDateJacksonSerializer()) - .add(new LocalTimeJacksonSerializer()) - .build(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java deleted file mode 100644 index d79afc71822..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.dse.driver.api.core.graph.predicates.Search; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.util.AndP; -import org.apache.tinkerpop.gremlin.process.traversal.util.ConnectiveP; -import org.apache.tinkerpop.gremlin.process.traversal.util.OrP; -import org.apache.tinkerpop.gremlin.structure.io.graphson.AbstractObjectDeserializer; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; -import org.apache.tinkerpop.gremlin.structure.io.graphson.TinkerPopJacksonModule; -import org.apache.tinkerpop.shaded.jackson.core.JsonGenerator; -import org.apache.tinkerpop.shaded.jackson.core.JsonParser; -import org.apache.tinkerpop.shaded.jackson.databind.DeserializationContext; -import org.apache.tinkerpop.shaded.jackson.databind.SerializerProvider; -import org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer; -import 
org.apache.tinkerpop.shaded.jackson.databind.jsontype.TypeSerializer; -import org.apache.tinkerpop.shaded.jackson.databind.module.SimpleModule; -import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdScalarSerializer; -import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; - -public class GraphSON2SerdeTP { - - /** - * A Jackson Module to use for TinkerPop serialization/deserialization. It extends {@link - * org.apache.tinkerpop.gremlin.structure.io.graphson.TinkerPopJacksonModule} because of the - * specific typing format used in GraphSON. - */ - public static class DseGraphModule extends TinkerPopJacksonModule { - - private static final long serialVersionUID = 1L; - - public DseGraphModule() { - super("dse-driver-2.0"); - addSerializer(DefaultPoint.class, new PointGeometrySerializer()); - addSerializer(DefaultLineString.class, new LineStringGeometrySerializer()); - addSerializer(DefaultPolygon.class, new PolygonGeometrySerializer()); - addSerializer(Distance.class, new DistanceGeometrySerializer()); - // override TinkerPop's P predicates because of DSE's Search and Geo predicates - addSerializer(P.class, new DsePJacksonSerializer()); - addSerializer(EditDistance.class, new EditDistanceSerializer()); - - addDeserializer(DefaultLineString.class, new LineStringGeometryDeserializer()); - addDeserializer(DefaultPoint.class, new PointGeometryDeserializer()); - addDeserializer(DefaultPolygon.class, new PolygonGeometryDeserializer()); - addDeserializer(Distance.class, new DistanceGeometryDeserializer()); - // override TinkerPop's P predicates because of DSE's Search and Geo predicates - addDeserializer(P.class, new DsePJacksonDeserializer()); - } - - @SuppressWarnings("rawtypes") - @Override - public Map getTypeDefinitions() { - Map definitions = new HashMap<>(); - definitions.put(DefaultLineString.class, "LineString"); - definitions.put(DefaultPoint.class, "Point"); - definitions.put(DefaultPolygon.class, "Polygon"); - 
definitions.put(byte[].class, "Blob"); - definitions.put(Distance.class, "Distance"); - definitions.put(P.class, "P"); - return definitions; - } - - @Override - public String getTypeNamespace() { - return "dse"; - } - - abstract static class AbstractGeometryJacksonDeserializer - extends StdDeserializer { - - private static final long serialVersionUID = 1L; - - AbstractGeometryJacksonDeserializer(final Class clazz) { - super(clazz); - } - - public abstract T parse(final String val); - - @Override - public T deserialize( - final JsonParser jsonParser, final DeserializationContext deserializationContext) - throws IOException { - return parse(jsonParser.getText()); - } - } - - abstract static class AbstractGeometryJacksonSerializer - extends StdScalarSerializer { - - private static final long serialVersionUID = 1L; - - AbstractGeometryJacksonSerializer(final Class clazz) { - super(clazz); - } - - @Override - public void serialize( - final T value, final JsonGenerator gen, final SerializerProvider serializerProvider) - throws IOException { - gen.writeString(value.asWellKnownText()); - } - } - - public static class LineStringGeometrySerializer - extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - LineStringGeometrySerializer() { - super(LineString.class); - } - } - - public static class LineStringGeometryDeserializer - extends AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - LineStringGeometryDeserializer() { - super(DefaultLineString.class); - } - - @Override - public DefaultLineString parse(final String val) { - return (DefaultLineString) LineString.fromWellKnownText(val); - } - } - - public static class PolygonGeometrySerializer - extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - PolygonGeometrySerializer() { - super(Polygon.class); - } - } - - public static class PolygonGeometryDeserializer - extends 
AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - PolygonGeometryDeserializer() { - super(DefaultPolygon.class); - } - - @Override - public DefaultPolygon parse(final String val) { - return (DefaultPolygon) Polygon.fromWellKnownText(val); - } - } - - public static class PointGeometrySerializer extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - PointGeometrySerializer() { - super(Point.class); - } - } - - public static class PointGeometryDeserializer - extends AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - PointGeometryDeserializer() { - super(DefaultPoint.class); - } - - @Override - public DefaultPoint parse(final String val) { - return (DefaultPoint) Point.fromWellKnownText(val); - } - } - - public static class DistanceGeometrySerializer - extends AbstractGeometryJacksonSerializer { - - private static final long serialVersionUID = 1L; - - DistanceGeometrySerializer() { - super(Distance.class); - } - } - - public static class DistanceGeometryDeserializer - extends AbstractGeometryJacksonDeserializer { - - private static final long serialVersionUID = 1L; - - DistanceGeometryDeserializer() { - super(Distance.class); - } - - @Override - public Distance parse(final String val) { - return Distance.fromWellKnownText(val); - } - } - - @SuppressWarnings("rawtypes") - static final class DsePJacksonSerializer extends StdScalarSerializer

{ - - private static final long serialVersionUID = 1L; - - DsePJacksonSerializer() { - super(P.class); - } - - @Override - public void serialize( - final P p, final JsonGenerator jsonGenerator, final SerializerProvider serializerProvider) - throws IOException { - jsonGenerator.writeStartObject(); - jsonGenerator.writeStringField("predicateType", getPredicateType(p)); - jsonGenerator.writeStringField( - GraphSONTokens.PREDICATE, - p instanceof ConnectiveP - ? p instanceof AndP ? GraphSONTokens.AND : GraphSONTokens.OR - : p.getBiPredicate().toString()); - if (p instanceof ConnectiveP) { - jsonGenerator.writeArrayFieldStart(GraphSONTokens.VALUE); - for (final P predicate : ((ConnectiveP) p).getPredicates()) { - jsonGenerator.writeObject(predicate); - } - jsonGenerator.writeEndArray(); - } else { - if (p.getValue() instanceof Collection) { - jsonGenerator.writeArrayFieldStart(GraphSONTokens.VALUE); - for (final Object object : (Collection) p.getValue()) { - jsonGenerator.writeObject(object); - } - jsonGenerator.writeEndArray(); - } else { - jsonGenerator.writeObjectField(GraphSONTokens.VALUE, p.getValue()); - } - } - jsonGenerator.writeEndObject(); - } - - private String getPredicateType(P p) { - if (p.getBiPredicate() instanceof SearchPredicate) { - return Search.class.getSimpleName(); - } else if (p.getBiPredicate() instanceof GeoPredicate) { - return Geo.class.getSimpleName(); - } else { - return P.class.getSimpleName(); - } - } - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - static final class DsePJacksonDeserializer extends AbstractObjectDeserializer

{ - - private static final long serialVersionUID = 1L; - - DsePJacksonDeserializer() { - super(P.class); - } - - @Override - public P createObject(final Map data) { - final String predicate = (String) data.get(GraphSONTokens.PREDICATE); - final String predicateType = (String) data.get("predicateType"); - final Object value = data.get(GraphSONTokens.VALUE); - if (predicate.equals(GraphSONTokens.AND) || predicate.equals(GraphSONTokens.OR)) { - return predicate.equals(GraphSONTokens.AND) - ? new AndP((List

) value) - : new OrP((List

) value); - } else { - try { - if (value instanceof Collection) { - if (predicate.equals("between")) { - return P.between(((List) value).get(0), ((List) value).get(1)); - } else if (predicateType.equals(P.class.getSimpleName()) - && predicate.equals("inside")) { - return P.between(((List) value).get(0), ((List) value).get(1)); - } else if (predicate.equals("outside")) { - return P.outside(((List) value).get(0), ((List) value).get(1)); - } else if (predicate.equals("within")) { - return P.within((Collection) value); - } else if (predicate.equals("without")) { - return P.without((Collection) value); - } else { - return (P) - P.class.getMethod(predicate, Collection.class).invoke(null, (Collection) value); - } - } else { - if (predicate.equals(SearchPredicate.prefix.name())) { - return Search.prefix((String) value); - } else if (predicate.equals(SearchPredicate.tokenPrefix.name())) { - return Search.tokenPrefix((String) value); - } else if (predicate.equals(SearchPredicate.regex.name())) { - return Search.regex((String) value); - } else if (predicate.equals(SearchPredicate.tokenRegex.name())) { - return Search.tokenRegex((String) value); - } else if (predicate.equals(SearchPredicate.token.name())) { - return Search.token((String) value); - } else if (predicate.equals(SearchPredicate.fuzzy.name())) { - Map arguments = (Map) value; - return Search.fuzzy( - (String) arguments.get("query"), (int) arguments.get("distance")); - } else if (predicate.equals(SearchPredicate.tokenFuzzy.name())) { - Map arguments = (Map) value; - return Search.tokenFuzzy( - (String) arguments.get("query"), (int) arguments.get("distance")); - } else if (predicate.equals(SearchPredicate.phrase.name())) { - Map arguments = (Map) value; - return Search.phrase( - (String) arguments.get("query"), (int) arguments.get("distance")); - } else if (predicateType.equals(Geo.class.getSimpleName()) - && predicate.equals(GeoPredicate.inside.name())) { - return Geo.inside( - ((Distance) value).getCenter(), - 
((Distance) value).getRadius(), - Geo.Unit.DEGREES); - } else if (predicateType.equals(Geo.class.getSimpleName()) - && predicate.equals(GeoPredicate.insideCartesian.name())) { - return Geo.inside(((Distance) value).getCenter(), ((Distance) value).getRadius()); - } else { - return (P) P.class.getMethod(predicate, Object.class).invoke(null, value); - } - } - } catch (final Exception e) { - throw new IllegalStateException(e.getMessage(), e); - } - } - } - } - - public static class EditDistanceSerializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - EditDistanceSerializer() { - super(EditDistance.class); - } - - @Override - public void serialize( - EditDistance editDistance, JsonGenerator generator, SerializerProvider provider) - throws IOException { - generator.writeObject( - ImmutableMap.of("query", editDistance.query, "distance", editDistance.distance)); - } - - @Override - public void serializeWithType( - EditDistance editDistance, - JsonGenerator generator, - SerializerProvider provider, - TypeSerializer serializer) - throws IOException { - serialize(editDistance, generator, provider); - } - } - } - - public static class DriverObjectsModule extends SimpleModule { - - private static final long serialVersionUID = 1L; - - public DriverObjectsModule() { - super("datastax-driver-module"); - addSerializer(ObjectGraphNode.class, new ObjectGraphNodeGraphSON2Serializer()); - } - - static final class ObjectGraphNodeGraphSON2Serializer extends StdSerializer { - - private static final long serialVersionUID = 1L; - - protected ObjectGraphNodeGraphSON2Serializer() { - super(ObjectGraphNode.class); - } - - @Override - public void serialize( - ObjectGraphNode objectGraphNode, - JsonGenerator jsonGenerator, - SerializerProvider serializerProvider) - throws IOException { - jsonGenerator.writeObject(objectGraphNode.as(Object.class)); - } - - @Override - public void serializeWithType( - ObjectGraphNode objectGraphNode, - JsonGenerator jsonGenerator, 
- SerializerProvider serializerProvider, - TypeSerializer typeSerializer) - throws IOException { - serialize(objectGraphNode, jsonGenerator, serializerProvider); - } - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java deleted file mode 100644 index 02b35f7ee36..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.shaded.guava.common.base.Suppliers; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheLoader; -import com.datastax.oss.driver.shaded.guava.common.cache.LoadingCache; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.function.Supplier; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONMapper; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONReader; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONVersion; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONXModuleV2d0; -import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV2d0; -import org.apache.tinkerpop.shaded.jackson.core.Version; -import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper; - -public class GraphSONUtils { - - private static final LoadingCache OBJECT_MAPPERS = - CacheBuilder.newBuilder() - .build( - new CacheLoader() { - @Override - public ObjectMapper load(@NonNull GraphProtocol graphSubProtocol) throws Exception { - switch (graphSubProtocol) { - case GRAPHSON_1_0: - com.datastax.oss.driver.api.core.Version driverVersion = - CqlSession.OSS_DRIVER_COORDINATES.getVersion(); - Version driverJacksonVersion = - new Version( - driverVersion.getMajor(), - driverVersion.getMinor(), - driverVersion.getPatch(), - driverVersion.getPreReleaseLabels() != null - && driverVersion.getPreReleaseLabels().contains("SNAPSHOT") - ? 
"SNAPSHOT" - : null, - "com.datastax.dse", - "dse-java-driver-core"); - - ObjectMapper mapper = - GraphSONMapper.build() - .version(GraphSONVersion.V1_0) - .create() - .createMapper(); - mapper.registerModule( - new GraphSON1SerdeTP.GraphSON1DefaultModule( - "graph-graphson1default", driverJacksonVersion)); - mapper.registerModule( - new GraphSON1SerdeTP.GraphSON1JavaTimeModule( - "graph-graphson1javatime", driverJacksonVersion)); - - return mapper; - case GRAPHSON_2_0: - return GraphSONMapper.build() - .version(GraphSONVersion.V2_0) - .addCustomModule(GraphSONXModuleV2d0.build().create(false)) - .addRegistry(TinkerIoRegistryV2d0.instance()) - .addCustomModule(new GraphSON2SerdeTP.DseGraphModule()) - .addCustomModule(new GraphSON2SerdeTP.DriverObjectsModule()) - .create() - .createMapper(); - - default: - throw new IllegalStateException( - String.format("GraphSON sub-protocol unknown: {%s}", graphSubProtocol)); - } - } - }); - - static final Supplier GRAPHSON1_READER = - Suppliers.memoize( - () -> - GraphSONReader.build() - .mapper(GraphSONMapper.build().version(GraphSONVersion.V1_0).create()) - .create()); - - public static ByteBuffer serializeToByteBuffer(Object object, GraphProtocol graphSubProtocol) - throws IOException { - return ByteBuffer.wrap(serializeToBytes(object, graphSubProtocol)); - } - - static byte[] serializeToBytes(Object object, GraphProtocol graphSubProtocol) throws IOException { - try { - return OBJECT_MAPPERS.get(graphSubProtocol).writeValueAsBytes(object); - } catch (ExecutionException e) { - Throwables.throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static GraphNode createGraphNode(List data, GraphProtocol graphSubProtocol) - throws IOException { - try { - ObjectMapper mapper = OBJECT_MAPPERS.get(graphSubProtocol); - switch (graphSubProtocol) { - case GRAPHSON_1_0: - return new LegacyGraphNode(mapper.readTree(Bytes.getArray(data.get(0))), mapper); - case GRAPHSON_2_0: - return new 
ObjectGraphNode(mapper.readValue(Bytes.getArray(data.get(0)), Object.class)); - default: - // Should already be caught when we lookup in the cache - throw new AssertionError( - String.format("Unknown GraphSON sub-protocol: {%s}", graphSubProtocol)); - } - } catch (ExecutionException e) { - Throwables.throwIfUnchecked(e); - throw new RuntimeException(e); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java deleted file mode 100644 index b8baa2f5e49..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class GraphStatementBase> - implements GraphStatement { - private final Boolean isIdempotent; - private final Duration timeout; - private final Node node; - private final long timestamp; - private final DriverExecutionProfile executionProfile; - private final String executionProfileName; - private final Map customPayload; - private final String graphName; - private final String traversalSource; - private final String subProtocol; - private final ConsistencyLevel consistencyLevel; - private final ConsistencyLevel readConsistencyLevel; - private final ConsistencyLevel writeConsistencyLevel; - - protected GraphStatementBase( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel) { - this.isIdempotent = isIdempotent; - this.timeout = timeout; - this.node = node; - this.timestamp = timestamp; - this.executionProfile = executionProfile; - this.executionProfileName = executionProfileName; - this.customPayload = customPayload; - this.graphName = graphName; - this.traversalSource = traversalSource; - this.subProtocol = subProtocol; - this.consistencyLevel = consistencyLevel; - this.readConsistencyLevel = readConsistencyLevel; 
- this.writeConsistencyLevel = writeConsistencyLevel; - } - - protected abstract SelfT newInstance( - Boolean isIdempotent, - Duration timeout, - Node node, - long timestamp, - DriverExecutionProfile executionProfile, - String executionProfileName, - Map customPayload, - String graphName, - String traversalSource, - String subProtocol, - ConsistencyLevel consistencyLevel, - ConsistencyLevel readConsistencyLevel, - ConsistencyLevel writeConsistencyLevel); - - @Override - public Boolean isIdempotent() { - return isIdempotent; - } - - @NonNull - @Override - public SelfT setIdempotent(@Nullable Boolean newIdempotence) { - return newInstance( - newIdempotence, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public Duration getTimeout() { - return timeout; - } - - @NonNull - @Override - public SelfT setTimeout(@Nullable Duration newTimeout) { - return newInstance( - isIdempotent, - newTimeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @NonNull - @Override - public SelfT setNode(@Nullable Node newNode) { - return newInstance( - isIdempotent, - timeout, - newNode, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Override - public long getTimestamp() { - return this.timestamp; - } - - @NonNull - @Override - public SelfT setTimestamp(long newTimestamp) { - return newInstance( - isIdempotent, - timeout, - node, - newTimestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - 
traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public SelfT setExecutionProfile(@Nullable DriverExecutionProfile newExecutionProfile) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - newExecutionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public SelfT setExecutionProfileName(@Nullable String newExecutionProfileName) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - newExecutionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public SelfT setCustomPayload(@NonNull Map newCustomPayload) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - newCustomPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getGraphName() { - return graphName; - } - - @NonNull - @Override - public SelfT setGraphName(@Nullable String newGraphName) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - newGraphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getTraversalSource() { - return 
traversalSource; - } - - @NonNull - @Override - public SelfT setTraversalSource(@Nullable String newTraversalSource) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - newTraversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public String getSubProtocol() { - return subProtocol; - } - - @NonNull - @Override - public SelfT setSubProtocol(@Nullable String newSubProtocol) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - newSubProtocol, - consistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @Override - public SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - newConsistencyLevel, - readConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public ConsistencyLevel getReadConsistencyLevel() { - return readConsistencyLevel; - } - - @NonNull - @Override - public SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel newReadConsistencyLevel) { - return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - newReadConsistencyLevel, - writeConsistencyLevel); - } - - @Nullable - @Override - public ConsistencyLevel getWriteConsistencyLevel() { - return writeConsistencyLevel; - } - - @NonNull - @Override - public SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel newWriteConsistencyLevel) { - 
return newInstance( - isIdempotent, - timeout, - node, - timestamp, - executionProfile, - executionProfileName, - customPayload, - graphName, - traversalSource, - subProtocol, - consistencyLevel, - readConsistencyLevel, - newWriteConsistencyLevel); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java deleted file mode 100644 index 6e586bbcf3f..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.PagingEnabledOptions; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collection; -import java.util.Objects; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class GraphSupportChecker { - - private static final Logger LOG = LoggerFactory.getLogger(GraphSupportChecker.class); - - /** - * The minimum DSE version supporting both graph paging and the GraphBinary sub-protocol is DSE - * 6.8. - */ - private static final Version MIN_DSE_VERSION_GRAPH_BINARY_AND_PAGING = - Objects.requireNonNull(Version.parse("6.8.0")); - - private volatile Boolean contextGraphPagingEnabled; - private volatile Boolean isDse68OrAbove; - - /** - * Checks whether graph paging is available. - * - *

Graph paging is available if: - * - *

    - *
  1. Continuous paging is generally available (this implies protocol version {@link - * com.datastax.dse.driver.api.core.DseProtocolVersion#DSE_V1 DSE_V1} or higher); - *
  2. Graph paging is set to ENABLED or AUTO in the configuration - * with {@link DseDriverOption#GRAPH_PAGING_ENABLED}; - *
  3. If graph paging is set to AUTO, then a check will be performed to verify - * that all hosts are running DSE 6.8+; if that is the case, then graph paging will be - * assumed to be available. - *
- * - * Note that the hosts check will be done only once, then memoized; if other hosts join the - * cluster later and do not support graph paging, the user has to manually disable graph paging. - */ - public boolean isPagingEnabled( - @NonNull GraphStatement graphStatement, @NonNull InternalDriverContext context) { - DriverExecutionProfile driverExecutionProfile = - Conversions.resolveExecutionProfile(graphStatement, context); - PagingEnabledOptions pagingEnabledOptions = - PagingEnabledOptions.valueOf( - driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)); - if (LOG.isTraceEnabled()) { - LOG.trace("GRAPH_PAGING_ENABLED: {}", pagingEnabledOptions); - } - if (pagingEnabledOptions == PagingEnabledOptions.DISABLED) { - return false; - } else if (pagingEnabledOptions == PagingEnabledOptions.ENABLED) { - return true; - } else { - return isContextGraphPagingEnabled(context); - } - } - - /** - * Infers the {@link GraphProtocol} to use to execute the given statement. - * - *

The graph protocol is computed as follows: - * - *

    - *
  1. If the statement declares the protocol to use with {@link - * GraphStatement#getSubProtocol()}, then that protocol is returned. - *
  2. If the driver configuration explicitly defines the protocol to use (see {@link - * DseDriverOption#GRAPH_SUB_PROTOCOL} and reference.conf), then that protocol is returned. - *
  3. Otherwise, the graph protocol to use is determined by the DSE version of hosts in the - * cluster. If any host has DSE version 6.7.x or lower, the default graph protocol is {@link - * GraphProtocol#GRAPHSON_2_0}. If all hosts have DSE version 6.8.0 or higher, the default - * graph protocol is {@link GraphProtocol#GRAPH_BINARY_1_0}. - *
- * - * Note that the hosts check will be done only once, then memoized; if other hosts join the and do - * not support the computed graph protocol, the user has to manually set the graph protocol to - * use. - * - *

Also note that GRAPH_BINARY_1_0 can only be used with "core" graph engines; if - * you are targeting a "classic" graph engine instead, the user has to manually set the graph - * protocol to something else. - */ - @NonNull - public GraphProtocol inferGraphProtocol( - @NonNull GraphStatement statement, - @NonNull DriverExecutionProfile config, - @NonNull InternalDriverContext context) { - String graphProtocol = statement.getSubProtocol(); - if (graphProtocol == null) { - // use the protocol specified in configuration, otherwise get the default from the context - graphProtocol = - config.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL) - ? config.getString(DseDriverOption.GRAPH_SUB_PROTOCOL) - : getDefaultGraphProtocol(context).toInternalCode(); - } - // should not be null because we call config.getString() with a default value - Objects.requireNonNull( - graphProtocol, - "Could not determine the graph protocol for the query. This is a bug, please report."); - - return GraphProtocol.fromString(graphProtocol); - } - - private boolean isContextGraphPagingEnabled(InternalDriverContext context) { - if (contextGraphPagingEnabled == null) { - ProtocolVersion protocolVersion = context.getProtocolVersion(); - if (!context - .getProtocolVersionRegistry() - .supports(protocolVersion, DseProtocolFeature.CONTINUOUS_PAGING)) { - contextGraphPagingEnabled = false; - } else { - if (isDse68OrAbove == null) { - isDse68OrAbove = checkIsDse68OrAbove(context); - } - contextGraphPagingEnabled = isDse68OrAbove; - } - } - return contextGraphPagingEnabled; - } - - /** - * Determines the default {@link GraphProtocol} for the given context. - * - * @return The default GraphProtocol to used based on the provided context. 
- */ - @VisibleForTesting - GraphProtocol getDefaultGraphProtocol(@NonNull InternalDriverContext context) { - if (isDse68OrAbove == null) { - isDse68OrAbove = checkIsDse68OrAbove(context); - } - // if the DSE version can't be determined, default to GraphSON 2.0 - return isDse68OrAbove ? GraphProtocol.GRAPH_BINARY_1_0 : GraphProtocol.GRAPHSON_2_0; - } - - private boolean checkIsDse68OrAbove(@NonNull InternalDriverContext context) { - Collection nodes = context.getMetadataManager().getMetadata().getNodes().values(); - - for (Node node : nodes) { - Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion == null || dseVersion.compareTo(MIN_DSE_VERSION_GRAPH_BINARY_AND_PAGING) < 0) { - return false; - } - } - return true; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java deleted file mode 100644 index 1749bf00873..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; -import org.apache.tinkerpop.gremlin.structure.util.Attachable; -import org.apache.tinkerpop.shaded.jackson.core.JsonParser; -import org.apache.tinkerpop.shaded.jackson.databind.JavaType; -import org.apache.tinkerpop.shaded.jackson.databind.JsonNode; -import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper; - -/** - * Legacy implementation for GraphSON 1 results. - * - *

The server returns plain JSON with no type information. The driver works with the JSON - * representation directly. - */ -@Immutable -public class LegacyGraphNode implements GraphNode { - private static final String TYPE = "type"; - private static final String VERTEX_TYPE = "vertex"; - private static final String EDGE_TYPE = "edge"; - - private static final GenericType> LIST_TYPE = GenericType.listOf(Object.class); - private static final GenericType> MAP_TYPE = - GenericType.mapOf(String.class, Object.class); - - private final JsonNode delegate; - private final ObjectMapper objectMapper; - - public LegacyGraphNode(JsonNode delegate, ObjectMapper objectMapper) { - Preconditions.checkNotNull(delegate); - Preconditions.checkNotNull(objectMapper); - this.delegate = delegate; - this.objectMapper = objectMapper; - } - - /** - * The underlying JSON representation. - * - *

This is an implementation detail, it's only exposed through the internal API. - */ - public JsonNode getDelegate() { - return delegate; - } - - /** - * The object mapper used to deserialize results in {@link #as(Class)} and {@link - * #as(GenericType)}. - * - *

This is an implementation detail, it's only exposed through the internal API. - */ - public ObjectMapper getObjectMapper() { - return objectMapper; - } - - @Override - public boolean isNull() { - return delegate.isNull(); - } - - @Override - public boolean isMap() { - return delegate.isObject(); - } - - @Override - public Iterable keys() { - return (Iterable) delegate::fieldNames; - } - - @Override - public LegacyGraphNode getByKey(Object key) { - if (!(key instanceof String)) { - return null; - } - JsonNode node = delegate.get(((String) key)); - if (node == null) { - return null; - } - return new LegacyGraphNode(node, objectMapper); - } - - @Override - @SuppressWarnings("unchecked") - public Map asMap() { - return (Map) as(MAP_TYPE); - } - - @Override - public boolean isList() { - return delegate.isArray(); - } - - @Override - public int size() { - return delegate.size(); - } - - @Override - public LegacyGraphNode getByIndex(int index) { - JsonNode node = delegate.get(index); - if (node == null) { - return null; - } - return new LegacyGraphNode(node, objectMapper); - } - - @Override - @SuppressWarnings("unchecked") - public List asList() { - return (List) as(LIST_TYPE); - } - - @Override - public boolean isValue() { - return delegate.isValueNode(); - } - - @Override - public int asInt() { - return delegate.asInt(); - } - - @Override - public boolean asBoolean() { - return delegate.asBoolean(); - } - - @Override - public long asLong() { - return delegate.asLong(); - } - - @Override - public double asDouble() { - return delegate.asDouble(); - } - - @Override - public String asString() { - return delegate.asText(); - } - - @Override - public boolean isVertex() { - return isType(VERTEX_TYPE); - } - - @Override - public Vertex asVertex() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readVertex( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - null, - null, - null); - } catch (IOException e) { - throw new 
UncheckedIOException("Could not deserialize node as Vertex.", e); - } - } - - @Override - public boolean isEdge() { - return isType(EDGE_TYPE); - } - - @Override - public Edge asEdge() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readEdge( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - Attachable::get); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as Edge.", e); - } - } - - @Override - public boolean isPath() { - return false; - } - - @Override - public Path asPath() { - throw new UnsupportedOperationException( - "GraphSON1 does not support Path, use another Graph sub-protocol such as GraphSON2."); - } - - @Override - public boolean isProperty() { - return delegate.has(GraphSONTokens.KEY) && delegate.has(GraphSONTokens.VALUE); - } - - @Override - @SuppressWarnings("unchecked") - public Property asProperty() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readProperty( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - Attachable::get); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as Property.", e); - } - } - - @Override - public boolean isVertexProperty() { - return delegate.has(GraphSONTokens.ID) - && delegate.has(GraphSONTokens.VALUE) - && delegate.has(GraphSONTokens.LABEL); - } - - @Override - @SuppressWarnings("unchecked") - public VertexProperty asVertexProperty() { - try { - return GraphSONUtils.GRAPHSON1_READER - .get() - .readVertexProperty( - new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), - Attachable::get); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as VertexProperty.", e); - } - } - - @Override - public boolean isSet() { - return false; - } - - @Override - public Set asSet() { - throw new UnsupportedOperationException( - "GraphSON1 does not support Set, use another Graph sub-protocol such as 
GraphSON2."); - } - - @Override - public ResultT as(Class clazz) { - try { - return objectMapper.treeToValue(delegate, clazz); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as: " + clazz, e); - } - } - - @Override - public ResultT as(GenericType type) { - try { - JsonParser parser = objectMapper.treeAsTokens(delegate); - JavaType javaType = objectMapper.constructType(type.__getToken().getType()); - return objectMapper.readValue(parser, javaType); - } catch (IOException e) { - throw new UncheckedIOException("Could not deserialize node as: " + type, e); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof LegacyGraphNode)) { - return false; - } - LegacyGraphNode that = (LegacyGraphNode) o; - return Objects.equal(delegate, that.delegate); - } - - @Override - public int hashCode() { - return Objects.hashCode(delegate); - } - - @Override - public String toString() { - return delegate.toString(); - } - - private boolean isType(String expectedTypeName) { - JsonNode type = delegate.get(TYPE); - return type != null && expectedTypeName.equals(type.asText()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java deleted file mode 100644 index fe81d73ba00..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -public class MultiPageGraphResultSet implements GraphResultSet { - private final RowIterator iterator; - private final List executionInfos = new ArrayList<>(); - - public MultiPageGraphResultSet(AsyncGraphResultSet firstPage) { - iterator = new RowIterator(firstPage); - executionInfos.add(firstPage.getRequestExecutionInfo()); - } - - @Override - public void cancel() { - iterator.cancel(); - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return executionInfos.get(executionInfos.size() - 1); - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return GraphExecutionInfoConverter.convert(getRequestExecutionInfo()); - } - - /** - * The execution information for all the queries that have been performed so far to 
assemble this - * iterable. - * - *

This will have multiple elements if the query is paged, since the driver performs blocking - * background queries to fetch additional pages transparently as the result set is being iterated. - */ - @NonNull - public List getRequestExecutionInfos() { - return executionInfos; - } - - /** @deprecated use {@link #getRequestExecutionInfos()} instead. */ - @NonNull - @Deprecated - public List getExecutionInfos() { - return Lists.transform(executionInfos, GraphExecutionInfoConverter::convert); - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - public class RowIterator extends CountingIterator { - private AsyncGraphResultSet currentPage; - private Iterator currentRows; - private boolean cancelled = false; - - private RowIterator(AsyncGraphResultSet firstPage) { - super(firstPage.remaining()); - currentPage = firstPage; - currentRows = firstPage.currentPage().iterator(); - } - - @Override - protected GraphNode computeNext() { - maybeMoveToNextPage(); - return currentRows.hasNext() ? 
currentRows.next() : endOfData(); - } - - private void maybeMoveToNextPage() { - if (!cancelled && !currentRows.hasNext() && currentPage.hasMorePages()) { - BlockingOperation.checkNotDriverThread(); - AsyncGraphResultSet nextPage = - CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); - currentPage = nextPage; - remaining += currentPage.remaining(); - currentRows = nextPage.currentPage().iterator(); - executionInfos.add(nextPage.getRequestExecutionInfo()); - } - } - - private void cancel() { - currentPage.cancel(); - cancelled = true; - } - - public boolean isCancelled() { - return cancelled; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java deleted file mode 100644 index 56123799fdd..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.Immutable; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -/** - * Modern implementation for GraphSON 2+ results. - * - *

The server returns results with type information. The driver works with the decoded objects - * directly. - */ -@Immutable -public class ObjectGraphNode implements GraphNode { - - private final Object delegate; - - public ObjectGraphNode(Object delegate) { - this.delegate = delegate; - } - - @Override - public boolean isNull() { - return delegate == null; - } - - @Override - public boolean isMap() { - return delegate instanceof Map; - } - - @Override - public Iterable keys() { - return ((Map) delegate).keySet(); - } - - @Override - public GraphNode getByKey(Object key) { - if (!isMap()) { - return null; - } - Map map = asMap(); - if (map.containsKey(key)) { - return new ObjectGraphNode(map.get(key)); - } - return null; - } - - @Override - @SuppressWarnings("unchecked") - public Map asMap() { - return (Map) delegate; - } - - @Override - public boolean isList() { - return delegate instanceof List; - } - - @Override - public int size() { - if (isList()) { - return asList().size(); - } else if (isMap()) { - return asMap().size(); - } else if (isSet()) { - return asSet().size(); - } else { - return 0; - } - } - - @Override - public GraphNode getByIndex(int index) { - if (!isList() || index < 0 || index >= size()) { - return null; - } - return new ObjectGraphNode(asList().get(index)); - } - - @Override - @SuppressWarnings("unchecked") - public List asList() { - return (List) delegate; - } - - @Override - public boolean isValue() { - return !(isList() - || isMap() - || isSet() - || isVertex() - || isEdge() - || isPath() - || isProperty() - || isVertexProperty()); - } - - @Override - public boolean isVertexProperty() { - return delegate instanceof VertexProperty; - } - - @Override - public boolean isProperty() { - return delegate instanceof Property; - } - - @Override - public boolean isPath() { - return delegate instanceof Path; - } - - @Override - public int asInt() { - return (Integer) delegate; - } - - @Override - public boolean asBoolean() { - return (Boolean) 
delegate; - } - - @Override - public long asLong() { - return (Long) delegate; - } - - @Override - public double asDouble() { - return (Double) delegate; - } - - @Override - public String asString() { - return (String) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public T as(Class clazz) { - return (T) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public T as(GenericType type) { - return (T) delegate; - } - - @Override - public boolean isVertex() { - return delegate instanceof Vertex; - } - - @Override - public Vertex asVertex() { - return (Vertex) delegate; - } - - @Override - public boolean isEdge() { - return delegate instanceof Edge; - } - - @Override - public Edge asEdge() { - return (Edge) delegate; - } - - @Override - public Path asPath() { - return (Path) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public Property asProperty() { - return (Property) delegate; - } - - @Override - @SuppressWarnings("unchecked") - public VertexProperty asVertexProperty() { - return (VertexProperty) delegate; - } - - @Override - public boolean isSet() { - return delegate instanceof Set; - } - - @Override - @SuppressWarnings("unchecked") - public Set asSet() { - return (Set) delegate; - } - - @Override - public String toString() { - return this.delegate.toString(); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - // Compare each others' delegates. 
- return other instanceof ObjectGraphNode - && Objects.equal(this.delegate, ((ObjectGraphNode) other).delegate); - } - - @Override - public int hashCode() { - return Objects.hashCode(delegate); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java deleted file mode 100644 index b69c3a59cf0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.List; -import java.util.Set; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * List of predicates for geolocation usage with DseGraph and Search indexes. Should not be accessed - * directly but through the {@link com.datastax.dse.driver.api.core.graph.predicates.Search} static - * methods. 
- */ -public enum SearchPredicate implements DsePredicate { - /** Whether the text contains a given term as a token in the text (case insensitive). */ - token { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && evaluate(value.toString(), (String) condition); - } - - boolean evaluate(String value, String terms) { - Set tokens = Sets.newHashSet(tokenize(value.toLowerCase())); - terms = terms.trim(); - List tokenTerms = tokenize(terms.toLowerCase()); - if (!terms.isEmpty() && tokenTerms.isEmpty()) { - return false; - } - for (String term : tokenTerms) { - if (!tokens.contains(term)) { - return false; - } - } - return true; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null && isNotBlank((String) condition); - } - - @Override - public String toString() { - return "token"; - } - }, - - /** Whether the text contains a token that starts with a given term (case insensitive). */ - tokenPrefix { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && evaluate(value.toString(), (String) condition); - } - - boolean evaluate(String value, String prefix) { - for (String token : tokenize(value.toLowerCase())) { - if (token.startsWith(prefix.toLowerCase().trim())) { - return true; - } - } - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "tokenPrefix"; - } - }, - - /** Whether the text contains a token that matches a regular expression (case insensitive). 
*/ - tokenRegex { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && evaluate(value.toString(), (String) condition); - } - - boolean evaluate(String value, String regex) { - Pattern compiled = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); - for (String token : tokenize(value.toLowerCase())) { - if (compiled.matcher(token).matches()) { - return true; - } - } - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null && isNotBlank((String) condition); - } - - @Override - public String toString() { - return "tokenRegex"; - } - }, - - /** - * Whether some token in the text is within a given edit distance from the given term (case - * insensitive). - */ - tokenFuzzy { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - - EditDistance fuzzyCondition = (EditDistance) condition; - - for (String token : tokenize(value.toString().toLowerCase())) { - if (SearchUtils.getOptimalStringAlignmentDistance(token, fuzzyCondition.query.toLowerCase()) - <= fuzzyCondition.distance) { - return true; - } - } - - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "tokenFuzzy"; - } - }, - - /** Whether the text starts with a given prefix (case sensitive). */ - prefix { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null && value.toString().startsWith(((String) condition).trim()); - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "prefix"; - } - }, - - /** Whether the text matches a regular expression (case sensitive). 
*/ - regex { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - return value != null - && Pattern.compile((String) condition, Pattern.DOTALL) - .matcher(value.toString()) - .matches(); - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null && isNotBlank((String) condition); - } - - @Override - public String toString() { - return "regex"; - } - }, - - /** Whether the text is within a given edit distance from the given term (case sensitive). */ - fuzzy { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - EditDistance fuzzyCondition = (EditDistance) condition; - return SearchUtils.getOptimalStringAlignmentDistance(value.toString(), fuzzyCondition.query) - <= fuzzyCondition.distance; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "fuzzy"; - } - }, - - /** - * Whether tokenized text contains a given phrase, optionally within a given proximity (case - * insensitive). 
- */ - phrase { - @Override - public boolean test(Object value, Object condition) { - preEvaluate(condition); - if (value == null) { - return false; - } - - EditDistance phraseCondition = (EditDistance) condition; - - List valueTokens = tokenize(value.toString().toLowerCase()); - List phraseTokens = tokenize(phraseCondition.query.toLowerCase()); - - int valuePosition = 0; - int phrasePosition = 0; - int distance = 0; - - // Look for matches while phrase/value tokens and distance budget remain - while (phrasePosition < phraseTokens.size() - && valuePosition < valueTokens.size() - && distance <= phraseCondition.distance) { - - if (phraseTokens.get(phrasePosition).equals(valueTokens.get(valuePosition))) { - // Early return-true when we've matched the whole phrase (within the specified distance) - if (phrasePosition == phraseTokens.size() - 1) { - return true; - } - phrasePosition++; - } else if (0 < phrasePosition) { - // We've previously found at least one matching token in the input string, - // but the current token does not match the phrase. Increment distance. 
- distance++; - } - - valuePosition++; - } - - return false; - } - - @Override - public boolean isValidCondition(Object condition) { - return condition != null; - } - - @Override - public String toString() { - return "phrase"; - } - }; - - private static boolean isNotBlank(String str) { - if (str == null || str.isEmpty()) { - return false; - } - int strLen = str.length(); - for (int i = 0; i < strLen; i++) { - if (!Character.isWhitespace(str.charAt(i))) { - return true; - } - } - return false; - } - - // Match anything that is not either: - // 1) a unicode letter, regardless of subcategory (same as Character.isLetter), or - // 2) a unicode decimal digit number (same as Character.isDigit) - private static final Pattern TOKEN_SPLIT_PATTERN = Pattern.compile("[^\\p{L}\\p{Nd}]"); - - static List tokenize(String str) { - String[] rawTokens = TOKEN_SPLIT_PATTERN.split(str); // could contain empty strings - return Stream.of(rawTokens).filter(t -> 0 < t.length()).collect(Collectors.toList()); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java deleted file mode 100644 index 3440c40e87a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -public class SearchUtils { - - /** - * Finds the Optimal - * string alignment distance – also referred to as the Damerau-Levenshtein distance – between - * two strings. - * - *

This is the number of changes needed to change one string into another (insertions, - * deletions or substitutions of a single character, or transpositions of two adjacent - * characters). - * - *

This implementation is based on the Apache Commons Lang implementation of the Levenshtein - * distance, only adding support for transpositions. - * - *

Note that this is the distance used in Lucene for {@code FuzzyTermsEnum}. Lucene itself has - * an implementation of this algorithm, but it is much less efficient in terms of space (also note - * that Lucene's implementation does not return the distance, but a similarity score based on it). - * - * @param s the first string, must not be {@code null}. - * @param t the second string, must not be {@code null}. - * @return The Optimal string alignment distance between the two strings. - * @throws IllegalArgumentException if either String input is {@code null}. - * @see org.apache.commons.lang.StringUtils#getLevenshteinDistance(String, String) - * @see - * LuceneLevenshteinDistance - */ - public static int getOptimalStringAlignmentDistance(String s, String t) { - - /* - * Code adapted from https://github.com/apache/commons-lang/blob/LANG_2_6/src/main/java/org/apache/commons/lang/StringUtils.java - * which was originally released under the Apache 2.0 license with the following copyright: - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - if (s == null || t == null) { - throw new IllegalArgumentException("Strings must not be null"); - } - - int n = s.length(); // length of s - int m = t.length(); // length of t - - if (n == 0) { - return m; - } else if (m == 0) { - return n; - } - - if (n > m) { - // swap the input strings to consume less memory - String tmp = s; - s = t; - t = tmp; - n = m; - m = t.length(); - } - - // instead of maintaining the full matrix in memory, - // we use a sliding window containing 3 lines: - // the current line being written to, and - // the two previous ones. - - int d[] = new int[n + 1]; // current line in the cost matrix - int p1[] = new int[n + 1]; // first line above the current one in the cost matrix - int p2[] = new int[n + 1]; // second line above the current one in the cost matrix - int _d[]; // placeholder to assist in swapping p1, p2 and d - - // indexes into strings s and t - int i; // iterates through s - int j; // iterates through t - - for (i = 0; i <= n; i++) { - p1[i] = i; - } - - for (j = 1; j <= m; j++) { - - // jth character of t - char t_j = t.charAt(j - 1); - d[0] = j; - - for (i = 1; i <= n; i++) { - - char s_i = s.charAt(i - 1); - int cost = s_i == t_j ? 
0 : 1; - - int deletion = d[i - 1] + 1; // cell to the left + 1 - int insertion = p1[i] + 1; // cell to the top + 1 - int substitution = p1[i - 1] + cost; // cell diagonally left and up + cost - - d[i] = Math.min(Math.min(deletion, insertion), substitution); - - // transposition - if (i > 1 && j > 1 && s_i == t.charAt(j - 2) && s.charAt(i - 2) == t_j) { - d[i] = Math.min(d[i], p2[i - 2] + cost); - } - } - - // swap arrays - _d = p2; - p2 = p1; - p1 = d; - d = _d; - } - - // our last action in the above loop was to switch d and p1, so p1 now - // actually has the most recent cost counts - return p1[n]; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java deleted file mode 100644 index ff1d984d745..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class SinglePageGraphResultSet implements GraphResultSet { - - private final AsyncGraphResultSet onlyPage; - - public SinglePageGraphResultSet(AsyncGraphResultSet onlyPage) { - this.onlyPage = onlyPage; - assert !onlyPage.hasMorePages(); - } - - @NonNull - @Override - public ExecutionInfo getRequestExecutionInfo() { - return onlyPage.getRequestExecutionInfo(); - } - - @NonNull - @Override - @Deprecated - public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { - return onlyPage.getExecutionInfo(); - } - - @NonNull - @Override - public Iterator iterator() { - return onlyPage.currentPage().iterator(); - } - - @Override - public void cancel() { - onlyPage.cancel(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java deleted file mode 100644 index 5650d904350..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import java.nio.ByteBuffer; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -/** Mirror of {@link ByteBufUtil} for Tinkerpop Buffer's */ -public class TinkerpopBufferUtil { - - public static ByteBuffer readBytes(Buffer tinkerBuff, int size) { - ByteBuffer res = ByteBuffer.allocate(size); - tinkerBuff.readBytes(res); - res.flip(); - return res; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java deleted file mode 100644 index 649f5310c5d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -/** - * Convenience class for dynamic types implemented as Custom types in GraphBinary. This class will - * take care of handling {value_length} automatically for implementing classes. {@link - * #writeDynamicCustomValue(Object, Buffer, GraphBinaryWriter)} and {@link - * #readDynamicCustomValue(Buffer, GraphBinaryReader)} only need to handle writing the internal - * components of the custom type. - * - * @param the java type the implementing classes will encode and decode. 
- */ -public abstract class AbstractDynamicGraphBinaryCustomSerializer - extends AbstractSimpleGraphBinaryCustomSerializer { - protected abstract void writeDynamicCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException; - - protected abstract T readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException; - - @Override - protected T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - int initialIndex = buffer.readerIndex(); - - // read actual custom value - T read = readDynamicCustomValue(buffer, context); - - // make sure we didn't read more than what was input as {value_length} - checkValueSize(valueLength, (buffer.readerIndex() - initialIndex)); - - return read; - } - - @Override - protected void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - // Store the current writer index - final int valueLengthIndex = buffer.writerIndex(); - - // Write a dummy length that will be overwritten at the end of this method - buffer.writeInt(0); - - // Custom type's writer logic - writeDynamicCustomValue(value, buffer, context); - - // value_length = diff written - 4 bytes for the dummy length - final int valueLength = buffer.writerIndex() - valueLengthIndex - GraphBinaryUtils.sizeOfInt(); - - // Go back, write the {value_length} and then reset back the writer index - buffer.markWriterIndex().writerIndex(valueLengthIndex).writeInt(valueLength).resetWriterIndex(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java deleted file mode 100644 index 6dd149707e8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * 
Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.DataType; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.types.CustomTypeSerializer; - -/** - * A base custom type serializer for DSE types that handles most of the boiler plate code associated - * with GraphBinary's custom types. - * - *

The full format of a custom type in GraphBinary is the following: - * - *

{type_code}{custom_type_name}{custom_type_info_length}{custom_type_info_bytes}{value_flag}{value_length}{value_bytes} - * - *

This class is made to handle - * {type_code}{custom_type_name}{custom_type_info_length}{custom_type_info_bytes}{value_flag} for - * DSE types. - * - *

Implementing classes are still in charge of encoding {value_length}{value_bytes} in the {@link - * #readCustomValue(int, Buffer, GraphBinaryReader)} implementations. - * - *

Implementing classes must override {@link CustomTypeSerializer#getTypeName()} with their own - * type name. - * - * @param the java type the implementing classes will encode and decode. - */ -abstract class AbstractSimpleGraphBinaryCustomSerializer implements CustomTypeSerializer { - AbstractSimpleGraphBinaryCustomSerializer() { - super(); - } - - protected static final String INCORRECT_VALUE_LENGTH_ERROR_MESSAGE = - "{value_length} read for this value does not correspond to the size of a '%s' value. [%s] bytes required but got [%s]"; - - protected abstract T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException; - - protected abstract void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException; - - protected void checkValueSize(int lengthRequired, int lengthFound) { - Preconditions.checkArgument( - lengthFound == lengthRequired, - INCORRECT_VALUE_LENGTH_ERROR_MESSAGE, - getTypeName(), - lengthRequired, - lengthFound); - } - - @Override - public DataType getDataType() { - return DataType.CUSTOM; - } - - @Override - public T read(Buffer buffer, GraphBinaryReader context) throws IOException { - // the type serializer registry will take care of deserializing {custom_type_name} - // read {custom_type_info_length} and verify it is 0. 
- // See #write(T, ByteBuf, GraphBinaryWriter) for why it is set to 0 - if (context.readValue(buffer, Integer.class, false) != 0) { - throw new IOException("{custom_type_info} should not be provided for this custom type"); - } - - return readValue(buffer, context, true); - } - - @Override - public T readValue(Buffer buffer, GraphBinaryReader context, boolean nullable) - throws IOException { - if (nullable) { - // read {value_flag} - final byte valueFlag = buffer.readByte(); - - // if value is null and the value is nullable - if ((valueFlag & 1) == 1) { - return null; - } - // Note: we don't error out if the valueFlag == "value is null" and nullable == false because - // the serializer - // should have errored out at write time if that was the case. - } - - // Read the byte length of the value bytes - final int valueLength = buffer.readInt(); - - if (valueLength <= 0) { - throw new IOException(String.format("Unexpected value length: %d", valueLength)); - } - - if (valueLength > buffer.readableBytes()) { - throw new IOException( - String.format( - "Not enough readable bytes: %d bytes required for value (%d bytes available)", - valueLength, buffer.readableBytes())); - } - - // subclasses are responsible for reading {value} - return readCustomValue(valueLength, buffer, context); - } - - @Override - public void write(final T value, final Buffer buffer, final GraphBinaryWriter context) - throws IOException { - // the type serializer registry will take care of serializing {custom_type_name} - // write "{custom_type_info_length}" to 0 because we don't need it for the DSE types - context.writeValue(0, buffer, false); - writeValue(value, buffer, context, true); - } - - @Override - public void writeValue( - final T value, final Buffer buffer, final GraphBinaryWriter context, final boolean nullable) - throws IOException { - if (value == null) { - if (!nullable) { - throw new IOException("Unexpected null value when nullable is false"); - } - - // writes {value_flag} to "1" 
which means "the value is null" - context.writeValueFlagNull(buffer); - return; - } - - if (nullable) { - // writes {value_flag} to "0" which means "value is not null" - context.writeValueFlagNone(buffer); - } - - // sub classes will be responsible for writing {value_length} and {value_bytes} - writeCustomValue(value, buffer, context); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java deleted file mode 100644 index bec3c78743a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.internal.core.graph.TinkerpopBufferUtil; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.dse.driver.internal.core.protocol.TinkerpopBufferPrimitiveCodec; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.SettableByIndex; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.type.DataTypeHelper; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.RawType; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.Objects; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -class ComplexTypeSerializerUtil { - - private static final PrimitiveCodec codec = - new TinkerpopBufferPrimitiveCodec(new DseNettyBufferFactory()); - - static void encodeTypeDefinition( - DataType type, Buffer buffer, DefaultDriverContext driverContext) { - RawType protocolType = toProtocolSpec(type); - protocolType.encode(buffer, codec, driverContext.getProtocolVersion().getCode()); - } - - static DataType decodeTypeDefinition(Buffer buffer, 
DefaultDriverContext driverContext) { - RawType type = RawType.decode(buffer, codec, driverContext.getProtocolVersion().getCode()); - return DataTypeHelper.fromProtocolSpec(type, driverContext); - } - - /* Tinkerpop-based encoding of UDT values, based on the UdtCoded.encode() method, but using Tinkerpop buffers directly to avoid - unnecessary NIO ByteBuffer copies. */ - static void encodeValue(@Nullable GettableByIndex value, Buffer tinkerBuff) { - if (value == null) { - return; - } - - for (int i = 0; i < value.size(); i++) { - ByteBuffer fieldBuffer = value.getBytesUnsafe(i); - if (fieldBuffer == null) { - tinkerBuff.writeInt(-1); - } else { - tinkerBuff.writeInt(fieldBuffer.remaining()); - tinkerBuff.writeBytes(fieldBuffer.duplicate()); - } - } - } - - /* This method will move forward the Tinkerpop buffer given in parameter based on the UDT value read. - Content of the method is roughly equivalent to UdtCodec.decode(), but using Tinkerpop buffers directly to avoid - unnecessary NIO ByteBuffer copies. */ - static > T decodeValue(Buffer tinkerBuff, T val, int size) { - try { - for (int i = 0; i < size; i++) { - int fieldSize = tinkerBuff.readInt(); - if (fieldSize >= 0) { - // the reassignment is to shut down the error-prone warning about ignoring return values. 
- val = val.setBytesUnsafe(i, TinkerpopBufferUtil.readBytes(tinkerBuff, fieldSize)); - } - } - return val; - } catch (BufferUnderflowException e) { - throw new IllegalArgumentException("Not enough bytes to deserialize a UDT value", e); - } - } - - private static RawType toProtocolSpec(DataType dataType) { - int id = dataType.getProtocolCode(); - RawType type = RawType.PRIMITIVES.get(id); - if (type != null) { - return type; - } - - switch (id) { - case ProtocolConstants.DataType.CUSTOM: - CustomType customType = ((CustomType) dataType); - type = new RawType.RawCustom(customType.getClassName()); - break; - case ProtocolConstants.DataType.LIST: - ListType listType = ((ListType) dataType); - type = new RawType.RawList(toProtocolSpec(listType.getElementType())); - break; - case ProtocolConstants.DataType.SET: - SetType setType = ((SetType) dataType); - type = new RawType.RawSet(toProtocolSpec(setType.getElementType())); - break; - case ProtocolConstants.DataType.MAP: - MapType mapType = ((MapType) dataType); - type = - new RawType.RawMap( - toProtocolSpec(mapType.getKeyType()), toProtocolSpec(mapType.getValueType())); - break; - case ProtocolConstants.DataType.TUPLE: - TupleType tupleType = ((TupleType) dataType); - ImmutableList.Builder subTypesList = - ImmutableList.builderWithExpectedSize(tupleType.getComponentTypes().size()); - for (int i = 0; i < tupleType.getComponentTypes().size(); i++) { - subTypesList.add(toProtocolSpec(tupleType.getComponentTypes().get(i))); - } - type = new RawType.RawTuple(subTypesList.build()); - break; - case ProtocolConstants.DataType.UDT: - UserDefinedType userDefinedType = ((UserDefinedType) dataType); - ImmutableMap.Builder subTypesMap = - ImmutableMap.builderWithExpectedSize(userDefinedType.getFieldNames().size()); - for (int i = 0; i < userDefinedType.getFieldTypes().size(); i++) { - subTypesMap.put( - userDefinedType.getFieldNames().get(i).asInternal(), - toProtocolSpec(userDefinedType.getFieldTypes().get(i))); - } - type = - new 
RawType.RawUdt( - Objects.requireNonNull(userDefinedType.getKeyspace()).asInternal(), - userDefinedType.getName().asInternal(), - subTypesMap.build()); - break; - default: - throw new IllegalArgumentException("Unsupported type: " + dataType.asCql(true, true)); - } - return type; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java deleted file mode 100644 index 1ac97de0ef4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class CqlDurationSerializer extends AbstractSimpleGraphBinaryCustomSerializer { - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_DURATION_TYPE_NAME; - } - - @Override - protected CqlDuration readCustomValue( - final int valueLength, final Buffer buffer, final GraphBinaryReader context) - throws IOException { - checkValueSize(GraphBinaryUtils.sizeOfDuration(), valueLength); - return CqlDuration.newInstance( - context.readValue(buffer, Integer.class, false), - context.readValue(buffer, Integer.class, false), - context.readValue(buffer, Long.class, false)); - } - - @Override - protected void writeCustomValue(CqlDuration value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.writeValue(GraphBinaryUtils.sizeOfDuration(), buffer, false); - context.writeValue(value.getMonths(), buffer, false); - context.writeValue(value.getDays(), buffer, false); - context.writeValue(value.getNanoseconds(), buffer, false); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java deleted file mode 100644 index 9e281b2b84a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class DistanceSerializer extends AbstractSimpleGraphBinaryCustomSerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_DISTANCE_TYPE_NAME; - } - - @Override - protected Distance readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - Point p = context.readValue(buffer, Point.class, false); - checkValueSize(GraphBinaryUtils.sizeOfDistance(p), valueLength); - return new Distance(p, context.readValue(buffer, Double.class, false)); - } - - @Override - protected void writeCustomValue(Distance value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.writeValue(GraphBinaryUtils.sizeOfDistance(value.getCenter()), buffer, false); - context.writeValue(value.getCenter(), buffer, false); - context.writeValue(value.getRadius(), buffer, false); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java deleted file mode 100644 index b2831040123..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class EditDistanceSerializer - extends AbstractSimpleGraphBinaryCustomSerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_EDIT_DISTANCE_TYPE_NAME; - } - - @Override - protected EditDistance readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - int distance = context.readValue(buffer, Integer.class, false); - String query = context.readValue(buffer, String.class, false); - checkValueSize(GraphBinaryUtils.sizeOfEditDistance(query), valueLength); - - return new EditDistance(query, distance); - } - - @Override - protected void writeCustomValue(EditDistance value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.writeValue(GraphBinaryUtils.sizeOfEditDistance(value.query), buffer, false); - context.writeValue(value.distance, buffer, false); - context.writeValue(value.query, buffer, false); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java deleted file mode 100644 index 996e79c7693..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.dse.driver.internal.core.graph.TinkerpopBufferUtil; -import java.io.IOException; -import java.nio.ByteBuffer; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public abstract class GeometrySerializer - extends AbstractSimpleGraphBinaryCustomSerializer { - public abstract T fromWellKnownBinary(ByteBuffer buffer); - - @Override - protected T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) - throws IOException { - return fromWellKnownBinary(TinkerpopBufferUtil.readBytes(buffer, valueLength)); - } - - @Override - protected void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - ByteBuffer bb = value.asWellKnownBinary(); - - // writing the {value_length} - context.writeValue(bb.remaining(), buffer, false); - buffer.writeBytes(bb); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java deleted file mode 100644 index 59f966a34c2..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.UnpooledByteBufAllocator; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import 
org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import org.javatuples.Pair; - -public class GraphBinaryModule { - public static final UnpooledByteBufAllocator ALLOCATOR = new UnpooledByteBufAllocator(false); - private static final BufferFactory FACTORY = new DseNettyBufferFactory(); - - static final String GRAPH_BINARY_POINT_TYPE_NAME = "driver.dse.geometry.Point"; - static final String GRAPH_BINARY_LINESTRING_TYPE_NAME = "driver.dse.geometry.LineString"; - static final String GRAPH_BINARY_POLYGON_TYPE_NAME = "driver.dse.geometry.Polygon"; - static final String GRAPH_BINARY_DISTANCE_TYPE_NAME = "driver.dse.geometry.Distance"; - static final String GRAPH_BINARY_DURATION_TYPE_NAME = "driver.core.Duration"; - static final String GRAPH_BINARY_EDIT_DISTANCE_TYPE_NAME = "driver.dse.search.EditDistance"; - static final String GRAPH_BINARY_TUPLE_VALUE_TYPE_NAME = "driver.core.TupleValue"; - static final String GRAPH_BINARY_UDT_VALUE_TYPE_NAME = "driver.core.UDTValue"; - static final String GRAPH_BINARY_PAIR_TYPE_NAME = "org.javatuples.Pair"; - - private final GraphBinaryReader reader; - private final GraphBinaryWriter writer; - - public GraphBinaryModule(GraphBinaryReader reader, GraphBinaryWriter writer) { - this.reader = reader; - this.writer = writer; - } - - public static TypeSerializerRegistry createDseTypeSerializerRegistry( - DefaultDriverContext driverContext) { - return TypeSerializerRegistry.build() - .addCustomType(CqlDuration.class, new CqlDurationSerializer()) - .addCustomType(Point.class, new PointSerializer()) - .addCustomType(LineString.class, new LineStringSerializer()) - .addCustomType(Polygon.class, new PolygonSerializer()) - .addCustomType(Distance.class, new DistanceSerializer()) - .addCustomType(EditDistance.class, new EditDistanceSerializer()) - .addCustomType(TupleValue.class, new TupleValueSerializer(driverContext)) - .addCustomType(UdtValue.class, 
new UdtValueSerializer(driverContext)) - .addCustomType(Pair.class, new PairSerializer()) - .create(); - } - - @SuppressWarnings("TypeParameterUnusedInFormals") - public T deserialize(final Buffer buffer) throws IOException { - return reader.read(buffer); - } - - public Buffer serialize(final T value) throws IOException { - return serialize(value, FACTORY.create(ALLOCATOR.heapBuffer())); - } - - public Buffer serialize(final T value, final Buffer buffer) throws IOException { - try { - writer.write(value, buffer); - return buffer; - } catch (Exception e) { - buffer.release(); - throw e; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java deleted file mode 100644 index 42283cd5167..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import java.nio.charset.StandardCharsets; - -class GraphBinaryUtils { - static int sizeOfInt() { - return 4; - } - - static int sizeOfLong() { - return 8; - } - - static int sizeOfDouble() { - return 8; - } - - static int sizeOfPoint(Point point) { - return point.asWellKnownBinary().remaining(); - } - - /* assumes UTF8 */ - static int sizeOfString(String s) { - // length + data length - return sizeOfInt() + s.getBytes(StandardCharsets.UTF_8).length; - } - - static int sizeOfDuration() { - return sizeOfInt() + sizeOfInt() + sizeOfLong(); - } - - static int sizeOfDistance(Point point) { - return sizeOfPoint(point) + sizeOfDouble(); - } - - static int sizeOfEditDistance(String s) { - return sizeOfInt() + sizeOfString(s); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java deleted file mode 100644 index 4dfa8f8f0f1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import java.nio.ByteBuffer; - -public class LineStringSerializer extends GeometrySerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_LINESTRING_TYPE_NAME; - } - - @Override - public LineString fromWellKnownBinary(ByteBuffer buffer) { - return LineString.fromWellKnownBinary(buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java deleted file mode 100644 index 3f13dd5b3a0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.javatuples.Pair; - -public class PairSerializer extends AbstractDynamicGraphBinaryCustomSerializer { - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_PAIR_TYPE_NAME; - } - - @Override - protected Pair readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException { - return new Pair<>(context.read(buffer), context.read(buffer)); - } - - @Override - protected void writeDynamicCustomValue(Pair value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - context.write(value.getValue0(), buffer); - context.write(value.getValue1(), buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java deleted file mode 100644 index 2204b0da073..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import java.nio.ByteBuffer; - -public class PointSerializer extends GeometrySerializer { - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_POINT_TYPE_NAME; - } - - @Override - public Point fromWellKnownBinary(ByteBuffer buffer) { - return Point.fromWellKnownBinary(buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java deleted file mode 100644 index 8e3bc67838a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import java.nio.ByteBuffer; - -public class PolygonSerializer extends GeometrySerializer { - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_POLYGON_TYPE_NAME; - } - - @Override - public Polygon fromWellKnownBinary(ByteBuffer buffer) { - return Polygon.fromWellKnownBinary(buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java deleted file mode 100644 index b7c6fc2098d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class TupleValueSerializer extends AbstractDynamicGraphBinaryCustomSerializer { - - private final DefaultDriverContext driverContext; - - public TupleValueSerializer(DefaultDriverContext driverContext) { - this.driverContext = driverContext; - } - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_TUPLE_VALUE_TYPE_NAME; - } - - @Override - public TupleValue readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException { - // read the type first - DataType type = ComplexTypeSerializerUtil.decodeTypeDefinition(buffer, driverContext); - - assert type instanceof TupleType - : "GraphBinary TupleValue deserializer was called on a value that is not encoded as a TupleValue."; - - TupleType tupleType = (TupleType) type; - TupleValue value = tupleType.newValue(); - - // then decode the values from the buffer - return ComplexTypeSerializerUtil.decodeValue( - buffer, value, tupleType.getComponentTypes().size()); - } - - @Override - public void writeDynamicCustomValue(TupleValue value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - // write type first in native protocol - ComplexTypeSerializerUtil.encodeTypeDefinition(value.getType(), buffer, driverContext); - - // write value after - ComplexTypeSerializerUtil.encodeValue(value, buffer); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java 
b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java deleted file mode 100644 index 3e617ebf926..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import java.io.IOException; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; - -public class UdtValueSerializer extends AbstractDynamicGraphBinaryCustomSerializer { - private final DefaultDriverContext driverContext; - - public UdtValueSerializer(DefaultDriverContext driverContext) { - this.driverContext = driverContext; - } - - @Override - public String getTypeName() { - return GraphBinaryModule.GRAPH_BINARY_UDT_VALUE_TYPE_NAME; - } - - @Override - public UdtValue readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) - throws IOException { - // read type definition first - DataType driverType = ComplexTypeSerializerUtil.decodeTypeDefinition(buffer, driverContext); - - assert driverType instanceof UserDefinedType - : "GraphBinary UdtValue deserializer was called on a value that is not encoded as a UdtValue."; - - UserDefinedType userDefinedType = (UserDefinedType) driverType; - UdtValue value = userDefinedType.newValue(); - - // then read values - return ComplexTypeSerializerUtil.decodeValue( - buffer, value, userDefinedType.getFieldTypes().size()); - } - - @Override - public void writeDynamicCustomValue(UdtValue value, Buffer buffer, GraphBinaryWriter context) - throws IOException { - // write type first in native protocol format - ComplexTypeSerializerUtil.encodeTypeDefinition(value.getType(), buffer, driverContext); - // write value after - ComplexTypeSerializerUtil.encodeValue(value, buffer); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java deleted file mode 100644 index 590ac2e9be2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.binary.buffer; - -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -/** - * Internal impl of Tinkerpop Buffers. We implement an internal type here to allow for this class to - * use shaded Netty types (without bringing all of Tinkerpop into the shaded JAR). The impl is based - * on the initial impl of {@link NettyBuffer} but we don't guarantee that this class will mirror - * changes to that class over time. - */ -final class DseNettyBuffer implements Buffer { - private final ByteBuf buffer; - - /** - * Creates a new instance. - * - * @param buffer The buffer to wrap. 
- */ - DseNettyBuffer(ByteBuf buffer) { - if (buffer == null) { - throw new IllegalArgumentException("buffer can't be null"); - } - - this.buffer = buffer; - } - - @Override - public int readableBytes() { - return this.buffer.readableBytes(); - } - - @Override - public int readerIndex() { - return this.buffer.readerIndex(); - } - - @Override - public Buffer readerIndex(final int readerIndex) { - this.buffer.readerIndex(readerIndex); - return this; - } - - @Override - public int writerIndex() { - return this.buffer.writerIndex(); - } - - @Override - public Buffer writerIndex(final int writerIndex) { - this.buffer.writerIndex(writerIndex); - return this; - } - - @Override - public Buffer markWriterIndex() { - this.buffer.markWriterIndex(); - return this; - } - - @Override - public Buffer resetWriterIndex() { - this.buffer.resetWriterIndex(); - return this; - } - - @Override - public int capacity() { - return this.buffer.capacity(); - } - - @Override - public boolean isDirect() { - return this.buffer.isDirect(); - } - - @Override - public boolean readBoolean() { - return this.buffer.readBoolean(); - } - - @Override - public byte readByte() { - return this.buffer.readByte(); - } - - @Override - public short readShort() { - return this.buffer.readShort(); - } - - @Override - public int readInt() { - return this.buffer.readInt(); - } - - @Override - public long readLong() { - return this.buffer.readLong(); - } - - @Override - public float readFloat() { - return this.buffer.readFloat(); - } - - @Override - public double readDouble() { - return this.buffer.readDouble(); - } - - @Override - public Buffer readBytes(final byte[] destination) { - this.buffer.readBytes(destination); - return this; - } - - @Override - public Buffer readBytes(final byte[] destination, final int dstIndex, final int length) { - this.buffer.readBytes(destination, dstIndex, length); - return this; - } - - @Override - public Buffer readBytes(final ByteBuffer dst) { - this.buffer.readBytes(dst); - 
return this; - } - - @Override - public Buffer readBytes(final OutputStream out, final int length) throws IOException { - this.buffer.readBytes(out, length); - return this; - } - - @Override - public Buffer writeBoolean(final boolean value) { - this.buffer.writeBoolean(value); - return this; - } - - @Override - public Buffer writeByte(final int value) { - this.buffer.writeByte(value); - return this; - } - - @Override - public Buffer writeShort(final int value) { - this.buffer.writeShort(value); - return this; - } - - @Override - public Buffer writeInt(final int value) { - this.buffer.writeInt(value); - return this; - } - - @Override - public Buffer writeLong(final long value) { - this.buffer.writeLong(value); - return this; - } - - @Override - public Buffer writeFloat(final float value) { - this.buffer.writeFloat(value); - return this; - } - - @Override - public Buffer writeDouble(final double value) { - this.buffer.writeDouble(value); - return this; - } - - @Override - public Buffer writeBytes(final byte[] src) { - this.buffer.writeBytes(src); - return this; - } - - @Override - public Buffer writeBytes(final ByteBuffer src) { - this.buffer.writeBytes(src); - return this; - } - - @Override - public Buffer writeBytes(byte[] src, final int srcIndex, final int length) { - this.buffer.writeBytes(src, srcIndex, length); - return this; - } - - @Override - public boolean release() { - return this.buffer.release(); - } - - @Override - public Buffer retain() { - this.buffer.retain(); - return this; - } - - @Override - public int referenceCount() { - return this.buffer.refCnt(); - } - - @Override - public ByteBuffer[] nioBuffers() { - return this.buffer.nioBuffers(); - } - - @Override - public ByteBuffer nioBuffer() { - return this.buffer.nioBuffer(); - } - - @Override - public ByteBuffer nioBuffer(final int index, final int length) { - return this.buffer.nioBuffer(index, length); - } - - @Override - public ByteBuffer[] nioBuffers(final int index, final int length) { - 
return this.buffer.nioBuffers(index, length); - } - - @Override - public int nioBufferCount() { - return this.buffer.nioBufferCount(); - } - - @Override - public Buffer getBytes(final int index, final byte[] dst) { - this.buffer.getBytes(index, dst); - return this; - } - - /** Returns the underlying buffer. */ - public ByteBuf getUnderlyingBuffer() { - return this.buffer; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java deleted file mode 100644 index 57ee3cb1a9d..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary.buffer; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.buffer.UnpooledByteBufAllocator; -import java.nio.ByteBuffer; -import java.util.function.Supplier; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; - -/** - * Internal BufferFactory impl for creation of Tinkerpop buffers. We implement an internal type here - * to allow for this class to use shaded Netty types (without bringing all of Tinkerpop into the - * shaded JAR). The impl is based on the initial impl of {@code - * org.apache.tinkerpop.gremlin.driver.ser.NettyBufferFactory} but we don't guarantee that this - * class will mirror changes to that class over time. - */ -public class DseNettyBufferFactory implements BufferFactory { - - private static final ByteBufAllocator DEFAULT_ALLOCATOR = new UnpooledByteBufAllocator(false); - - private final ByteBufAllocator allocator; - - public DseNettyBufferFactory() { - this.allocator = DEFAULT_ALLOCATOR; - } - - public DseNettyBufferFactory(ByteBufAllocator allocator) { - this.allocator = allocator; - } - - @Override - public Buffer create(final ByteBuf value) { - return new DseNettyBuffer(value); - } - - @Override - public Buffer wrap(final ByteBuffer value) { - return create(Unpooled.wrappedBuffer(value)); - } - - public Buffer heap() { - return create(allocator.heapBuffer()); - } - - public Buffer heap(int initialSize) { - return create(allocator.heapBuffer(initialSize)); - } - - public Buffer heap(int initialSize, int maxSize) { - return create(allocator.heapBuffer(initialSize, maxSize)); - } - - public Buffer io() { - return create(allocator.ioBuffer()); - } - - public Buffer io(int initialSize) { - return create(allocator.ioBuffer(initialSize)); - } - - public Buffer io(int initialSize, int maxSize) { - 
return create(allocator.ioBuffer(initialSize, maxSize)); - } - - public Buffer direct() { - return create(allocator.directBuffer()); - } - - public Buffer direct(int initialSize) { - return create(allocator.directBuffer(initialSize)); - } - - public Buffer direct(int initialSize, int maxSize) { - return create(allocator.directBuffer(initialSize, maxSize)); - } - - public Buffer composite(ByteBuf... components) { - - CompositeByteBuf buff = allocator.compositeBuffer(components.length); - buff.addComponents(components); - return create(buff); - } - - public Buffer composite(Buffer... components) { - ByteBuf[] nettyBufs = new ByteBuf[components.length]; - for (int i = 0; i < components.length; ++i) { - if (!(components[i] instanceof DseNettyBuffer)) { - throw new IllegalArgumentException("Can only concatenate DseNettyBuffer instances"); - } - nettyBufs[i] = ((DseNettyBuffer) components[i]).getUnderlyingBuffer(); - } - return composite(nettyBufs); - } - - public Buffer withBytes(int... bytes) { - return withBytes(this::heap, bytes); - } - - public Buffer withBytes(Supplier supplier, int... bytes) { - Buffer buff = supplier.get(); - for (int val : bytes) { - buff.writeByte(val); - } - return buff; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java deleted file mode 100644 index fda0eed5333..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.NotThreadSafe; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -@NotThreadSafe -class DefaultReactiveGraphNode implements ReactiveGraphNode { - - private final GraphNode graphNode; - private final ExecutionInfo executionInfo; - - DefaultReactiveGraphNode(@NonNull GraphNode graphNode, @NonNull ExecutionInfo executionInfo) { - this.graphNode = graphNode; - this.executionInfo = executionInfo; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public boolean isNull() { - return graphNode.isNull(); - } - - @Override - public boolean isMap() { - return graphNode.isMap(); - } - - @Override - public Iterable keys() { - return 
graphNode.keys(); - } - - @Override - public GraphNode getByKey(Object key) { - return graphNode.getByKey(key); - } - - @Override - public Map asMap() { - return graphNode.asMap(); - } - - @Override - public boolean isList() { - return graphNode.isList(); - } - - @Override - public int size() { - return graphNode.size(); - } - - @Override - public GraphNode getByIndex(int index) { - return graphNode.getByIndex(index); - } - - @Override - public List asList() { - return graphNode.asList(); - } - - @Override - public boolean isValue() { - return graphNode.isValue(); - } - - @Override - public int asInt() { - return graphNode.asInt(); - } - - @Override - public boolean asBoolean() { - return graphNode.asBoolean(); - } - - @Override - public long asLong() { - return graphNode.asLong(); - } - - @Override - public double asDouble() { - return graphNode.asDouble(); - } - - @Override - public String asString() { - return graphNode.asString(); - } - - @Override - public ResultT as(Class clazz) { - return graphNode.as(clazz); - } - - @Override - public ResultT as(GenericType type) { - return graphNode.as(type); - } - - @Override - public boolean isVertex() { - return graphNode.isVertex(); - } - - @Override - public Vertex asVertex() { - return graphNode.asVertex(); - } - - @Override - public boolean isEdge() { - return graphNode.isEdge(); - } - - @Override - public Edge asEdge() { - return graphNode.asEdge(); - } - - @Override - public boolean isPath() { - return graphNode.isPath(); - } - - @Override - public Path asPath() { - return graphNode.asPath(); - } - - @Override - public boolean isProperty() { - return graphNode.isProperty(); - } - - @Override - public Property asProperty() { - return graphNode.asProperty(); - } - - @Override - public boolean isVertexProperty() { - return graphNode.isVertexProperty(); - } - - @Override - public VertexProperty asVertexProperty() { - return graphNode.asVertexProperty(); - } - - @Override - public boolean isSet() { - return 
graphNode.isSet(); - } - - @Override - public Set asSet() { - return graphNode.asSet(); - } - - @Override - public String toString() { - return "DefaultReactiveGraphNode{graphNode=" - + graphNode - + ", executionInfo=" - + executionInfo - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java deleted file mode 100644 index 137e44e4d95..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.EmptySubscription; -import com.datastax.dse.driver.internal.core.cql.reactive.SimpleUnicastProcessor; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; - -@ThreadSafe -public class DefaultReactiveGraphResultSet implements ReactiveGraphResultSet { - - private final Callable> firstPage; - - private final AtomicBoolean alreadySubscribed = new AtomicBoolean(false); - - private final SimpleUnicastProcessor executionInfosPublisher = - new SimpleUnicastProcessor<>(); - - public DefaultReactiveGraphResultSet(Callable> firstPage) { - this.firstPage = firstPage; - } - - @Override - public void subscribe(@NonNull Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // As per rule 1.11, this publisher is allowed to support only one subscriber. 
- if (alreadySubscribed.compareAndSet(false, true)) { - ReactiveGraphResultSetSubscription subscription = - new ReactiveGraphResultSetSubscription(subscriber, executionInfosPublisher); - try { - subscriber.onSubscribe(subscription); - // must be done after onSubscribe - subscription.start(firstPage); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled, and the caller MUST raise this error condition in a fashion - // that is adequate for the runtime environment. - subscription.doOnError( - new IllegalStateException( - subscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", - t)); - } - } else { - subscriber.onSubscribe(EmptySubscription.INSTANCE); - subscriber.onError( - new IllegalStateException("This publisher does not support multiple subscriptions")); - } - // As per 2.13, this method must return normally (i.e. not throw) - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return executionInfosPublisher; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java deleted file mode 100644 index 45bbd8c62b0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.FailedPublisher; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -public class FailedReactiveGraphResultSet extends FailedPublisher - implements ReactiveGraphResultSet { - - public FailedReactiveGraphResultSet(Throwable error) { - super(error); - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return new FailedPublisher<>(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java deleted file mode 100644 index ed2cd28926c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ReactiveGraphRequestProcessor - implements RequestProcessor, ReactiveGraphResultSet> { - - public static final GenericType REACTIVE_GRAPH_RESULT_SET = - GenericType.of(ReactiveGraphResultSet.class); - - private final GraphRequestAsyncProcessor asyncGraphProcessor; - - public ReactiveGraphRequestProcessor(@NonNull GraphRequestAsyncProcessor asyncGraphProcessor) { - this.asyncGraphProcessor = asyncGraphProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof GraphStatement && resultType.equals(REACTIVE_GRAPH_RESULT_SET); - } - - @Override - public ReactiveGraphResultSet process( - GraphStatement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new DefaultReactiveGraphResultSet( - () -> 
asyncGraphProcessor.process(request, session, context, sessionLogPrefix)); - } - - @Override - public ReactiveGraphResultSet newFailure(RuntimeException error) { - return new FailedReactiveGraphResultSet(error); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java deleted file mode 100644 index c3234d74ebc..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.internal.core.cql.reactive.ReactiveOperators; -import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.Objects; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is very similar to {@link - * com.datastax.dse.driver.internal.core.cql.reactive.ReactiveResultSetSubscription}. It exists - * merely because {@link AsyncGraphResultSet} is not a subtype of {@link - * com.datastax.oss.driver.api.core.AsyncPagingIterable} and thus it would be difficult to re-use - * ReactiveResultSetSubscription for graph result sets. - */ -@ThreadSafe -public class ReactiveGraphResultSetSubscription implements Subscription { - - private static final Logger LOG = - LoggerFactory.getLogger(ReactiveGraphResultSetSubscription.class); - - private static final int MAX_ENQUEUED_PAGES = 4; - - /** Tracks the number of items requested by the subscriber. 
*/ - private final AtomicLong requested = new AtomicLong(0); - - /** The pages received so far, with a maximum of MAX_ENQUEUED_PAGES elements. */ - private final BoundedConcurrentQueue pages = - new BoundedConcurrentQueue<>(MAX_ENQUEUED_PAGES); - - /** - * Used to signal that a thread is currently draining, i.e., emitting items to the subscriber. - * When it is zero, that means there is no ongoing emission. This mechanism effectively serializes - * access to the drain() method, and also keeps track of missed attempts to enter it, since each - * thread that attempts to drain will increment this counter. - * - * @see #drain() - */ - private final AtomicInteger draining = new AtomicInteger(0); - - /** - * Waited upon by the driver and completed when the subscriber requests its first item. - * - *

Used to hold off emitting results until the subscriber issues its first request for items. - * Since this future is only completed from {@link #request(long)}, this effectively conditions - * the enqueueing of the first page to the reception of the subscriber's first request. - * - *

This mechanism avoids sending terminal signals before a request is made when the stream is - * empty. Note that as per 2.9, "a Subscriber MUST be prepared to receive an onComplete signal - * with or without a preceding Subscription.request(long n) call." However, the TCK considers it - * as unfair behavior. - * - * @see #start(Callable) - * @see #request(long) - */ - private final CompletableFuture firstSubscriberRequestArrived = new CompletableFuture<>(); - - /** non-final because it has to be de-referenced, see {@link #clear()}. */ - private volatile Subscriber mainSubscriber; - - private volatile Subscriber executionInfosSubscriber; - - /** - * Set to true when the subscription is cancelled, which happens when an error is encountered, - * when the result set is fully consumed and the subscription terminates, or when the subscriber - * manually calls {@link #cancel()}. - */ - private volatile boolean cancelled = false; - - ReactiveGraphResultSetSubscription( - @NonNull Subscriber mainSubscriber, - @NonNull Subscriber executionInfosSubscriber) { - this.mainSubscriber = mainSubscriber; - this.executionInfosSubscriber = executionInfosSubscriber; - } - - /** - * Starts the query execution. - * - *

Must be called immediately after creating the subscription, but after {@link - * Subscriber#onSubscribe(Subscription)}. - * - * @param firstPage The future that, when complete, will produce the first page. - */ - void start(@NonNull Callable> firstPage) { - firstSubscriberRequestArrived.thenAccept( - (aVoid) -> fetchNextPageAndEnqueue(new Page(firstPage))); - } - - @Override - public void request(long n) { - // As per 3.6: after the Subscription is cancelled, additional - // calls to request() MUST be NOPs. - if (!cancelled) { - if (n < 1) { - // Validate request as per rule 3.9 - doOnError( - new IllegalArgumentException( - mainSubscriber - + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); - } else { - // As per rule 3.17, when demand overflows Long.MAX_VALUE - // it can be treated as "effectively unbounded" - ReactiveOperators.addCap(requested, n); - // Set the first future to true if not done yet. - // This will make the first page of results ready for consumption, - // see start(). - // As per 2.7 it is the subscriber's responsibility to provide - // external synchronization when calling request(), - // so the check-then-act idiom below is good enough - // (and besides, complete() is idempotent). - if (!firstSubscriberRequestArrived.isDone()) { - firstSubscriberRequestArrived.complete(null); - } - drain(); - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of - // its caller by returning in a timely manner, MUST be idempotent and - // MUST be thread-safe. - if (!cancelled) { - cancelled = true; - if (draining.getAndIncrement() == 0) { - // If nobody is draining, clear now; - // otherwise, the draining thread will notice - // that the cancelled flag was set - // and will clear for us. - clear(); - } - } - } - - /** - * Attempts to drain available items, i.e. emit them to the subscriber. - * - *

Access to this method is serialized by the field {@link #draining}: only one thread at a - * time can drain, but threads that attempt to drain while other thread is already draining - * increment that field; the draining thread, before finishing its work, checks for such failed - * attempts and triggers another round of draining if that was the case. - * - *

The loop is interrupted when 1) the requested amount has been met or 2) when there are no - * more items readily available or 3) the subscription has been cancelled. - * - *

The loop also checks for stream exhaustion and emits a terminal {@code onComplete} signal in - * this case. - * - *

This method may run on a driver IO thread when invoked from {@link - * #fetchNextPageAndEnqueue(Page)}, or on a subscriber thread, when invoked from {@link - * #request(long)}. - */ - @SuppressWarnings("ConditionalBreakInInfiniteLoop") - private void drain() { - // As per 3.4: this method SHOULD respect the responsiveness - // of its caller by returning in a timely manner. - // We accomplish this by a wait-free implementation. - if (draining.getAndIncrement() != 0) { - // Someone else is already draining, so do nothing, - // the other thread will notice that we attempted to drain. - // This also allows to abide by rule 3.3 and avoid - // cycles such as request() -> onNext() -> request() etc. - return; - } - int missed = 1; - // Note: when termination is detected inside this loop, - // we MUST call clear() manually. - for (; ; ) { - // The requested number of items at this point - long r = requested.get(); - // The number of items emitted thus far - long emitted = 0L; - while (emitted != r) { - if (cancelled) { - clear(); - return; - } - Object result; - try { - result = tryNext(); - } catch (Throwable t) { - doOnError(t); - clear(); - return; - } - if (result == null) { - break; - } - if (result instanceof Throwable) { - doOnError((Throwable) result); - clear(); - return; - } - doOnNext((ReactiveGraphNode) result); - emitted++; - } - if (isExhausted()) { - doOnComplete(); - clear(); - return; - } - if (cancelled) { - clear(); - return; - } - if (emitted != 0) { - // if any item was emitted, adjust the requested field - ReactiveOperators.subCap(requested, emitted); - } - // if another thread tried to call drain() while we were busy, - // then we should do another drain round. - missed = draining.addAndGet(-missed); - if (missed == 0) { - break; - } - } - } - - /** - * Tries to return the next item, if one is readily available, and returns {@code null} otherwise. - * - *

Cannot run concurrently due to the {@link #draining} field. - */ - @Nullable - private Object tryNext() { - Page current = pages.peek(); - if (current != null) { - if (current.hasMoreRows()) { - return current.nextRow(); - } else if (current.hasMorePages()) { - // Discard current page as it is consumed. - // Don't discard the last page though as we need it - // to test isExhausted(). It will be GC'ed when a terminal signal - // is issued anyway, so that's no big deal. - if (pages.poll() == null) { - throw new AssertionError("Queue is empty, this should not happen"); - } - current = pages.peek(); - // if the next page is readily available, - // serve its first row now, no need to wait - // for the next drain. - if (current != null && current.hasMoreRows()) { - return current.nextRow(); - } - } - } - // No items available right now. - return null; - } - - /** - * Returns {@code true} when the entire stream has been consumed and no more items can be emitted. - * When that is the case, a terminal signal is sent. - * - *

Cannot run concurrently due to the draining field. - */ - private boolean isExhausted() { - Page current = pages.peek(); - // Note: current can only be null when: - // 1) we are waiting for the first page and it hasn't arrived yet; - // 2) we just discarded the current page, but the next page hasn't arrived yet. - // In any case, a null here means it is not the last page, since the last page - // stays in the queue until the very end of the operation. - return current != null && !current.hasMoreRows() && !current.hasMorePages(); - } - - /** - * Runs on a subscriber thread initially, see {@link #start(Callable)}. Subsequent executions run - * on the thread that completes the pair of futures [current.fetchNextPage, pages.offer] and - * enqueues. This can be a driver IO thread or a subscriber thread; in both cases, cannot run - * concurrently due to the fact that one can only fetch the next page when the current one is - * arrived and enqueued. - */ - private void fetchNextPageAndEnqueue(@NonNull Page current) { - current - .fetchNextPage() - // as soon as the response arrives, - // create the new page - .handle( - (rs, t) -> { - Page page; - if (t == null) { - page = toPage(rs); - executionInfosSubscriber.onNext(rs.getRequestExecutionInfo()); - if (!page.hasMorePages()) { - executionInfosSubscriber.onComplete(); - } - } else { - // Unwrap CompletionExceptions created by combined futures - if (t instanceof CompletionException) { - t = t.getCause(); - } - page = toErrorPage(t); - executionInfosSubscriber.onError(t); - } - return page; - }) - .thenCompose(pages::offer) - .thenAccept( - page -> { - if (page.hasMorePages() && !cancelled) { - // preemptively fetch the next page, if available - fetchNextPageAndEnqueue(page); - } - drain(); - }); - } - - private void doOnNext(@NonNull ReactiveGraphNode result) { - try { - mainSubscriber.onNext(result); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an 
exception from onNext.", - t); - cancel(); - } - } - - private void doOnComplete() { - try { - // Then we signal onComplete as per rules 1.2 and 1.5 - mainSubscriber.onComplete(); - } catch (Throwable t) { - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - // package-private because it can be invoked by the publisher if the subscription handshake - // process fails. - void doOnError(@NonNull Throwable error) { - try { - // Then we signal the error downstream, as per rules 1.2 and 1.4. - mainSubscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error( - mainSubscriber - + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", - t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - - private void clear() { - // We don't need these pages anymore and should not hold references - // to them. - pages.clear(); - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - // Our own publishers do not keep references to this subscription, - // but downstream processors might do so, which is why we need to - // defensively clear the subscriber reference when we are done. - mainSubscriber = null; - executionInfosSubscriber = null; - } - - /** - * Converts the received result object into a {@link Page}. - * - * @param rs the result object to convert. - * @return a new page. - */ - @NonNull - private Page toPage(@NonNull AsyncGraphResultSet rs) { - ExecutionInfo executionInfo = rs.getRequestExecutionInfo(); - Iterator results = - Iterators.transform( - rs.currentPage().iterator(), - row -> new DefaultReactiveGraphNode(Objects.requireNonNull(row), executionInfo)); - return new Page(results, rs.hasMorePages() ? 
rs::fetchNextPage : null); - } - - /** Converts the given error into a {@link Page}, containing the error as its only element. */ - @NonNull - private Page toErrorPage(@NonNull Throwable t) { - return new Page(Iterators.singletonIterator(t), null); - } - - /** - * A page object comprises an iterator over the page's results, and a future pointing to the next - * page (or {@code null}, if it's the last page). - */ - static class Page { - - @NonNull final Iterator iterator; - - // A pointer to the next page, or null if this is the last page. - @Nullable final Callable> nextPage; - - /** called only from start() */ - Page(@NonNull Callable> nextPage) { - this.iterator = Collections.emptyIterator(); - this.nextPage = nextPage; - } - - Page( - @NonNull Iterator iterator, - @Nullable Callable> nextPage) { - this.iterator = iterator; - this.nextPage = nextPage; - } - - boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - CompletionStage fetchNextPage() { - try { - return Objects.requireNonNull(nextPage).call(); - } catch (Exception e) { - // This is a synchronous failure in the driver. - // It can happen in rare cases when the driver throws an exception instead of returning a - // failed future; e.g. if someone tries to execute a continuous paging request but the - // protocol version in use does not support it. - // We treat it as a failed future. 
- return CompletableFutures.failedFuture(e); - } - } - - boolean hasMoreRows() { - return iterator.hasNext(); - } - - @NonNull - Object nextRow() { - return iterator.next(); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java deleted file mode 100644 index cecc951a3ab..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import java.net.InetAddress; -import java.net.InetSocketAddress; - -class AddressFormatter { - - static String nullSafeToString(Object address) { - if (address instanceof InetAddress) { - return nullSafeToString((InetAddress) address); - } else if (address instanceof InetSocketAddress) { - return nullSafeToString((InetSocketAddress) address); - } else if (address instanceof String) { - return address.toString(); - } else { - return ""; - } - } - - static String nullSafeToString(InetAddress inetAddress) { - return inetAddress != null ? 
/**
 * Renders addresses of various shapes ({@link InetAddress}, {@link InetSocketAddress}, plain
 * strings) as display strings for Insights reporting, tolerating {@code null} inputs.
 */
class AddressFormatter {

  /**
   * Dispatches on the runtime type of {@code address}: inet and socket addresses are formatted via
   * the dedicated overloads, strings are returned as-is, and anything else yields an empty string.
   */
  static String nullSafeToString(Object address) {
    if (address instanceof InetAddress) {
      return nullSafeToString((InetAddress) address);
    }
    if (address instanceof InetSocketAddress) {
      return nullSafeToString((InetSocketAddress) address);
    }
    if (address instanceof String) {
      return (String) address;
    }
    return "";
  }

  /** Returns the numeric host address, or {@code null} when the input is {@code null}. */
  static String nullSafeToString(InetAddress inetAddress) {
    if (inetAddress == null) {
      return null;
    }
    return inetAddress.getHostAddress();
  }

  /**
   * Formats the address as {@code host:port}, using the literal host name when the socket address
   * is unresolved and the numeric IP otherwise; returns {@code null} for {@code null} input.
   */
  static String nullSafeToString(InetSocketAddress inetSocketAddress) {
    if (inetSocketAddress == null) {
      return null;
    }
    String host =
        inetSocketAddress.isUnresolved()
            ? nullSafeToString(inetSocketAddress.getHostName())
            : nullSafeToString(inetSocketAddress.getAddress());
    return String.format("%s:%s", host, inetSocketAddress.getPort());
  }
}
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.HashMap; -import java.util.Map; - -class ConfigAntiPatternsFinder { - Map findAntiPatterns(InternalDriverContext driverContext) { - Map antiPatterns = new HashMap<>(); - findSslAntiPattern(driverContext, antiPatterns); - return antiPatterns; - } - - private void findSslAntiPattern( - InternalDriverContext driverContext, Map antiPatterns) { - boolean isSslDefined = - driverContext.getConfig().getDefaultProfile().isDefined(SSL_ENGINE_FACTORY_CLASS); - boolean certValidation = - driverContext.getConfig().getDefaultProfile().getBoolean(SSL_HOSTNAME_VALIDATION, false); - if (isSslDefined && !certValidation) { - antiPatterns.put( - "sslWithoutCertValidation", - "Client-to-node encryption is enabled but server certificate validation is disabled"); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java deleted file mode 100644 index 7112b8dcdf7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.util.Collection; -import java.util.HashSet; -import java.util.Set; - -class DataCentersFinder { - - Set getDataCenters(InternalDriverContext driverContext) { - return getDataCenters( - driverContext.getMetadataManager().getMetadata().getNodes().values(), - driverContext.getConfig().getDefaultProfile()); - } - - @VisibleForTesting - Set getDataCenters(Collection nodes, DriverExecutionProfile executionProfile) { - - int remoteConnectionsLength = executionProfile.getInt(CONNECTION_POOL_REMOTE_SIZE); - - Set dataCenters = new HashSet<>(); - for (Node n : nodes) { - NodeDistance distance = n.getDistance(); - - if (distance.equals(NodeDistance.LOCAL) - || (distance.equals(NodeDistance.REMOTE) && remoteConnectionsLength > 0)) { - dataCenters.add(n.getDatacenter()); - } - } - return dataCenters; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java deleted file mode 
100644 index a7c92d80d96..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.GRAPH_TRAVERSAL_SOURCE; - -import com.datastax.dse.driver.internal.core.insights.PackageUtil.ClassSettingDetails; -import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; -import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; -import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -class ExecutionProfilesInfoFinder { - Map getExecutionProfilesInfo( - InternalDriverContext driverContext) { - - 
SpecificExecutionProfile defaultProfile = - mapToSpecificProfile(driverContext.getConfig().getDefaultProfile()); - - return driverContext.getConfig().getProfiles().entrySet().stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, - e -> { - if (isNotDefaultProfile(e)) { - SpecificExecutionProfile specificExecutionProfile = - mapToSpecificProfile(e.getValue()); - return retainOnlyDifferentFieldsFromSpecificProfile( - defaultProfile, specificExecutionProfile); - } else { - return defaultProfile; - } - })); - } - - private boolean isNotDefaultProfile(Map.Entry e) { - return !e.getKey().equals("default"); - } - - private SpecificExecutionProfile retainOnlyDifferentFieldsFromSpecificProfile( - SpecificExecutionProfile defaultProfile, SpecificExecutionProfile specificExecutionProfile) { - Integer readTimeout = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getReadTimeout); - LoadBalancingInfo loadBalancingInfo = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getLoadBalancing); - - SpeculativeExecutionInfo speculativeExecutionInfo = - getIfDifferentOrReturnNull( - defaultProfile, - specificExecutionProfile, - SpecificExecutionProfile::getSpeculativeExecution); - - String consistency = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getConsistency); - - String serialConsistency = - getIfDifferentOrReturnNull( - defaultProfile, - specificExecutionProfile, - SpecificExecutionProfile::getSerialConsistency); - - Map graphOptions = - getIfDifferentOrReturnNull( - defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getGraphOptions); - - return new SpecificExecutionProfile( - readTimeout, - loadBalancingInfo, - speculativeExecutionInfo, - consistency, - serialConsistency, - graphOptions); - } - - private T getIfDifferentOrReturnNull( - SpecificExecutionProfile defaultProfile, - SpecificExecutionProfile 
profile, - Function valueExtractor) { - T defaultProfileValue = valueExtractor.apply(defaultProfile); - T specificProfileValue = valueExtractor.apply(profile); - if (defaultProfileValue.equals(specificProfileValue)) { - return null; - } else { - return specificProfileValue; - } - } - - private SpecificExecutionProfile mapToSpecificProfile( - DriverExecutionProfile driverExecutionProfile) { - return new SpecificExecutionProfile( - (int) driverExecutionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT).toMillis(), - getLoadBalancingInfo(driverExecutionProfile), - getSpeculativeExecutionInfo(driverExecutionProfile), - driverExecutionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY), - driverExecutionProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY), - getGraphOptions(driverExecutionProfile)); - } - - private SpeculativeExecutionInfo getSpeculativeExecutionInfo( - DriverExecutionProfile driverExecutionProfile) { - Map options = new LinkedHashMap<>(); - - putIfExists( - options, - "maxSpeculativeExecutions", - DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, - driverExecutionProfile); - putIfExists( - options, "delay", DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, driverExecutionProfile); - - ClassSettingDetails speculativeExecutionDetails = - PackageUtil.getSpeculativeExecutionDetails( - driverExecutionProfile.getString( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS)); - return new SpeculativeExecutionInfo( - speculativeExecutionDetails.getClassName(), - options, - speculativeExecutionDetails.getFullPackage()); - } - - private void putIfExists( - Map options, - String key, - DefaultDriverOption option, - DriverExecutionProfile executionProfile) { - if (executionProfile.isDefined(option)) { - options.put(key, executionProfile.getInt(option)); - } - } - - private LoadBalancingInfo getLoadBalancingInfo(DriverExecutionProfile driverExecutionProfile) { - Map options = new LinkedHashMap<>(); - if 
(driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { - options.put( - "localDataCenter", - driverExecutionProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)); - } - @SuppressWarnings("deprecation") - boolean hasNodeFiltering = - driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS) - || driverExecutionProfile.isDefined( - DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS); - options.put("filterFunction", hasNodeFiltering); - ClassSettingDetails loadBalancingDetails = - PackageUtil.getLoadBalancingDetails( - driverExecutionProfile.getString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS)); - return new LoadBalancingInfo( - loadBalancingDetails.getClassName(), options, loadBalancingDetails.getFullPackage()); - } - - private Map getGraphOptions(DriverExecutionProfile driverExecutionProfile) { - Map graphOptionsMap = new HashMap<>(); - String graphTraversalSource = driverExecutionProfile.getString(GRAPH_TRAVERSAL_SOURCE, null); - if (graphTraversalSource != null) { - graphOptionsMap.put("source", graphTraversalSource); - } - return graphOptionsMap; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java deleted file mode 100644 index f19687adf45..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java +++ /dev/null @@ -1,491 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.AUTH_PROVIDER_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.HEARTBEAT_INTERVAL; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_COMPRESSION; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.internal.core.insights.PackageUtil.ClassSettingDetails; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.dse.driver.internal.core.insights.exceptions.InsightEventFormatException; -import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType; -import com.datastax.dse.driver.internal.core.insights.schema.Insight; -import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata; -import com.datastax.dse.driver.internal.core.insights.schema.InsightType; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsStatusData; -import 
com.datastax.dse.driver.internal.core.insights.schema.PoolSizeByHostDistance; -import com.datastax.dse.driver.internal.core.insights.schema.SSL; -import com.datastax.dse.driver.internal.core.insights.schema.SessionStateForNode; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ScheduledExecutorService; -import 
java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class InsightsClient { - private static final Logger LOGGER = LoggerFactory.getLogger(InsightsClient.class); - private static final String STARTUP_MESSAGE_NAME = "driver.startup"; - private static final String STATUS_MESSAGE_NAME = "driver.status"; - private static final String REPORT_INSIGHT_RPC = "CALL InsightsRpc.reportInsight(?)"; - private static final Map TAGS = ImmutableMap.of("language", "java"); - private static final String STARTUP_VERSION_1_ID = "v1"; - private static final String STATUS_VERSION_1_ID = "v1"; - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - private static final int MAX_NUMBER_OF_STATUS_ERROR_LOGS = 5; - static final String DEFAULT_JAVA_APPLICATION = "Default Java Application"; - - private final ControlConnection controlConnection; - private final String id = Uuids.random().toString(); - private final InsightsConfiguration insightsConfiguration; - private final AtomicInteger numberOfStatusEventErrors = new AtomicInteger(); - - private final InternalDriverContext driverContext; - private final Supplier timestampSupplier; - private final PlatformInfoFinder platformInfoFinder; - private final ReconnectionPolicyInfoFinder reconnectionPolicyInfoInfoFinder; - private final ExecutionProfilesInfoFinder executionProfilesInfoFinder; - private final ConfigAntiPatternsFinder configAntiPatternsFinder; - private final DataCentersFinder dataCentersFinder; - private final StackTraceElement[] initCallStackTrace; - - private volatile ScheduledFuture scheduleInsightsTask; - - public static InsightsClient createInsightsClient( - InsightsConfiguration insightsConfiguration, - InternalDriverContext driverContext, - StackTraceElement[] initCallStackTrace) { - 
DataCentersFinder dataCentersFinder = new DataCentersFinder(); - return new InsightsClient( - driverContext, - () -> new Date().getTime(), - insightsConfiguration, - new PlatformInfoFinder(), - new ReconnectionPolicyInfoFinder(), - new ExecutionProfilesInfoFinder(), - new ConfigAntiPatternsFinder(), - dataCentersFinder, - initCallStackTrace); - } - - InsightsClient( - InternalDriverContext driverContext, - Supplier timestampSupplier, - InsightsConfiguration insightsConfiguration, - PlatformInfoFinder platformInfoFinder, - ReconnectionPolicyInfoFinder reconnectionPolicyInfoInfoFinder, - ExecutionProfilesInfoFinder executionProfilesInfoFinder, - ConfigAntiPatternsFinder configAntiPatternsFinder, - DataCentersFinder dataCentersFinder, - StackTraceElement[] initCallStackTrace) { - this.driverContext = driverContext; - this.controlConnection = driverContext.getControlConnection(); - this.timestampSupplier = timestampSupplier; - this.insightsConfiguration = insightsConfiguration; - this.platformInfoFinder = platformInfoFinder; - this.reconnectionPolicyInfoInfoFinder = reconnectionPolicyInfoInfoFinder; - this.executionProfilesInfoFinder = executionProfilesInfoFinder; - this.configAntiPatternsFinder = configAntiPatternsFinder; - this.dataCentersFinder = dataCentersFinder; - this.initCallStackTrace = initCallStackTrace; - } - - public CompletionStage sendStartupMessage() { - try { - if (!shouldSendEvent()) { - return CompletableFuture.completedFuture(null); - } else { - String startupMessage = createStartupMessage(); - return sendJsonMessage(startupMessage) - .whenComplete( - (aVoid, throwable) -> { - if (throwable != null) { - LOGGER.debug( - "Error while sending startup message to Insights. 
Message was: " - + trimToFirst500characters(startupMessage), - throwable); - } - }); - } - } catch (Exception e) { - LOGGER.debug("Unexpected error while sending startup message to Insights.", e); - return CompletableFutures.failedFuture(e); - } - } - - private static String trimToFirst500characters(String startupMessage) { - return startupMessage.substring(0, Math.min(startupMessage.length(), 500)); - } - - public void scheduleStatusMessageSend() { - if (!shouldSendEvent()) { - return; - } - scheduleInsightsTask = - scheduleInsightsTask( - insightsConfiguration.getStatusEventDelayMillis(), - insightsConfiguration.getExecutor(), - this::sendStatusMessage); - } - - public void shutdown() { - if (scheduleInsightsTask != null) { - scheduleInsightsTask.cancel(false); - } - } - - @VisibleForTesting - public CompletionStage sendStatusMessage() { - try { - String statusMessage = createStatusMessage(); - CompletionStage result = sendJsonMessage(statusMessage); - return result.whenComplete( - (aVoid, throwable) -> { - if (throwable != null) { - if (numberOfStatusEventErrors.getAndIncrement() < MAX_NUMBER_OF_STATUS_ERROR_LOGS) { - LOGGER.debug( - "Error while sending status message to Insights. 
Message was: " - + trimToFirst500characters(statusMessage), - throwable); - } - } - }); - } catch (Exception e) { - LOGGER.debug("Unexpected error while sending status message to Insights.", e); - return CompletableFutures.failedFuture(e); - } - } - - private CompletionStage sendJsonMessage(String jsonMessage) { - - QueryOptions queryOptions = createQueryOptionsWithJson(jsonMessage); - String logPrefix = driverContext.getSessionName(); - Duration timeout = - driverContext - .getConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); - LOGGER.debug("sending JSON message: {}", jsonMessage); - - Query query = new Query(REPORT_INSIGHT_RPC, queryOptions); - return AdminRequestHandler.call(controlConnection.channel(), query, timeout, logPrefix).start(); - } - - private QueryOptions createQueryOptionsWithJson(String json) { - TypeCodec codec = - driverContext.getCodecRegistry().codecFor(DataTypes.TEXT, String.class); - ByteBuffer startupMessageSerialized = codec.encode(json, DseProtocolVersion.DSE_V2); - return new QueryOptions( - QueryOptions.DEFAULT.consistency, - Collections.singletonList(startupMessageSerialized), - QueryOptions.DEFAULT.namedValues, - QueryOptions.DEFAULT.skipMetadata, - QueryOptions.DEFAULT.pageSize, - QueryOptions.DEFAULT.pagingState, - QueryOptions.DEFAULT.serialConsistency, - QueryOptions.DEFAULT.defaultTimestamp, - QueryOptions.DEFAULT.keyspace, - QueryOptions.DEFAULT.nowInSeconds); - } - - private boolean shouldSendEvent() { - try { - return insightsConfiguration.isMonitorReportingEnabled() - && InsightsSupportVerifier.supportsInsights( - driverContext.getMetadataManager().getMetadata().getNodes().values()); - } catch (Exception e) { - LOGGER.debug("Unexpected error while checking Insights support.", e); - return false; - } - } - - @VisibleForTesting - String createStartupMessage() { - InsightMetadata insightMetadata = createMetadata(STARTUP_MESSAGE_NAME, STARTUP_VERSION_1_ID); - InsightsStartupData data = 
createStartupData(); - - try { - return OBJECT_MAPPER.writeValueAsString(new Insight<>(insightMetadata, data)); - } catch (JsonProcessingException e) { - throw new InsightEventFormatException("Problem when creating: " + STARTUP_MESSAGE_NAME, e); - } - } - - @VisibleForTesting - String createStatusMessage() { - InsightMetadata insightMetadata = createMetadata(STATUS_MESSAGE_NAME, STATUS_VERSION_1_ID); - InsightsStatusData data = createStatusData(); - - try { - return OBJECT_MAPPER.writeValueAsString(new Insight<>(insightMetadata, data)); - } catch (JsonProcessingException e) { - throw new InsightEventFormatException("Problem when creating: " + STATUS_MESSAGE_NAME, e); - } - } - - private InsightsStatusData createStatusData() { - Map startupOptions = driverContext.getStartupOptions(); - return InsightsStatusData.builder() - .withClientId(getClientId(startupOptions)) - .withSessionId(id) - .withControlConnection(getControlConnectionSocketAddress()) - .withConnectedNodes(getConnectedNodes()) - .build(); - } - - private Map getConnectedNodes() { - Map pools = driverContext.getPoolManager().getPools(); - return pools.entrySet().stream() - .collect( - Collectors.toMap( - entry -> AddressFormatter.nullSafeToString(entry.getKey().getEndPoint().resolve()), - this::constructSessionStateForNode)); - } - - private SessionStateForNode constructSessionStateForNode(Map.Entry entry) { - return new SessionStateForNode( - entry.getKey().getOpenConnections(), entry.getValue().getInFlight()); - } - - private InsightsStartupData createStartupData() { - Map startupOptions = driverContext.getStartupOptions(); - return InsightsStartupData.builder() - .withClientId(getClientId(startupOptions)) - .withSessionId(id) - .withApplicationName(getApplicationName(startupOptions)) - .withApplicationVersion(getApplicationVersion(startupOptions)) - .withDriverName(getDriverName(startupOptions)) - .withDriverVersion(getDriverVersion(startupOptions)) - .withContactPoints( - getResolvedContactPoints( - 
driverContext.getMetadataManager().getContactPoints().stream() - .map(n -> n.getEndPoint().resolve()) - .filter(InetSocketAddress.class::isInstance) - .map(InetSocketAddress.class::cast) - .collect(Collectors.toSet()))) - .withInitialControlConnection(getControlConnectionSocketAddress()) - .withProtocolVersion(driverContext.getProtocolVersion().getCode()) - .withLocalAddress(getLocalAddress()) - .withExecutionProfiles(executionProfilesInfoFinder.getExecutionProfilesInfo(driverContext)) - .withPoolSizeByHostDistance(getPoolSizeByHostDistance()) - .withHeartbeatInterval( - driverContext - .getConfig() - .getDefaultProfile() - .getDuration(HEARTBEAT_INTERVAL) - .toMillis()) - .withCompression( - driverContext.getConfig().getDefaultProfile().getString(PROTOCOL_COMPRESSION, "none")) - .withReconnectionPolicy( - reconnectionPolicyInfoInfoFinder.getReconnectionPolicyInfo( - driverContext.getReconnectionPolicy(), - driverContext.getConfig().getDefaultProfile())) - .withSsl(getSsl()) - .withAuthProvider(getAuthProvider()) - .withOtherOptions(getOtherOptions()) - .withPlatformInfo(platformInfoFinder.getInsightsPlatformInfo()) - .withConfigAntiPatterns(configAntiPatternsFinder.findAntiPatterns(driverContext)) - .withPeriodicStatusInterval(getPeriodicStatusInterval()) - .withHostName(getLocalHostName()) - .withApplicationNameWasGenerated(isApplicationNameGenerated(startupOptions)) - .withDataCenters(dataCentersFinder.getDataCenters(driverContext)) - .build(); - } - - private AuthProviderType getAuthProvider() { - String authProviderClassName = - driverContext - .getConfig() - .getDefaultProfile() - .getString(AUTH_PROVIDER_CLASS, "NoAuthProvider"); - ClassSettingDetails authProviderDetails = - PackageUtil.getAuthProviderDetails(authProviderClassName); - return new AuthProviderType( - authProviderDetails.getClassName(), authProviderDetails.getFullPackage()); - } - - private long getPeriodicStatusInterval() { - return 
TimeUnit.MILLISECONDS.toSeconds(insightsConfiguration.getStatusEventDelayMillis()); - } - - @VisibleForTesting - static Map> getResolvedContactPoints(Set contactPoints) { - if (contactPoints == null) { - return Collections.emptyMap(); - } - return contactPoints.stream() - .collect( - Collectors.groupingBy( - InetSocketAddress::getHostName, - Collectors.mapping(AddressFormatter::nullSafeToString, Collectors.toList()))); - } - - private String getDriverVersion(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.DRIVER_VERSION_KEY); - } - - private String getDriverName(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.DRIVER_NAME_KEY); - } - - private String getClientId(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.CLIENT_ID_KEY); - } - - private boolean isApplicationNameGenerated(Map startupOptions) { - return startupOptions.get(StartupOptionsBuilder.APPLICATION_NAME_KEY) == null; - } - - private String getApplicationVersion(Map startupOptions) { - String applicationVersion = startupOptions.get(StartupOptionsBuilder.APPLICATION_VERSION_KEY); - if (applicationVersion == null) { - return ""; - } - return applicationVersion; - } - - private String getApplicationName(Map startupOptions) { - String applicationName = startupOptions.get(StartupOptionsBuilder.APPLICATION_NAME_KEY); - if (applicationName == null || applicationName.isEmpty()) { - return getClusterCreateCaller(initCallStackTrace); - } - return applicationName; - } - - @VisibleForTesting - static String getClusterCreateCaller(StackTraceElement[] stackTrace) { - for (int i = 0; i < stackTrace.length - 1; i++) { - if (isClusterStackTrace(stackTrace[i])) { - int nextElement = i + 1; - if (!isClusterStackTrace(stackTrace[nextElement])) { - return stackTrace[nextElement].getClassName(); - } - } - } - return DEFAULT_JAVA_APPLICATION; - } - - private static boolean isClusterStackTrace(StackTraceElement stackTraceElement) { - return 
stackTraceElement.getClassName().equals(DefaultDriverContext.class.getName()) - || stackTraceElement.getClassName().equals(SessionBuilder.class.getName()); - } - - private String getLocalHostName() { - try { - return InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - LOGGER.warn("Can not resolve the name of a host, returning null", e); - return null; - } - } - - private Map getOtherOptions() { - return Collections.emptyMap(); // todo - } - - private SSL getSsl() { - boolean isSslDefined = - driverContext.getConfig().getDefaultProfile().isDefined(SSL_ENGINE_FACTORY_CLASS); - boolean certValidation = - driverContext.getConfig().getDefaultProfile().getBoolean(SSL_HOSTNAME_VALIDATION, false); - return new SSL(isSslDefined, certValidation); - } - - private PoolSizeByHostDistance getPoolSizeByHostDistance() { - - return new PoolSizeByHostDistance( - driverContext.getConfig().getDefaultProfile().getInt(CONNECTION_POOL_LOCAL_SIZE), - driverContext.getConfig().getDefaultProfile().getInt(CONNECTION_POOL_REMOTE_SIZE), - 0); - } - - private String getControlConnectionSocketAddress() { - SocketAddress controlConnectionAddress = controlConnection.channel().getEndPoint().resolve(); - return AddressFormatter.nullSafeToString(controlConnectionAddress); - } - - private String getLocalAddress() { - SocketAddress controlConnectionLocalAddress = controlConnection.channel().localAddress(); - if (controlConnectionLocalAddress instanceof InetSocketAddress) { - return AddressFormatter.nullSafeToString( - ((InetSocketAddress) controlConnectionLocalAddress).getAddress()); - } - return null; - } - - private InsightMetadata createMetadata(String messageName, String messageVersion) { - return new InsightMetadata( - messageName, timestampSupplier.get(), TAGS, InsightType.EVENT, messageVersion); - } - - @VisibleForTesting - static ScheduledFuture scheduleInsightsTask( - long statusEventDelayMillis, - ScheduledExecutorService scheduledTasksExecutor, - Runnable 
runnable) { - long initialDelay = - (long) Math.floor(statusEventDelayMillis - zeroToTenPercentRandom(statusEventDelayMillis)); - return scheduledTasksExecutor.scheduleWithFixedDelay( - runnable, initialDelay, statusEventDelayMillis, TimeUnit.MILLISECONDS); - } - - private static double zeroToTenPercentRandom(long statusEventDelayMillis) { - return 0.1 * statusEventDelayMillis * Math.random(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java deleted file mode 100644 index ec016ef52d8..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Collection; - -class InsightsSupportVerifier { - private static final Version minDse6Version = Version.parse("6.0.5"); - private static final Version minDse51Version = Version.parse("5.1.13"); - private static final Version dse600Version = Version.parse("6.0.0"); - - static boolean supportsInsights(Collection nodes) { - assert minDse6Version != null; - assert dse600Version != null; - assert minDse51Version != null; - if (nodes.isEmpty()) return false; - - for (Node node : nodes) { - Object version = node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (version == null) { - return false; - } - Version dseVersion = (Version) version; - if (!(dseVersion.compareTo(minDse6Version) >= 0 - || (dseVersion.compareTo(dse600Version) < 0 - && dseVersion.compareTo(minDse51Version) >= 0))) { - return false; - } - } - return true; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java deleted file mode 100644 index 3c61dec4f20..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import java.util.Arrays; -import java.util.regex.Pattern; - -class PackageUtil { - static final String DEFAULT_SPECULATIVE_EXECUTION_PACKAGE = - "com.datastax.oss.driver.internal.core.specex"; - static final String DEFAULT_LOAD_BALANCING_PACKAGE = - "com.datastax.oss.driver.internal.core.loadbalancing"; - static final String DEFAULT_AUTH_PROVIDER_PACKAGE = "com.datastax.oss.driver.internal.core.auth"; - private static final Pattern PACKAGE_SPLIT_REGEX = Pattern.compile("\\."); - private static final Joiner DOT_JOINER = Joiner.on("."); - - static String getNamespace(Class tClass) { - String namespace = ""; - Package packageInfo = tClass.getPackage(); - if (packageInfo != null) { - namespace = packageInfo.getName(); - } - return namespace; - } - - static ClassSettingDetails getSpeculativeExecutionDetails(String classSetting) { - return getClassSettingDetails(classSetting, DEFAULT_SPECULATIVE_EXECUTION_PACKAGE); - } - - static ClassSettingDetails getLoadBalancingDetails(String classSetting) { - return getClassSettingDetails(classSetting, DEFAULT_LOAD_BALANCING_PACKAGE); - } - - static ClassSettingDetails getAuthProviderDetails(String classSetting) { - return getClassSettingDetails(classSetting, DEFAULT_AUTH_PROVIDER_PACKAGE); - } - - private static ClassSettingDetails getClassSettingDetails( - String classSetting, String packageName) { - String 
className = getClassName(classSetting); - String fullPackage = getFullPackageOrDefault(classSetting, packageName); - return new ClassSettingDetails(className, fullPackage); - } - - @VisibleForTesting - static String getClassName(String classSetting) { - String[] split = PACKAGE_SPLIT_REGEX.split(classSetting); - if (split.length == 0) { - return ""; - } - return split[split.length - 1]; - } - - @VisibleForTesting - static String getFullPackageOrDefault(String classSetting, String defaultValue) { - String[] split = PACKAGE_SPLIT_REGEX.split(classSetting); - if (split.length <= 1) return defaultValue; - return DOT_JOINER.join(Arrays.copyOf(split, split.length - 1)); - } - - static class ClassSettingDetails { - private final String className; - private final String fullPackage; - - ClassSettingDetails(String className, String fullPackage) { - this.className = className; - this.fullPackage = fullPackage; - } - - String getClassName() { - return className; - } - - String getFullPackage() { - return fullPackage; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java deleted file mode 100644 index 30d41d40836..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.OS; -import static com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; - -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.CPUS; -import com.datastax.oss.driver.internal.core.os.Native; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; -import java.util.function.Function; -import java.util.regex.Pattern; - -class PlatformInfoFinder { - private static final String MAVEN_IGNORE_LINE = "The following files have been resolved:"; - private static final Pattern DEPENDENCY_SPLIT_REGEX = Pattern.compile(":"); - static final String UNVERIFIED_RUNTIME_VERSION = "UNVERIFIED"; - private final Function propertiesUrlProvider; - - @SuppressWarnings("UnnecessaryLambda") - private static final Function M2_PROPERTIES_PROVIDER = - d -> { - ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); - if (contextClassLoader == null) { - 
contextClassLoader = PlatformInfoFinder.class.getClassLoader(); - } - return contextClassLoader.getResource( - "META-INF/maven/" + d.groupId + "/" + d.artifactId + "/pom.properties"); - }; - - PlatformInfoFinder() { - this(M2_PROPERTIES_PROVIDER); - } - - @VisibleForTesting - PlatformInfoFinder(Function pomPropertiesUrlProvider) { - this.propertiesUrlProvider = pomPropertiesUrlProvider; - } - - InsightsPlatformInfo getInsightsPlatformInfo() { - OS os = getOsInfo(); - CPUS cpus = getCpuInfo(); - Map> runtimeInfo = getRuntimeInfo(); - - return new InsightsPlatformInfo(os, cpus, runtimeInfo); - } - - private Map> getRuntimeInfo() { - Map coreDeps = - fetchDependenciesFromFile( - this.getClass().getResourceAsStream("/com/datastax/dse/driver/internal/deps.txt")); - - Map queryBuilderDeps = - fetchDependenciesFromFile( - this.getClass() - .getResourceAsStream("/com/datastax/dse/driver/internal/querybuilder/deps.txt")); - - Map mapperProcessorDeps = - fetchDependenciesFromFile( - this.getClass() - .getResourceAsStream( - "/com/datastax/dse/driver/internal/mapper/processor/deps.txt")); - - Map mapperRuntimeDeps = - fetchDependenciesFromFile( - this.getClass() - .getResourceAsStream("/com/datastax/dse/driver/internal/mapper/deps.txt")); - - Map> runtimeDependencies = - new LinkedHashMap<>(); - putIfNonEmpty(coreDeps, runtimeDependencies, "core"); - putIfNonEmpty(queryBuilderDeps, runtimeDependencies, "query-builder"); - putIfNonEmpty(mapperProcessorDeps, runtimeDependencies, "mapper-processor"); - putIfNonEmpty(mapperRuntimeDeps, runtimeDependencies, "mapper-runtime"); - addJavaVersion(runtimeDependencies); - return runtimeDependencies; - } - - private void putIfNonEmpty( - Map moduleDependencies, - Map> runtimeDependencies, - String moduleName) { - if (!moduleDependencies.isEmpty()) { - runtimeDependencies.put(moduleName, moduleDependencies); - } - } - - @VisibleForTesting - void addJavaVersion(Map> runtimeDependencies) { - Package javaPackage = Runtime.class.getPackage(); 
- Map javaDependencies = new LinkedHashMap<>(); - javaDependencies.put( - "version", toSameRuntimeAndCompileVersion(javaPackage.getImplementationVersion())); - javaDependencies.put( - "vendor", toSameRuntimeAndCompileVersion(javaPackage.getImplementationVendor())); - javaDependencies.put( - "title", toSameRuntimeAndCompileVersion(javaPackage.getImplementationTitle())); - putIfNonEmpty(javaDependencies, runtimeDependencies, "java"); - } - - private RuntimeAndCompileTimeVersions toSameRuntimeAndCompileVersion(String version) { - return new RuntimeAndCompileTimeVersions(version, version, false); - } - - /** - * Method is fetching dependencies from file. Lines in file should be in format: - * com.organization:artifactId:jar:1.2.0 or com.organization:artifactId:jar:native:1.2.0 - * - *

For such file the output will be: Map - * "com.organization:artifactId",{"runtimeVersion":"1.2.0", "compileVersion:"1.2.0", "optional": - * false} Duplicates will be omitted. If there are two dependencies for the exactly the same - * organizationId:artifactId it is not deterministic which version will be taken. In the case of - * an error while opening file this method will fail silently returning an empty Map - */ - @VisibleForTesting - Map fetchDependenciesFromFile(InputStream inputStream) { - Map dependencies = new LinkedHashMap<>(); - if (inputStream == null) { - return dependencies; - } - try { - List dependenciesFromFile = extractMavenDependenciesFromFile(inputStream); - for (DependencyFromFile d : dependenciesFromFile) { - dependencies.put(formatDependencyName(d), getRuntimeAndCompileVersion(d)); - } - } catch (IOException e) { - return dependencies; - } - return dependencies; - } - - private RuntimeAndCompileTimeVersions getRuntimeAndCompileVersion(DependencyFromFile d) { - URL url = propertiesUrlProvider.apply(d); - if (url == null) { - return new RuntimeAndCompileTimeVersions( - UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); - } - Properties properties = new Properties(); - try { - properties.load(url.openStream()); - } catch (IOException e) { - return new RuntimeAndCompileTimeVersions( - UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); - } - Object version = properties.get("version"); - if (version == null) { - return new RuntimeAndCompileTimeVersions( - UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); - } else { - return new RuntimeAndCompileTimeVersions(version.toString(), d.getVersion(), d.isOptional()); - } - } - - private String formatDependencyName(DependencyFromFile d) { - return String.format("%s:%s", d.getGroupId(), d.getArtifactId()); - } - - private List extractMavenDependenciesFromFile(InputStream inputStream) - throws IOException { - List dependenciesFromFile = new ArrayList<>(); - BufferedReader reader 
= - new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); - for (String line; (line = reader.readLine()) != null; ) { - if (lineWithDependencyInfo(line)) { - dependenciesFromFile.add(extractDependencyFromLine(line.trim())); - } - } - return dependenciesFromFile; - } - - private DependencyFromFile extractDependencyFromLine(String line) { - String[] split = DEPENDENCY_SPLIT_REGEX.split(line); - if (split.length == 6) { // case for i.e.: com.github.jnr:jffi:jar:native:1.2.16:compile - return new DependencyFromFile(split[0], split[1], split[4], checkIsOptional(split[5])); - } else { // case for normal: org.ow2.asm:asm:jar:5.0.3:compile - return new DependencyFromFile(split[0], split[1], split[3], checkIsOptional(split[4])); - } - } - - private boolean checkIsOptional(String scope) { - return scope.contains("(optional)"); - } - - private boolean lineWithDependencyInfo(String line) { - return (!line.equals(MAVEN_IGNORE_LINE) && !line.isEmpty()); - } - - private CPUS getCpuInfo() { - int numberOfProcessors = Runtime.getRuntime().availableProcessors(); - String model = Native.getCpu(); - return new CPUS(numberOfProcessors, model); - } - - private OS getOsInfo() { - String osName = System.getProperty("os.name"); - String osVersion = System.getProperty("os.version"); - String osArch = System.getProperty("os.arch"); - return new OS(osName, osVersion, osArch); - } - - static class DependencyFromFile { - private final String groupId; - private final String artifactId; - private final String version; - private final boolean optional; - - DependencyFromFile(String groupId, String artifactId, String version, boolean optional) { - this.groupId = groupId; - this.artifactId = artifactId; - this.version = version; - this.optional = optional; - } - - String getGroupId() { - return groupId; - } - - String getArtifactId() { - return artifactId; - } - - String getVersion() { - return version; - } - - boolean isOptional() { - return optional; - } - - @Override - 
public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DependencyFromFile)) { - return false; - } - DependencyFromFile that = (DependencyFromFile) o; - return optional == that.optional - && Objects.equals(groupId, that.groupId) - && Objects.equals(artifactId, that.artifactId) - && Objects.equals(version, that.version); - } - - @Override - public int hashCode() { - return Objects.hash(groupId, artifactId, version, optional); - } - - @Override - public String toString() { - return "DependencyFromFile{" - + "groupId='" - + groupId - + '\'' - + ", artifactId='" - + artifactId - + '\'' - + ", version='" - + version - + '\'' - + ", optional=" - + optional - + '}'; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java deleted file mode 100644 index af8aff74035..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy; -import java.util.HashMap; -import java.util.Map; - -class ReconnectionPolicyInfoFinder { - ReconnectionPolicyInfo getReconnectionPolicyInfo( - ReconnectionPolicy reconnectionPolicy, DriverExecutionProfile executionProfile) { - Class reconnectionPolicyClass = reconnectionPolicy.getClass(); - String type = reconnectionPolicyClass.getSimpleName(); - String namespace = PackageUtil.getNamespace(reconnectionPolicyClass); - Map options = new HashMap<>(); - if (reconnectionPolicy instanceof ConstantReconnectionPolicy) { - options.put( - "delayMs", - executionProfile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY).toMillis()); - } else if (reconnectionPolicy instanceof ExponentialReconnectionPolicy) { - ExponentialReconnectionPolicy exponentialReconnectionPolicy = - (ExponentialReconnectionPolicy) reconnectionPolicy; - options.put("maxDelayMs", exponentialReconnectionPolicy.getMaxDelayMs()); - options.put("baseDelayMs", exponentialReconnectionPolicy.getBaseDelayMs()); - options.put("maxAttempts", exponentialReconnectionPolicy.getMaxAttempts()); - } - return new ReconnectionPolicyInfo(type, options, namespace); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java deleted file mode 100644 index ac27bb76389..00000000000 --- 
a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.configuration; - -import io.netty.util.concurrent.EventExecutor; - -public class InsightsConfiguration { - private final boolean monitorReportingEnabled; - private final long statusEventDelayMillis; - private final EventExecutor executor; - - public InsightsConfiguration( - boolean monitorReportingEnabled, long statusEventDelayMillis, EventExecutor executor) { - this.monitorReportingEnabled = monitorReportingEnabled; - this.statusEventDelayMillis = statusEventDelayMillis; - this.executor = executor; - } - - public boolean isMonitorReportingEnabled() { - return monitorReportingEnabled; - } - - public long getStatusEventDelayMillis() { - return statusEventDelayMillis; - } - - public EventExecutor getExecutor() { - return executor; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java 
deleted file mode 100644 index cfce68971ef..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.exceptions; - -public class InsightEventFormatException extends RuntimeException { - - public InsightEventFormatException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java deleted file mode 100644 index 18aec53e899..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class AuthProviderType { - @JsonProperty("type") - private final String type; - - @JsonProperty("namespace") - private final String namespace; - - @JsonCreator - public AuthProviderType( - @JsonProperty("type") String type, @JsonProperty("namespace") String namespace) { - this.type = type; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof AuthProviderType)) { - return false; - } - AuthProviderType that = (AuthProviderType) o; - return Objects.equals(type, that.type) && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, namespace); - } - - @Override - public String toString() { - return "AuthProviderType{" + "type='" + type + '\'' + ", namespace='" + namespace + '\'' + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java deleted file mode 100644 index 
ca4e6455345..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; - -@JsonIgnoreProperties(ignoreUnknown = true) -@JsonInclude(JsonInclude.Include.NON_EMPTY) -public class Insight { - @JsonProperty("metadata") - private final InsightMetadata metadata; - - @JsonProperty("data") - private final T insightData; - - @JsonCreator - public Insight(@JsonProperty("metadata") InsightMetadata metadata, @JsonProperty("data") T data) { - this.metadata = metadata; - this.insightData = data; - } - - public InsightMetadata getMetadata() { - return metadata; - } - - public T getInsightData() { - return insightData; - } - - @Override - public String toString() { - return "Insight{" + "metadata=" + metadata + ", insightData=" + insightData + '}'; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java deleted file mode 100644 index cfa2644b0c7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class InsightMetadata { - @JsonProperty("name") - private final String name; - - @JsonProperty("timestamp") - private final long timestamp; - - @JsonProperty("tags") - private final Map tags; - - @JsonProperty("insightType") - private final InsightType insightType; - - @JsonProperty("insightMappingId") - @JsonInclude(JsonInclude.Include.NON_NULL) - private String insightMappingId; - - @JsonCreator - public InsightMetadata( - @JsonProperty("name") String name, - @JsonProperty("timestamp") long timestamp, - @JsonProperty("tags") Map tags, - @JsonProperty("insightType") InsightType insightType, - @JsonProperty("insightMappingId") String insightMappingId) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "name is required"); - - this.name = name; - this.timestamp = timestamp; - this.tags = tags; - this.insightType = insightType; - this.insightMappingId = insightMappingId; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof InsightMetadata)) { - return false; - } - InsightMetadata that = (InsightMetadata) o; - return Objects.equals(name, that.name) - && Objects.equals(timestamp, that.timestamp) - && Objects.equals(tags, that.tags) - && insightType == that.insightType - && Objects.equals(insightMappingId, that.insightMappingId); - } - - @Override - public int hashCode() { - return Objects.hash(name, timestamp, tags, insightType, insightMappingId); - } - - @Override - public String toString() { - return "InsightMetadata{" - + "name='" - + name - + '\'' - + ", timestamp=" - + 
timestamp - + ", tags=" - + tags - + ", insightType=" - + insightType - + ", insightMappingId=" - + insightMappingId - + '}'; - } - - public String getName() { - return name; - } - - public long getTimestamp() { - return timestamp; - } - - public Map getTags() { - return tags; - } - - public InsightType getInsightType() { - return insightType; - } - - public String getInsightMappingId() { - return insightMappingId; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java deleted file mode 100644 index ae91e27d227..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -public enum InsightType { - EVENT, - GAUGE, - COUNTER, - HISTOGRAM, - TIMER, - METER, - LOG; -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java deleted file mode 100644 index 231f082d785..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class InsightsPlatformInfo { - @JsonProperty("os") - private final OS os; - - @JsonProperty("cpus") - private CPUS cpus; - - /** - * All dependencies in a map format grouped by the module: {"core" : {"com.datastax.driver:core": - * {"runtimeVersion:" : "1.0.0", "compileVersion": "1.0.1"},...}}, "extras"" {...} - */ - @JsonProperty("runtime") - private Map> runtime; - - @JsonCreator - public InsightsPlatformInfo( - @JsonProperty("os") OS os, - @JsonProperty("cpus") CPUS cpus, - @JsonProperty("runtime") Map> runtime) { - this.os = os; - this.cpus = cpus; - this.runtime = runtime; - } - - public OS getOs() { - return os; - } - - public CPUS getCpus() { - return cpus; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof InsightsPlatformInfo)) { - return false; - } - InsightsPlatformInfo that = (InsightsPlatformInfo) o; - return Objects.equals(os, that.os) - && Objects.equals(cpus, that.cpus) - && Objects.equals(runtime, that.runtime); - } - - @Override - public int hashCode() { - return Objects.hash(os, cpus, runtime); - } - - Map> getRuntime() { - return runtime; - } - - public static class OS { - @JsonProperty("name") - private final String name; - - @JsonProperty("version") - private final String version; - - @JsonProperty("arch") - private final String arch; - - @JsonCreator - public OS( - @JsonProperty("name") String name, - @JsonProperty("version") String version, - @JsonProperty("arch") String arch) { - this.name = name; - this.version = version; - this.arch = arch; - } - - public String getName() { - return name; - } - - public String getVersion() { - return version; - } - - public String getArch() { - return arch; - } - - @Override - public boolean equals(Object o) 
{ - if (this == o) { - return true; - } - if (!(o instanceof OS)) { - return false; - } - OS os = (OS) o; - return Objects.equals(name, os.name) - && Objects.equals(version, os.version) - && Objects.equals(arch, os.arch); - } - - @Override - public int hashCode() { - return Objects.hash(name, version, arch); - } - } - - public static class CPUS { - @JsonProperty("length") - private final int length; - - @JsonProperty("model") - private final String model; - - @JsonCreator - public CPUS(@JsonProperty("length") int length, @JsonProperty("model") String model) { - this.length = length; - this.model = model; - } - - public int getLength() { - return length; - } - - public String getModel() { - return model; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof CPUS)) { - return false; - } - CPUS cpus = (CPUS) o; - return length == cpus.length && Objects.equals(model, cpus.model); - } - - @Override - public int hashCode() { - return Objects.hash(length, model); - } - } - - public static class RuntimeAndCompileTimeVersions { - @JsonProperty("runtimeVersion") - private final String runtimeVersion; - - @JsonProperty("compileVersion") - private final String compileVersion; - - @JsonProperty("optional") - private final boolean optional; - - @JsonCreator - public RuntimeAndCompileTimeVersions( - @JsonProperty("runtimeVersion") String runtimeVersion, - @JsonProperty("compileVersion") String compileVersion, - @JsonProperty("optional") boolean optional) { - this.runtimeVersion = runtimeVersion; - this.compileVersion = compileVersion; - this.optional = optional; - } - - public String getRuntimeVersion() { - return runtimeVersion; - } - - public String getCompileVersion() { - return compileVersion; - } - - public boolean isOptional() { - return optional; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof RuntimeAndCompileTimeVersions)) { - return false; - } - 
RuntimeAndCompileTimeVersions that = (RuntimeAndCompileTimeVersions) o; - return optional == that.optional - && Objects.equals(runtimeVersion, that.runtimeVersion) - && Objects.equals(compileVersion, that.compileVersion); - } - - @Override - public int hashCode() { - return Objects.hash(runtimeVersion, compileVersion, optional); - } - - @Override - public String toString() { - return "RuntimeAndCompileTimeVersions{" - + "runtimeVersion='" - + runtimeVersion - + '\'' - + ", compileVersion='" - + compileVersion - + '\'' - + ", optional=" - + optional - + '}'; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java deleted file mode 100644 index bddd3ef94b3..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class InsightsStartupData { - @JsonProperty("clientId") - private final String clientId; - - @JsonProperty("sessionId") - private final String sessionId; - - @JsonProperty("applicationName") - private final String applicationName; - - @JsonProperty("applicationVersion") - private final String applicationVersion; - - @JsonProperty("contactPoints") - private final Map> contactPoints; - - @JsonProperty("initialControlConnection") - private final String initialControlConnection; - - @JsonProperty("protocolVersion") - private final int protocolVersion; - - @JsonProperty("localAddress") - private final String localAddress; - - @JsonProperty("executionProfiles") - private final Map executionProfiles; - - @JsonProperty("poolSizeByHostDistance") - private final PoolSizeByHostDistance poolSizeByHostDistance; - - @JsonProperty("heartbeatInterval") - private final long heartbeatInterval; - - @JsonProperty("compression") - private final String compression; - - @JsonProperty("reconnectionPolicy") - private final ReconnectionPolicyInfo reconnectionPolicy; - - @JsonProperty("ssl") - private final SSL ssl; - - @JsonProperty("authProvider") - private final AuthProviderType authProvider; - - @JsonProperty("otherOptions") - private final Map otherOptions; - - @JsonProperty("configAntiPatterns") - private final Map configAntiPatterns; - - @JsonProperty("periodicStatusInterval") - private final long periodicStatusInterval; - - @JsonProperty("platformInfo") - private final InsightsPlatformInfo platformInfo; - - @JsonProperty("hostName") - private final String hostName; - - @JsonProperty("driverName") - private String driverName; - - @JsonProperty("applicationNameWasGenerated") - private boolean applicationNameWasGenerated; - - 
@JsonProperty("driverVersion") - private String driverVersion; - - @JsonProperty("dataCenters") - private Set dataCenters; - - @JsonCreator - private InsightsStartupData( - @JsonProperty("clientId") String clientId, - @JsonProperty("sessionId") String sessionId, - @JsonProperty("applicationName") String applicationName, - @JsonProperty("applicationVersion") String applicationVersion, - @JsonProperty("contactPoints") Map> contactPoints, - @JsonProperty("initialControlConnection") String initialControlConnection, - @JsonProperty("protocolVersion") int protocolVersion, - @JsonProperty("localAddress") String localAddress, - @JsonProperty("executionProfiles") Map executionProfiles, - @JsonProperty("poolSizeByHostDistance") PoolSizeByHostDistance poolSizeByHostDistance, - @JsonProperty("heartbeatInterval") long heartbeatInterval, - @JsonProperty("compression") String compression, - @JsonProperty("reconnectionPolicy") ReconnectionPolicyInfo reconnectionPolicy, - @JsonProperty("ssl") SSL ssl, - @JsonProperty("authProvider") AuthProviderType authProvider, - @JsonProperty("otherOptions") Map otherOptions, - @JsonProperty("configAntiPatterns") Map configAntiPatterns, - @JsonProperty("periodicStatusInterval") long periodicStatusInterval, - @JsonProperty("platformInfo") InsightsPlatformInfo platformInfo, - @JsonProperty("hostName") String hostName, - @JsonProperty("driverName") String driverName, - @JsonProperty("applicationNameWasGenerated") boolean applicationNameWasGenerated, - @JsonProperty("driverVersion") String driverVersion, - @JsonProperty("dataCenters") Set dataCenters) { - this.clientId = clientId; - this.sessionId = sessionId; - this.applicationName = applicationName; - this.applicationVersion = applicationVersion; - this.contactPoints = contactPoints; - this.initialControlConnection = initialControlConnection; - this.protocolVersion = protocolVersion; - this.localAddress = localAddress; - this.executionProfiles = executionProfiles; - this.poolSizeByHostDistance = 
poolSizeByHostDistance; - this.heartbeatInterval = heartbeatInterval; - this.compression = compression; - this.reconnectionPolicy = reconnectionPolicy; - this.ssl = ssl; - this.authProvider = authProvider; - this.otherOptions = otherOptions; - this.configAntiPatterns = configAntiPatterns; - this.periodicStatusInterval = periodicStatusInterval; - this.platformInfo = platformInfo; - this.hostName = hostName; - this.driverName = driverName; - this.applicationNameWasGenerated = applicationNameWasGenerated; - this.driverVersion = driverVersion; - this.dataCenters = dataCenters; - } - - public String getClientId() { - return clientId; - } - - public String getSessionId() { - return sessionId; - } - - public String getApplicationName() { - return applicationName; - } - - public String getApplicationVersion() { - return applicationVersion; - } - - public Map> getContactPoints() { - return contactPoints; - } - - public String getInitialControlConnection() { - return initialControlConnection; - } - - public int getProtocolVersion() { - return protocolVersion; - } - - public String getLocalAddress() { - return localAddress; - } - - public Map getExecutionProfiles() { - return executionProfiles; - } - - public PoolSizeByHostDistance getPoolSizeByHostDistance() { - return poolSizeByHostDistance; - } - - public long getHeartbeatInterval() { - return heartbeatInterval; - } - - public String getCompression() { - return compression; - } - - public ReconnectionPolicyInfo getReconnectionPolicy() { - return reconnectionPolicy; - } - - public SSL getSsl() { - return ssl; - } - - public AuthProviderType getAuthProvider() { - return authProvider; - } - - public Map getOtherOptions() { - return otherOptions; - } - - public Map getConfigAntiPatterns() { - return configAntiPatterns; - } - - public long getPeriodicStatusInterval() { - return periodicStatusInterval; - } - - public InsightsPlatformInfo getPlatformInfo() { - return platformInfo; - } - - public String getHostName() { - return 
hostName; - } - - public String getDriverName() { - return driverName; - } - - public boolean isApplicationNameWasGenerated() { - return applicationNameWasGenerated; - } - - public String getDriverVersion() { - return driverVersion; - } - - public Set getDataCenters() { - return dataCenters; - } - - public static InsightsStartupData.Builder builder() { - return new InsightsStartupData.Builder(); - } - - public static class Builder { - private String clientId; - private String sessionId; - private String applicationName; - private String applicationVersion; - private Map> contactPoints; - private String initialControlConnection; - private int protocolVersion; - private String localAddress; - private Map executionProfiles; - private PoolSizeByHostDistance poolSizeByHostDistance; - private long heartbeatInterval; - private String compression; - private ReconnectionPolicyInfo reconnectionPolicy; - private SSL ssl; - private AuthProviderType authProvider; - private Map otherOptions; - private Map configAntiPatterns; - private long periodicStatusInterval; - private InsightsPlatformInfo platformInfo; - private String hostName; - private String driverName; - private String driverVersion; - private boolean applicationNameWasGenerated; - private Set dataCenters; - - public InsightsStartupData build() { - return new InsightsStartupData( - clientId, - sessionId, - applicationName, - applicationVersion, - contactPoints, - initialControlConnection, - protocolVersion, - localAddress, - executionProfiles, - poolSizeByHostDistance, - heartbeatInterval, - compression, - reconnectionPolicy, - ssl, - authProvider, - otherOptions, - configAntiPatterns, - periodicStatusInterval, - platformInfo, - hostName, - driverName, - applicationNameWasGenerated, - driverVersion, - dataCenters); - } - - public Builder withClientId(String clientId) { - this.clientId = clientId; - return this; - } - - public Builder withSessionId(String id) { - this.sessionId = id; - return this; - } - - public 
Builder withApplicationName(String applicationName) { - this.applicationName = applicationName; - return this; - } - - public Builder withApplicationVersion(String applicationVersion) { - this.applicationVersion = applicationVersion; - return this; - } - - public Builder withContactPoints(Map> contactPoints) { - this.contactPoints = contactPoints; - return this; - } - - public Builder withInitialControlConnection(String inetSocketAddress) { - this.initialControlConnection = inetSocketAddress; - return this; - } - - public Builder withProtocolVersion(int protocolVersion) { - this.protocolVersion = protocolVersion; - return this; - } - - public Builder withLocalAddress(String localAddress) { - this.localAddress = localAddress; - return this; - } - - public Builder withExecutionProfiles(Map executionProfiles) { - this.executionProfiles = executionProfiles; - return this; - } - - public Builder withPoolSizeByHostDistance(PoolSizeByHostDistance poolSizeByHostDistance) { - this.poolSizeByHostDistance = poolSizeByHostDistance; - return this; - } - - public Builder withHeartbeatInterval(long heartbeatInterval) { - this.heartbeatInterval = heartbeatInterval; - return this; - } - - public Builder withCompression(String compression) { - this.compression = compression; - return this; - } - - public Builder withReconnectionPolicy(ReconnectionPolicyInfo reconnectionPolicy) { - this.reconnectionPolicy = reconnectionPolicy; - return this; - } - - public Builder withSsl(SSL ssl) { - this.ssl = ssl; - return this; - } - - public Builder withAuthProvider(AuthProviderType authProvider) { - this.authProvider = authProvider; - return this; - } - - public Builder withOtherOptions(Map otherOptions) { - this.otherOptions = otherOptions; - return this; - } - - public Builder withConfigAntiPatterns(Map configAntiPatterns) { - this.configAntiPatterns = configAntiPatterns; - return this; - } - - public Builder withPeriodicStatusInterval(long periodicStatusInterval) { - 
this.periodicStatusInterval = periodicStatusInterval; - return this; - } - - public Builder withPlatformInfo(InsightsPlatformInfo insightsPlatformInfo) { - this.platformInfo = insightsPlatformInfo; - return this; - } - - public Builder withHostName(String hostName) { - this.hostName = hostName; - return this; - } - - public Builder withDriverName(String driverName) { - this.driverName = driverName; - return this; - } - - public Builder withDriverVersion(String driverVersion) { - this.driverVersion = driverVersion; - return this; - } - - public Builder withApplicationNameWasGenerated(boolean applicationNameWasGenerated) { - this.applicationNameWasGenerated = applicationNameWasGenerated; - return this; - } - - public Builder withDataCenters(Set dataCenters) { - this.dataCenters = dataCenters; - return this; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java deleted file mode 100644 index 6f5a135f7c4..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class InsightsStatusData { - @JsonProperty("clientId") - private final String clientId; - - @JsonProperty("sessionId") - private final String sessionId; - - @JsonProperty("controlConnection") - private final String controlConnection; - - @JsonProperty("connectedNodes") - private final Map connectedNodes; - - @JsonCreator - private InsightsStatusData( - @JsonProperty("clientId") String clientId, - @JsonProperty("sessionId") String sessionId, - @JsonProperty("controlConnection") String controlConnection, - @JsonProperty("connectedNodes") Map connectedNodes) { - this.clientId = clientId; - this.sessionId = sessionId; - this.controlConnection = controlConnection; - this.connectedNodes = connectedNodes; - } - - public String getClientId() { - return clientId; - } - - public String getSessionId() { - return sessionId; - } - - public String getControlConnection() { - return controlConnection; - } - - public Map getConnectedNodes() { - return connectedNodes; - } - - @Override - public String toString() { - return "InsightsStatusData{" - + "clientId='" - + clientId - + '\'' - + ", sessionId='" - + sessionId - + '\'' - + ", controlConnection=" - + controlConnection - + ", connectedNodes=" - + connectedNodes - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof InsightsStatusData)) { - return false; - } - InsightsStatusData that = (InsightsStatusData) o; - return Objects.equals(clientId, that.clientId) - && Objects.equals(sessionId, that.sessionId) - && Objects.equals(controlConnection, that.controlConnection) - && Objects.equals(connectedNodes, that.connectedNodes); - 
} - - @Override - public int hashCode() { - return Objects.hash(clientId, sessionId, controlConnection, connectedNodes); - } - - public static InsightsStatusData.Builder builder() { - return new InsightsStatusData.Builder(); - } - - public static class Builder { - private String clientId; - private String sessionId; - private String controlConnection; - private Map connectedNodes; - - public Builder withClientId(String clientId) { - this.clientId = clientId; - return this; - } - - public Builder withSessionId(String id) { - this.sessionId = id; - return this; - } - - public Builder withControlConnection(String controlConnection) { - this.controlConnection = controlConnection; - return this; - } - - public Builder withConnectedNodes(Map connectedNodes) { - this.connectedNodes = connectedNodes; - return this; - } - - public InsightsStatusData build() { - return new InsightsStatusData(clientId, sessionId, controlConnection, connectedNodes); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java deleted file mode 100644 index 594583e3f28..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class LoadBalancingInfo { - @JsonProperty("type") - private final String type; - - @JsonProperty("options") - private final Map options; - - @JsonProperty("namespace") - private final String namespace; - - @JsonCreator - public LoadBalancingInfo( - @JsonProperty("type") String type, - @JsonProperty("options") Map options, - @JsonProperty("namespace") String namespace) { - this.type = type; - this.options = options; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public Map getOptions() { - return options; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof LoadBalancingInfo)) { - return false; - } - LoadBalancingInfo that = (LoadBalancingInfo) o; - return Objects.equals(type, that.type) - && Objects.equals(options, that.options) - && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, options, namespace); - } - - @Override - public String toString() { - return "LoadBalancingInfo{" - + "type='" - + type - + '\'' - + ", options=" - + options - + ", namespace='" - + namespace - + '\'' - + '}'; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java deleted file mode 100644 index 07f76a18d40..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class PoolSizeByHostDistance { - @JsonProperty("local") - private final int local; - - @JsonProperty("remote") - private final int remote; - - @JsonProperty("ignored") - private final int ignored; - - @JsonCreator - public PoolSizeByHostDistance( - @JsonProperty("local") int local, - @JsonProperty("remote") int remote, - @JsonProperty("ignored") int ignored) { - - this.local = local; - this.remote = remote; - this.ignored = ignored; - } - - public int getLocal() { - return local; - } - - public int getRemote() { - return remote; - } - - public int getIgnored() { - return ignored; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof PoolSizeByHostDistance)) { - return false; - } - PoolSizeByHostDistance that = (PoolSizeByHostDistance) o; - return local == that.local && remote == that.remote && ignored == that.ignored; - } - - @Override - public int hashCode() { - return Objects.hash(local, remote, ignored); - } - - @Override - public String toString() { - return "PoolSizeByHostDistance{" - + "local=" - + local - + ", remote=" - + remote - + ", ignored=" - + ignored - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java deleted file mode 100644 index 463c23a4325..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class ReconnectionPolicyInfo { - @JsonProperty("type") - private final String type; - - @JsonProperty("options") - private final Map options; - - @JsonProperty("namespace") - private final String namespace; - - @JsonCreator - public ReconnectionPolicyInfo( - @JsonProperty("type") String type, - @JsonProperty("options") Map options, - @JsonProperty("namespace") String namespace) { - - this.type = type; - this.options = options; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public Map getOptions() { - return options; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ReconnectionPolicyInfo)) { - return false; - } - ReconnectionPolicyInfo that = (ReconnectionPolicyInfo) o; - return Objects.equals(type, that.type) - && Objects.equals(options, that.options) - && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, options, 
namespace); - } - - @Override - public String toString() { - return "ReconnectionPolicyInfo{" - + "type='" - + type - + '\'' - + ", options=" - + options - + ", namespace='" - + namespace - + '\'' - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java deleted file mode 100644 index debcd85c025..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class SSL { - @JsonProperty("enabled") - private final boolean enabled; - - @JsonProperty("certValidation") - private final boolean certValidation; - - @JsonCreator - public SSL( - @JsonProperty("enabled") boolean enabled, - @JsonProperty("certValidation") boolean certValidation) { - this.enabled = enabled; - this.certValidation = certValidation; - } - - public boolean isEnabled() { - return enabled; - } - - public boolean isCertValidation() { - return certValidation; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SSL)) { - return false; - } - SSL that = (SSL) o; - return enabled == that.enabled && certValidation == that.certValidation; - } - - @Override - public int hashCode() { - return Objects.hash(enabled, certValidation); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java deleted file mode 100644 index 8b50e5b2313..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -public class SessionStateForNode { - @JsonProperty("connections") - private final Integer connections; - - @JsonProperty("inFlightQueries") - private final Integer inFlightQueries; - - @JsonCreator - public SessionStateForNode( - @JsonProperty("connections") Integer connections, - @JsonProperty("inFlightQueries") Integer inFlightQueries) { - this.connections = connections; - this.inFlightQueries = inFlightQueries; - } - - public Integer getConnections() { - return connections; - } - - public Integer getInFlightQueries() { - return inFlightQueries; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SessionStateForNode)) { - return false; - } - SessionStateForNode that = (SessionStateForNode) o; - return Objects.equals(connections, that.connections) - && Objects.equals(inFlightQueries, that.inFlightQueries); - } - - @Override - public int hashCode() { - return Objects.hash(connections, inFlightQueries); - } - - @Override - public String toString() { - return "SessionStateForNode{" - + "connections=" - + connections - + ", inFlightQueries=" - + inFlightQueries - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java 
b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java deleted file mode 100644 index 58652fdf885..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -@JsonInclude(JsonInclude.Include.NON_NULL) -public class SpecificExecutionProfile { - @JsonProperty("readTimeout") - private final Integer readTimeout; - - @JsonProperty("loadBalancing") - private final LoadBalancingInfo loadBalancing; - - @JsonProperty("speculativeExecution") - private SpeculativeExecutionInfo speculativeExecution; - - @JsonProperty("consistency") - private final String consistency; - - @JsonProperty("serialConsistency") - private final String serialConsistency; - - @JsonProperty("graphOptions") - private Map graphOptions; - - @JsonCreator - public SpecificExecutionProfile( - @JsonProperty("readTimeout") Integer readTimeoutMillis, - @JsonProperty("loadBalancing") LoadBalancingInfo loadBalancing, - @JsonProperty("speculativeExecution") SpeculativeExecutionInfo speculativeExecutionInfo, - @JsonProperty("consistency") String consistency, - @JsonProperty("serialConsistency") String serialConsistency, - @JsonProperty("graphOptions") Map graphOptions) { - readTimeout = readTimeoutMillis; - this.loadBalancing = loadBalancing; - this.speculativeExecution = speculativeExecutionInfo; - this.consistency = consistency; - this.serialConsistency = serialConsistency; - this.graphOptions = graphOptions; - } - - public Integer getReadTimeout() { - return readTimeout; - } - - public LoadBalancingInfo getLoadBalancing() { - return loadBalancing; - } - - public SpeculativeExecutionInfo getSpeculativeExecution() { - return speculativeExecution; - } - - public String getConsistency() { - return consistency; - } - - public String getSerialConsistency() { - return serialConsistency; - } - - public Map getGraphOptions() { - return graphOptions; - } - - @Override - public boolean 
equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SpecificExecutionProfile)) { - return false; - } - SpecificExecutionProfile that = (SpecificExecutionProfile) o; - return Objects.equals(readTimeout, that.readTimeout) - && Objects.equals(loadBalancing, that.loadBalancing) - && Objects.equals(speculativeExecution, that.speculativeExecution) - && Objects.equals(consistency, that.consistency) - && Objects.equals(serialConsistency, that.serialConsistency) - && Objects.equals(graphOptions, that.graphOptions); - } - - @Override - public int hashCode() { - return Objects.hash( - readTimeout, - loadBalancing, - speculativeExecution, - consistency, - serialConsistency, - graphOptions); - } - - @Override - public String toString() { - return "SpecificExecutionProfile{" - + "readTimeout=" - + readTimeout - + ", loadBalancing=" - + loadBalancing - + ", speculativeExecution=" - + speculativeExecution - + ", consistency='" - + consistency - + '\'' - + ", serialConsistency='" - + serialConsistency - + '\'' - + ", graphOptions=" - + graphOptions - + '}'; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java deleted file mode 100644 index 779a4ed9e51..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights.schema; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Map; -import java.util.Objects; - -public class SpeculativeExecutionInfo { - @JsonProperty("type") - private final String type; - - @JsonProperty("options") - private final Map options; - - @JsonProperty("namespace") - private String namespace; - - @JsonCreator - public SpeculativeExecutionInfo( - @JsonProperty("type") String type, - @JsonProperty("options") Map options, - @JsonProperty("namespace") String namespace) { - this.type = type; - this.options = options; - this.namespace = namespace; - } - - public String getType() { - return type; - } - - public Map getOptions() { - return options; - } - - public String getNamespace() { - return namespace; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof SpeculativeExecutionInfo)) { - return false; - } - SpeculativeExecutionInfo that = (SpeculativeExecutionInfo) o; - return Objects.equals(type, that.type) - && Objects.equals(options, that.options) - && Objects.equals(namespace, that.namespace); - } - - @Override - public int hashCode() { - return Objects.hash(type, options, namespace); - } - - @Override - public String toString() { - return "SpeculativeExecutionInfo{" - + "type='" - + type - + '\'' - + ", options=" - + options - + ", namespace='" - + namespace - + '\'' - + '}'; - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java deleted file mode 100644 index 501fa263258..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * @deprecated This class only exists for backward compatibility. It is equivalent to {@link - * DcInferringLoadBalancingPolicy}, which should now be used instead. 
- */ -@Deprecated -public class DseDcInferringLoadBalancingPolicy extends DcInferringLoadBalancingPolicy { - public DseDcInferringLoadBalancingPolicy( - @NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java deleted file mode 100644 index 059a37c4774..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * @deprecated This class only exists for backward compatibility. It is equivalent to {@link - * DefaultLoadBalancingPolicy}, which should now be used instead. 
- */ -@Deprecated -public class DseLoadBalancingPolicy extends DefaultLoadBalancingPolicy { - public DseLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java deleted file mode 100644 index 52a0b846076..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultAggregateMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseAggregateMetadata extends DefaultAggregateMetadata - implements DseAggregateMetadata { - - @Nullable private final Boolean deterministic; - - public DefaultDseAggregateMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @Nullable FunctionSignature finalFuncSignature, - @Nullable Object initCond, - @NonNull DataType returnType, - @NonNull FunctionSignature stateFuncSignature, - @NonNull DataType stateType, - @NonNull TypeCodec stateTypeCodec, - @Nullable Boolean deterministic) { - super( - keyspace, - signature, - finalFuncSignature, - initCond, - returnType, - stateFuncSignature, - stateType, - stateTypeCodec); - this.deterministic = deterministic; - } - - @Override - @Deprecated - public boolean isDeterministic() { - return deterministic != null && deterministic; - } - - @Override - @Nullable - public Optional getDeterministic() { - return Optional.ofNullable(deterministic); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseAggregateMetadata) { - DseAggregateMetadata that = (DseAggregateMetadata) other; - return Objects.equals(this.getKeyspace(), that.getKeyspace()) - && Objects.equals(this.getSignature(), that.getSignature()) - && 
Objects.equals( - this.getFinalFuncSignature().orElse(null), that.getFinalFuncSignature().orElse(null)) - && Objects.equals(this.getInitCond().orElse(null), that.getInitCond().orElse(null)) - && Objects.equals(this.getReturnType(), that.getReturnType()) - && Objects.equals(this.getStateFuncSignature(), that.getStateFuncSignature()) - && Objects.equals(this.getStateType(), that.getStateType()) - && Objects.equals(this.deterministic, that.getDeterministic().orElse(null)); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - getKeyspace(), - getSignature(), - getFinalFuncSignature(), - getInitCond(), - getReturnType(), - getStateFuncSignature(), - getStateType(), - deterministic); - } - - @Override - public String toString() { - return "Aggregate Name: " - + getSignature().getName().asCql(false) - + ", Keyspace: " - + getKeyspace().asCql(false) - + ", Return Type: " - + getReturnType().asCql(false, false) - + ", Deterministic: " - + deterministic; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java deleted file mode 100644 index 2168f20fdc7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseColumnMetadata extends DefaultColumnMetadata implements DseColumnMetadata { - - public DefaultDseColumnMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier parent, - @NonNull CqlIdentifier name, - @NonNull DataType dataType, - boolean isStatic) { - super(keyspace, parent, name, dataType, isStatic); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java deleted file mode 100644 index e4de62f294c..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; - -public class DefaultDseEdgeMetadata implements DseEdgeMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier labelName; - - @NonNull private final CqlIdentifier fromTable; - @NonNull private final CqlIdentifier fromLabel; - @NonNull private final List fromPartitionKeyColumns; - @NonNull private final List fromClusteringColumns; - - @NonNull private final CqlIdentifier toTable; - @NonNull private final CqlIdentifier toLabel; - @NonNull private final List toPartitionKeyColumns; - @NonNull private final List toClusteringColumns; - - public DefaultDseEdgeMetadata( - @NonNull CqlIdentifier labelName, - @NonNull CqlIdentifier fromTable, - @NonNull CqlIdentifier fromLabel, - @NonNull List fromPartitionKeyColumns, - @NonNull List fromClusteringColumns, - @NonNull CqlIdentifier toTable, - @NonNull CqlIdentifier toLabel, - @NonNull List toPartitionKeyColumns, - @NonNull List toClusteringColumns) { - this.labelName = 
Preconditions.checkNotNull(labelName); - this.fromTable = Preconditions.checkNotNull(fromTable); - this.fromLabel = Preconditions.checkNotNull(fromLabel); - this.fromPartitionKeyColumns = Preconditions.checkNotNull(fromPartitionKeyColumns); - this.fromClusteringColumns = Preconditions.checkNotNull(fromClusteringColumns); - this.toTable = Preconditions.checkNotNull(toTable); - this.toLabel = Preconditions.checkNotNull(toLabel); - this.toPartitionKeyColumns = Preconditions.checkNotNull(toPartitionKeyColumns); - this.toClusteringColumns = Preconditions.checkNotNull(toClusteringColumns); - } - - @NonNull - @Override - public CqlIdentifier getLabelName() { - return labelName; - } - - @NonNull - @Override - public CqlIdentifier getFromTable() { - return fromTable; - } - - @NonNull - @Override - public CqlIdentifier getFromLabel() { - return fromLabel; - } - - @NonNull - @Override - public List getFromPartitionKeyColumns() { - return fromPartitionKeyColumns; - } - - @NonNull - @Override - public List getFromClusteringColumns() { - return fromClusteringColumns; - } - - @NonNull - @Override - public CqlIdentifier getToTable() { - return toTable; - } - - @NonNull - @Override - public CqlIdentifier getToLabel() { - return toLabel; - } - - @NonNull - @Override - public List getToPartitionKeyColumns() { - return toPartitionKeyColumns; - } - - @NonNull - @Override - public List getToClusteringColumns() { - return toClusteringColumns; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseEdgeMetadata) { - DseEdgeMetadata that = (DseEdgeMetadata) other; - return Objects.equals(this.labelName, that.getLabelName()) - && Objects.equals(this.fromTable, that.getFromTable()) - && Objects.equals(this.fromLabel, that.getFromLabel()) - && Objects.equals(this.fromPartitionKeyColumns, that.getFromPartitionKeyColumns()) - && Objects.equals(this.fromClusteringColumns, that.getFromClusteringColumns()) - && 
Objects.equals(this.toTable, that.getToTable()) - && Objects.equals(this.toLabel, that.getToLabel()) - && Objects.equals(this.toPartitionKeyColumns, that.getToPartitionKeyColumns()) - && Objects.equals(this.toClusteringColumns, that.getToClusteringColumns()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - labelName, - fromTable, - fromLabel, - fromPartitionKeyColumns, - fromClusteringColumns, - toTable, - toLabel, - toPartitionKeyColumns, - toClusteringColumns); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java deleted file mode 100644 index 0a94491f1f7..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultFunctionMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseFunctionMetadata extends DefaultFunctionMetadata - implements DseFunctionMetadata { - - @Nullable private final Boolean deterministic; - @Nullable private final Monotonicity monotonicity; - @NonNull private final List monotonicArgumentNames; - - public DefaultDseFunctionMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @NonNull List parameterNames, - @NonNull String body, - boolean calledOnNullInput, - @NonNull String language, - @NonNull DataType returnType, - @Nullable Boolean deterministic, - @Nullable Boolean monotonic, - @NonNull List monotonicArgumentNames) { - super(keyspace, signature, parameterNames, body, calledOnNullInput, language, returnType); - // set DSE extension attributes - this.deterministic = deterministic; - this.monotonicity = - monotonic == null - ? null - : monotonic - ? Monotonicity.FULLY_MONOTONIC - : monotonicArgumentNames.isEmpty() - ? 
Monotonicity.NOT_MONOTONIC - : Monotonicity.PARTIALLY_MONOTONIC; - this.monotonicArgumentNames = ImmutableList.copyOf(monotonicArgumentNames); - } - - @Override - @Deprecated - public boolean isDeterministic() { - return deterministic != null && deterministic; - } - - @Override - public Optional getDeterministic() { - return Optional.ofNullable(deterministic); - } - - @Override - @Deprecated - public boolean isMonotonic() { - return monotonicity == Monotonicity.FULLY_MONOTONIC; - } - - @Override - public Optional getMonotonicity() { - return Optional.ofNullable(monotonicity); - } - - @NonNull - @Override - public List getMonotonicArgumentNames() { - return this.monotonicArgumentNames; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseFunctionMetadata) { - DseFunctionMetadata that = (DseFunctionMetadata) other; - return Objects.equals(this.getKeyspace(), that.getKeyspace()) - && Objects.equals(this.getSignature(), that.getSignature()) - && Objects.equals(this.getParameterNames(), that.getParameterNames()) - && Objects.equals(this.getBody(), that.getBody()) - && this.isCalledOnNullInput() == that.isCalledOnNullInput() - && Objects.equals(this.getLanguage(), that.getLanguage()) - && Objects.equals(this.getReturnType(), that.getReturnType()) - && Objects.equals(this.deterministic, that.getDeterministic().orElse(null)) - && this.monotonicity == that.getMonotonicity().orElse(null) - && Objects.equals(this.monotonicArgumentNames, that.getMonotonicArgumentNames()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - getKeyspace(), - getSignature(), - getParameterNames(), - getBody(), - isCalledOnNullInput(), - getLanguage(), - getReturnType(), - deterministic, - monotonicity, - monotonicArgumentNames); - } - - @Override - public String toString() { - return "Function Name: " - + this.getSignature().getName().asCql(false) - + ", Keyspace: " - + 
this.getKeyspace().asCql(false) - + ", Language: " - + this.getLanguage() - + ", Return Type: " - + getReturnType().asCql(false, false) - + ", Deterministic: " - + this.deterministic - + ", Monotonicity: " - + this.monotonicity - + ", Monotonic On: " - + (this.monotonicArgumentNames.isEmpty() ? "" : this.monotonicArgumentNames.get(0)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java deleted file mode 100644 index c66d7934151..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseIndexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultIndexMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseIndexMetadata extends DefaultIndexMetadata implements DseIndexMetadata { - - public DefaultDseIndexMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @NonNull CqlIdentifier name, - @NonNull IndexKind kind, - @NonNull String target, - @NonNull Map options) { - super(keyspace, table, name, kind, target, options); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java deleted file mode 100644 index 8e54c9082e1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseGraphKeyspaceMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseKeyspaceMetadata implements DseGraphKeyspaceMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier name; - private final boolean durableWrites; - private final boolean virtual; - @Nullable private final String graphEngine; - @NonNull private final Map replication; - @NonNull private final Map types; - @NonNull private final Map tables; - @NonNull private final Map views; - @NonNull private final Map functions; - @NonNull private final Map aggregates; - - public DefaultDseKeyspaceMetadata( - @NonNull CqlIdentifier name, - boolean durableWrites, - boolean virtual, - @Nullable String graphEngine, - @NonNull Map replication, - @NonNull Map types, - @NonNull Map tables, - @NonNull Map views, - @NonNull Map functions, - @NonNull Map aggregates) { - this.name = name; - this.durableWrites = durableWrites; - this.virtual = virtual; - this.graphEngine = graphEngine; - this.replication = 
replication; - this.types = types; - this.tables = tables; - this.views = views; - this.functions = functions; - this.aggregates = aggregates; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isDurableWrites() { - return durableWrites; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public Optional getGraphEngine() { - return Optional.ofNullable(graphEngine); - } - - @NonNull - @Override - public Map getReplication() { - return replication; - } - - @NonNull - @Override - public Map getUserDefinedTypes() { - return types; - } - - @NonNull - @Override - public Map getTables() { - return tables; - } - - @NonNull - @Override - public Map getViews() { - return views; - } - - @NonNull - @Override - public Map getFunctions() { - return functions; - } - - @NonNull - @Override - public Map getAggregates() { - return aggregates; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseGraphKeyspaceMetadata) { - DseGraphKeyspaceMetadata that = (DseGraphKeyspaceMetadata) other; - return Objects.equals(this.name, that.getName()) - && this.durableWrites == that.isDurableWrites() - && this.virtual == that.isVirtual() - && Objects.equals(this.graphEngine, that.getGraphEngine().orElse(null)) - && Objects.equals(this.replication, that.getReplication()) - && Objects.equals(this.types, that.getUserDefinedTypes()) - && Objects.equals(this.tables, that.getTables()) - && Objects.equals(this.views, that.getViews()) - && Objects.equals(this.functions, that.getFunctions()) - && Objects.equals(this.aggregates, that.getAggregates()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - name, - durableWrites, - virtual, - graphEngine, - replication, - types, - tables, - views, - functions, - aggregates); - } - - @Override - public boolean shallowEquals(Object 
other) { - if (other == this) { - return true; - } else if (other instanceof DseGraphKeyspaceMetadata) { - DseGraphKeyspaceMetadata that = (DseGraphKeyspaceMetadata) other; - return Objects.equals(this.name, that.getName()) - && this.durableWrites == that.isDurableWrites() - && Objects.equals(this.graphEngine, that.getGraphEngine().orElse(null)) - && Objects.equals(this.replication, that.getReplication()); - } else { - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java deleted file mode 100644 index f8fb8cc10d1..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseGraphTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseTableMetadata implements DseGraphTableMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier name; - // null for virtual tables - @Nullable private final UUID id; - private final boolean compactStorage; - private final boolean virtual; - @NonNull private final List partitionKey; - @NonNull private final Map clusteringColumns; - @NonNull private final Map columns; - @NonNull private final Map options; - @NonNull private final Map indexes; - @Nullable private final DseVertexMetadata vertex; - @Nullable private final DseEdgeMetadata edge; - - public DefaultDseTableMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @Nullable UUID id, - boolean compactStorage, - boolean virtual, - @NonNull List partitionKey, - @NonNull Map clusteringColumns, - @NonNull Map columns, - @NonNull Map options, - @NonNull Map indexes, - @Nullable DseVertexMetadata vertex, - @Nullable DseEdgeMetadata edge) { - this.keyspace = keyspace; - this.name = name; - this.id 
= id; - this.compactStorage = compactStorage; - this.virtual = virtual; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.columns = columns; - this.options = options; - this.indexes = indexes; - this.vertex = vertex; - this.edge = edge; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public Optional getId() { - return Optional.ofNullable(id); - } - - @Override - public boolean isCompactStorage() { - return compactStorage; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public List getPartitionKey() { - return partitionKey; - } - - @NonNull - @Override - public Map getClusteringColumns() { - return clusteringColumns; - } - - @NonNull - @Override - public Map getColumns() { - return columns; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - @Override - public Map getIndexes() { - return indexes; - } - - @NonNull - @Override - public Optional getVertex() { - return Optional.ofNullable(vertex); - } - - @NonNull - @Override - public Optional getEdge() { - return Optional.ofNullable(edge); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseGraphTableMetadata) { - DseGraphTableMetadata that = (DseGraphTableMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.id, that.getId().orElse(null)) - && this.compactStorage == that.isCompactStorage() - && this.virtual == that.isVirtual() - && Objects.equals(this.partitionKey, that.getPartitionKey()) - && Objects.equals(this.clusteringColumns, that.getClusteringColumns()) - && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.indexes, that.getIndexes()) - && 
Objects.equals(this.vertex, that.getVertex().orElse(null)) - && Objects.equals(this.edge, that.getEdge().orElse(null)); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - name, - id, - compactStorage, - virtual, - partitionKey, - clusteringColumns, - columns, - indexes, - vertex, - edge); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java deleted file mode 100644 index 05ba2823704..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Objects; - -public class DefaultDseVertexMetadata implements DseVertexMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier labelName; - - public DefaultDseVertexMetadata(@NonNull CqlIdentifier labelName) { - this.labelName = Preconditions.checkNotNull(labelName); - } - - @NonNull - @Override - public CqlIdentifier getLabelName() { - return labelName; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultDseVertexMetadata) { - DefaultDseVertexMetadata that = (DefaultDseVertexMetadata) other; - return Objects.equals(this.labelName, that.getLabelName()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return labelName.hashCode(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java deleted file mode 100644 index f04b7640041..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultDseViewMetadata implements DseViewMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier name; - @NonNull private final CqlIdentifier baseTable; - private final boolean includesAllColumns; - @Nullable private final String whereClause; - @NonNull private final UUID id; - @NonNull private final ImmutableList partitionKey; - @NonNull private final ImmutableMap clusteringColumns; - @NonNull private final ImmutableMap columns; - @NonNull private final Map options; - - public DefaultDseViewMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @NonNull CqlIdentifier baseTable, - boolean includesAllColumns, - 
@Nullable String whereClause, - @NonNull UUID id, - @NonNull ImmutableList partitionKey, - @NonNull ImmutableMap clusteringColumns, - @NonNull ImmutableMap columns, - @NonNull Map options) { - this.keyspace = keyspace; - this.name = name; - this.baseTable = baseTable; - this.includesAllColumns = includesAllColumns; - this.whereClause = whereClause; - this.id = id; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.columns = columns; - this.options = options; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public Optional getId() { - return Optional.of(id); - } - - @NonNull - @Override - public CqlIdentifier getBaseTable() { - return baseTable; - } - - @Override - public boolean includesAllColumns() { - return includesAllColumns; - } - - @NonNull - @Override - public Optional getWhereClause() { - return Optional.ofNullable(whereClause); - } - - @NonNull - @Override - public List getPartitionKey() { - return partitionKey; - } - - @NonNull - @Override - public Map getClusteringColumns() { - return clusteringColumns; - } - - @NonNull - @Override - public Map getColumns() { - return columns; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DseViewMetadata) { - DseViewMetadata that = (DseViewMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.baseTable, that.getBaseTable()) - && this.includesAllColumns == that.includesAllColumns() - && Objects.equals(this.whereClause, that.getWhereClause().orElse(null)) - && Objects.equals(Optional.of(this.id), that.getId()) - && Objects.equals(this.partitionKey, that.getPartitionKey()) - && 
Objects.equals(this.clusteringColumns, that.getClusteringColumns()) - && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.options, that.getOptions()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - name, - baseTable, - includesAllColumns, - whereClause, - id, - partitionKey, - clusteringColumns, - columns, - options); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java deleted file mode 100644 index 64f6cac19f0..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import java.util.List; - -public class ScriptHelper { - - public static void appendEdgeSide( - ScriptBuilder builder, - CqlIdentifier table, - CqlIdentifier label, - List partitionKeyColumns, - List clusteringColumns, - String keyword) { - builder.append(" ").append(keyword).append(label).append("("); - - if (partitionKeyColumns.size() == 1) { // PRIMARY KEY (k - builder.append(partitionKeyColumns.get(0)); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (CqlIdentifier pkColumn : partitionKeyColumns) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(pkColumn); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (CqlIdentifier clusteringColumn : clusteringColumns) { - builder.append(", ").append(clusteringColumn); - } - builder.append(")"); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseAggregateParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseAggregateParser.java deleted file mode 100644 index 37a7a2768c2..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseAggregateParser.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseAggregateMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.AggregateParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DseAggregateParser { - - private final AggregateParser aggregateParser; - private final InternalDriverContext context; - - public DseAggregateParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.aggregateParser = new AggregateParser(dataTypeParser, context); - this.context = context; - } - - public DseAggregateMetadata parseAggregate( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - AggregateMetadata aggregate = aggregateParser.parseAggregate(row, keyspaceId, userDefinedTypes); - // parse the DSE extended columns - final Boolean deterministic = - row.contains("deterministic") ? 
row.getBoolean("deterministic") : null; - - return new DefaultDseAggregateMetadata( - aggregate.getKeyspace(), - aggregate.getSignature(), - aggregate.getFinalFuncSignature().orElse(null), - aggregate.getInitCond().orElse(null), - aggregate.getReturnType(), - aggregate.getStateFuncSignature(), - aggregate.getStateType(), - context.getCodecRegistry().codecFor(aggregate.getStateType()), - deterministic); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java deleted file mode 100644 index 0d88bce8740..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseFunctionMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.FunctionParser; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DseFunctionParser { - - private final FunctionParser functionParser; - - public DseFunctionParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.functionParser = new FunctionParser(dataTypeParser, context); - } - - public DseFunctionMetadata parseFunction( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - FunctionMetadata function = functionParser.parseFunction(row, keyspaceId, userDefinedTypes); - // parse the DSE extended columns - final Boolean deterministic = - row.contains("deterministic") ? row.getBoolean("deterministic") : null; - final Boolean monotonic = row.contains("monotonic") ? row.getBoolean("monotonic") : null; - // stream the list of strings into a list of CqlIdentifiers - final List monotonicOn = - row.contains("monotonic_on") - ? 
row.getListOfString("monotonic_on").stream() - .map(CqlIdentifier::fromInternal) - .collect(Collectors.toList()) - : Collections.emptyList(); - - return new DefaultDseFunctionMetadata( - function.getKeyspace(), - function.getSignature(), - function.getParameterNames(), - function.getBody(), - function.isCalledOnNullInput(), - function.getLanguage(), - function.getReturnType(), - deterministic, - monotonic, - monotonicOn); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java deleted file mode 100644 index ca7fb74a746..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseKeyspaceMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.CassandraSchemaParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SimpleJsonParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.UserDefinedTypeParser; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; 
-import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import java.util.Collections; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default parser implementation for DSE. - * - *

For modularity, the code for each element row is split into separate classes (schema stuff is - * not on the hot path, so creating a few extra objects doesn't matter). - */ -@ThreadSafe -public class DseSchemaParser implements SchemaParser { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaParser.class); - - private final SchemaRows rows; - private final UserDefinedTypeParser userDefinedTypeParser; - private final DseTableParser tableParser; - private final DseViewParser viewParser; - private final DseFunctionParser functionParser; - private final DseAggregateParser aggregateParser; - private final String logPrefix; - private final long startTimeNs = System.nanoTime(); - - public DseSchemaParser(SchemaRows rows, InternalDriverContext context) { - this.rows = rows; - this.logPrefix = context.getSessionName(); - - this.userDefinedTypeParser = new UserDefinedTypeParser(rows.dataTypeParser(), context); - this.tableParser = new DseTableParser(rows, context); - this.viewParser = new DseViewParser(rows, context); - this.functionParser = new DseFunctionParser(rows.dataTypeParser(), context); - this.aggregateParser = new DseAggregateParser(rows.dataTypeParser(), context); - } - - @Override - public SchemaRefresh parse() { - ImmutableMap.Builder keyspacesBuilder = ImmutableMap.builder(); - for (AdminRow row : rows.keyspaces()) { - DseKeyspaceMetadata keyspace = parseKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - for (AdminRow row : rows.virtualKeyspaces()) { - DseKeyspaceMetadata keyspace = parseVirtualKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - SchemaRefresh refresh = new SchemaRefresh(keyspacesBuilder.build()); - LOG.debug("[{}] Schema parsing took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - return refresh; - } - - private DseKeyspaceMetadata parseKeyspace(AdminRow keyspaceRow) { - - // Cassandra <= 2.2 - // CREATE TABLE system.schema_keyspaces ( - // keyspace_name text 
PRIMARY KEY, - // durable_writes boolean, - // strategy_class text, - // strategy_options text - // ) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.keyspaces ( - // keyspace_name text PRIMARY KEY, - // durable_writes boolean, - // replication frozen> - // ) - // - // DSE >= 6.8: same as Cassandra 3 + graph_engine text - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - String graphEngine = keyspaceRow.getString("graph_engine"); - - Map replicationOptions; - if (keyspaceRow.contains("strategy_class")) { - String strategyClass = keyspaceRow.getString("strategy_class"); - Map strategyOptions = - SimpleJsonParser.parseStringMap(keyspaceRow.getString("strategy_options")); - replicationOptions = - ImmutableMap.builder() - .putAll(strategyOptions) - .put("class", strategyClass) - .build(); - } else { - replicationOptions = keyspaceRow.getMapOfStringToString("replication"); - } - - Map types = parseTypes(keyspaceId); - - return new DefaultDseKeyspaceMetadata( - keyspaceId, - durableWrites, - false, - graphEngine, - replicationOptions, - types, - parseTables(keyspaceId, types), - parseViews(keyspaceId, types), - parseFunctions(keyspaceId, types), - parseAggregates(keyspaceId, types)); - } - - private Map parseTypes(CqlIdentifier keyspaceId) { - return userDefinedTypeParser.parse(rows.types().get(keyspaceId), keyspaceId); - } - - private Map parseTables( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - Multimap vertices = rows.vertices().get(keyspaceId); - Multimap edges = rows.edges().get(keyspaceId); - for (AdminRow tableRow : rows.tables().get(keyspaceId)) { - DseTableMetadata table = tableParser.parseTable(tableRow, keyspaceId, types, vertices, edges); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return 
tablesBuilder.build(); - } - - private Map parseViews( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder viewsBuilder = ImmutableMap.builder(); - for (AdminRow viewRow : rows.views().get(keyspaceId)) { - DseViewMetadata view = viewParser.parseView(viewRow, keyspaceId, types); - if (view != null) { - viewsBuilder.put(view.getName(), view); - } - } - return viewsBuilder.build(); - } - - private Map parseFunctions( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder functionsBuilder = - ImmutableMap.builder(); - for (AdminRow functionRow : rows.functions().get(keyspaceId)) { - DseFunctionMetadata function = functionParser.parseFunction(functionRow, keyspaceId, types); - if (function != null) { - functionsBuilder.put(function.getSignature(), function); - } - } - return functionsBuilder.build(); - } - - private Map parseAggregates( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder aggregatesBuilder = - ImmutableMap.builder(); - for (AdminRow aggregateRow : rows.aggregates().get(keyspaceId)) { - DseAggregateMetadata aggregate = - aggregateParser.parseAggregate(aggregateRow, keyspaceId, types); - if (aggregate != null) { - aggregatesBuilder.put(aggregate.getSignature(), aggregate); - } - } - return aggregatesBuilder.build(); - } - - private DseKeyspaceMetadata parseVirtualKeyspace(AdminRow keyspaceRow) { - - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - - return new DefaultDseKeyspaceMetadata( - keyspaceId, - durableWrites, - true, - null, - Collections.emptyMap(), - Collections.emptyMap(), - parseVirtualTables(keyspaceId), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private Map parseVirtualTables(CqlIdentifier keyspaceId) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - for (AdminRow tableRow : 
rows.virtualTables().get(keyspaceId)) { - DseTableMetadata table = tableParser.parseVirtualTable(tableRow, keyspaceId); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return tablesBuilder.build(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java deleted file mode 100644 index 7fd4a5f0167..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseIndexMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseColumnMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseEdgeMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseIndexMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameCompositeParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RawColumn; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; 
-import com.datastax.oss.driver.internal.core.metadata.schema.parsing.TableParser; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DseTableParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(TableParser.class); - - public DseTableParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public DseTableMetadata parseTable( - AdminRow tableRow, - CqlIdentifier keyspaceId, - Map userTypes, - Multimap vertices, - Multimap edges) { - // Cassandra <= 2.2: - // CREATE TABLE system.schema_columnfamilies ( - // keyspace_name text, - // columnfamily_name text, - // bloom_filter_fp_chance double, - // caching text, - // cf_id uuid, - // column_aliases text, (2.1 only) - // comment text, - // compaction_strategy_class text, - // compaction_strategy_options text, - // comparator text, - // compression_parameters text, - // default_time_to_live int, - // default_validator text, - // dropped_columns map, - // gc_grace_seconds int, - // index_interval int, - // is_dense boolean, (2.1 only) - // key_aliases text, (2.1 only) - // key_validator text, - // local_read_repair_chance double, - // max_compaction_threshold int, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_compaction_threshold int, - // min_index_interval 
int, - // read_repair_chance double, - // speculative_retry text, - // subcomparator text, - // type text, - // value_alias text, (2.1 only) - // PRIMARY KEY (keyspace_name, columnfamily_name) - // ) WITH CLUSTERING ORDER BY (columnfamily_name ASC) - // - // Cassandra 3.0: - // CREATE TABLE system_schema.tables ( - // keyspace_name text, - // table_name text, - // bloom_filter_fp_chance double, - // caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // flags frozen>, - // gc_grace_seconds int, - // id uuid, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // PRIMARY KEY (keyspace_name, table_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC) - CqlIdentifier tableId = - CqlIdentifier.fromInternal( - tableRow.getString( - tableRow.contains("table_name") ? "table_name" : "columnfamily_name")); - - UUID uuid = tableRow.contains("id") ? 
tableRow.getUuid("id") : tableRow.getUuid("cf_id"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - boolean isCompactStorage; - if (tableRow.contains("flags")) { - Set flags = tableRow.getSetOfString("flags"); - boolean isDense = flags.contains("dense"); - boolean isSuper = flags.contains("super"); - boolean isCompound = flags.contains("compound"); - isCompactStorage = isSuper || isDense || !isCompound; - boolean isStaticCompact = !isSuper && !isDense && !isCompound; - if (isStaticCompact) { - RawColumn.pruneStaticCompactTableColumns(rawColumns); - } else if (isDense) { - RawColumn.pruneDenseTableColumnsV3(rawColumns); - } - } else { - boolean isDense = tableRow.getBoolean("is_dense"); - if (isDense) { - RawColumn.pruneDenseTableColumnsV2(rawColumns); - } - DataTypeClassNameCompositeParser.ParseResult comparator = - new DataTypeClassNameCompositeParser() - .parseWithComposite(tableRow.getString("comparator"), keyspaceId, userTypes, context); - isCompactStorage = isDense || !comparator.isComposite; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - ImmutableMap.Builder indexesBuilder = ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - DseColumnMetadata column = - new DefaultDseColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case 
RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - - DseIndexMetadata index = buildLegacyIndex(raw, column); - if (index != null) { - indexesBuilder.put(index.getName(), index); - } - } - - Map options; - try { - options = parseOptions(tableRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. - Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - tableId, - e); - options = Collections.emptyMap(); - } - - Collection indexRows = - rows.indexes().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId); - for (AdminRow indexRow : indexRows) { - DseIndexMetadata index = buildModernIndex(keyspaceId, tableId, indexRow); - indexesBuilder.put(index.getName(), index); - } - - return new DefaultDseTableMetadata( - keyspaceId, - tableId, - uuid, - isCompactStorage, - false, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options, - indexesBuilder.build(), - buildVertex(tableId, vertices), - buildEdge(tableId, edges, vertices)); - } - - DseTableMetadata parseVirtualTable(AdminRow tableRow, CqlIdentifier keyspaceId) { - - CqlIdentifier tableId = CqlIdentifier.fromInternal(tableRow.getString("table_name")); - - List rawColumns = - RawColumn.toRawColumns( - rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - 
ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = - rows.dataTypeParser().parse(keyspaceId, raw.dataType, Collections.emptyMap(), context); - DseColumnMetadata column = - new DefaultDseColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - } - - allColumnsBuilder.put(column.getName(), column); - } - - return new DefaultDseTableMetadata( - keyspaceId, - tableId, - null, - false, - true, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - Collections.emptyMap(), - Collections.emptyMap(), - null, - null); - } - - // In C*<=2.2, index information is stored alongside the column. 
- private DseIndexMetadata buildLegacyIndex(RawColumn raw, ColumnMetadata column) { - if (raw.indexName == null) { - return null; - } - return new DefaultDseIndexMetadata( - column.getKeyspace(), - column.getParent(), - CqlIdentifier.fromInternal(raw.indexName), - IndexKind.valueOf(raw.indexType), - buildLegacyIndexTarget(column, raw.indexOptions), - raw.indexOptions); - } - - private static String buildLegacyIndexTarget(ColumnMetadata column, Map options) { - String columnName = column.getName().asCql(true); - DataType columnType = column.getType(); - if (options.containsKey("index_keys")) { - return String.format("keys(%s)", columnName); - } - if (options.containsKey("index_keys_and_values")) { - return String.format("entries(%s)", columnName); - } - if ((columnType instanceof ListType && ((ListType) columnType).isFrozen()) - || (columnType instanceof SetType && ((SetType) columnType).isFrozen()) - || (columnType instanceof MapType && ((MapType) columnType).isFrozen())) { - return String.format("full(%s)", columnName); - } - // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 - return columnName; - } - - // In C*>=3.0, index information is stored in a dedicated table: - // CREATE TABLE system_schema.indexes ( - // keyspace_name text, - // table_name text, - // index_name text, - // kind text, - // options frozen>, - // PRIMARY KEY (keyspace_name, table_name, index_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC, index_name ASC) - private DseIndexMetadata buildModernIndex( - CqlIdentifier keyspaceId, CqlIdentifier tableId, AdminRow row) { - CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("index_name")); - IndexKind kind = IndexKind.valueOf(row.getString("kind")); - Map options = row.getMapOfStringToString("options"); - String target = options.get("target"); - return new DefaultDseIndexMetadata(keyspaceId, tableId, name, kind, target, options); - } - - private DseVertexMetadata buildVertex( - CqlIdentifier 
tableId, Multimap keyspaceVertices) { - - if (keyspaceVertices == null) { - return null; - } - Collection tableVertices = keyspaceVertices.get(tableId); - if (tableVertices == null || tableVertices.isEmpty()) { - return null; - } - - AdminRow row = tableVertices.iterator().next(); - return new DefaultDseVertexMetadata(getLabel(row)); - } - - private DseEdgeMetadata buildEdge( - CqlIdentifier tableId, - Multimap keyspaceEdges, - Multimap keyspaceVertices) { - - if (keyspaceEdges == null) { - return null; - } - - Collection tableEdges = keyspaceEdges.get(tableId); - if (tableEdges == null || tableEdges.isEmpty()) { - return null; - } - - AdminRow row = tableEdges.iterator().next(); - - CqlIdentifier fromTable = CqlIdentifier.fromInternal(row.getString("from_table")); - - CqlIdentifier toTable = CqlIdentifier.fromInternal(row.getString("to_table")); - - return new DefaultDseEdgeMetadata( - getLabel(row), - fromTable, - findVertexLabel(fromTable, keyspaceVertices, "incoming"), - CqlIdentifiers.wrapInternal(row.getListOfString("from_partition_key_columns")), - CqlIdentifiers.wrapInternal(row.getListOfString("from_clustering_columns")), - toTable, - findVertexLabel(toTable, keyspaceVertices, "outgoing"), - CqlIdentifiers.wrapInternal(row.getListOfString("to_partition_key_columns")), - CqlIdentifiers.wrapInternal(row.getListOfString("to_clustering_columns"))); - } - - private CqlIdentifier getLabel(AdminRow row) { - String rawLabel = row.getString("label_name"); - return (rawLabel == null || rawLabel.isEmpty()) ? null : CqlIdentifier.fromInternal(rawLabel); - } - - // system_schema.edges only contains vertex table names. We also expose the labels in our metadata - // objects, so we need to look them up in system_schema.vertices. - private CqlIdentifier findVertexLabel( - CqlIdentifier table, - Multimap keyspaceVertices, - String directionForErrorMessage) { - Collection tableVertices = - (keyspaceVertices == null) ? 
null : keyspaceVertices.get(table); - if (tableVertices == null || tableVertices.isEmpty()) { - throw new IllegalArgumentException( - String.format( - "Missing vertex definition for %s table %s", - directionForErrorMessage, table.asCql(true))); - } - - AdminRow row = tableVertices.iterator().next(); - return getLabel(row); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java deleted file mode 100644 index 07a1e2b5c39..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseColumnMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseViewMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RawColumn; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DseViewParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(DseViewParser.class); - - public DseViewParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public DseViewMetadata parseView( - AdminRow viewRow, 
CqlIdentifier keyspaceId, Map userTypes) { - // Cassandra 3.0 (no views in earlier versions): - // CREATE TABLE system_schema.views ( - // keyspace_name text, - // view_name text, - // base_table_id uuid, - // base_table_name text, - // bloom_filter_fp_chance double, - // caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // gc_grace_seconds int, - // id uuid, - // include_all_columns boolean, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // where_clause text, - // PRIMARY KEY (keyspace_name, view_name) - // ) WITH CLUSTERING ORDER BY (view_name ASC) - CqlIdentifier viewId = CqlIdentifier.fromInternal(viewRow.getString("view_name")); - - UUID uuid = viewRow.getUuid("id"); - CqlIdentifier baseTableId = CqlIdentifier.fromInternal(viewRow.getString("base_table_name")); - boolean includesAllColumns = - MoreObjects.firstNonNull(viewRow.getBoolean("include_all_columns"), false); - String whereClause = viewRow.getString("where_clause"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing VIEW refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - viewId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - DseColumnMetadata column = - new DefaultDseColumnMetadata( - 
keyspaceId, viewId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - } - - Map options; - try { - options = parseOptions(viewRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. - Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - viewId, - e); - options = Collections.emptyMap(); - } - - return new DefaultDseViewMetadata( - keyspaceId, - viewId, - baseTableId, - includesAllColumns, - whereClause, - uuid, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java deleted file mode 100644 index 13238519e06..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.protocol; - -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.zip.CRC32; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -/** - * Minimal implementation of {@link PrimitiveCodec} for Tinkerpop {@link Buffer} instances. - * - *

This approach represents a temporary design compromise. PrimitiveCodec is primarily used for - * handling data directly from Netty, a task satisfied by {@link ByteBufPrimitiveCodec}. But - * PrimitiveCodec is also used to implement graph serialization for some of the "dynamic" types - * (notably UDTs and tuples). Since we're converting graph serialization to use the new Tinkerpop - * Buffer API we need just enough of a PrimitiveCodec impl to satisfy the needs of graph - * serialization... and nothing more. - * - *

A more explicit approach would be to change graph serialization to use a different interface, - * some kind of subset of PrimitiveCodec.... and then make PrimitiveCodec extend this interface. - * This is left as future work for now since it involves changes to the native-protocol lib(s). - */ -public class TinkerpopBufferPrimitiveCodec implements PrimitiveCodec { - - private final DseNettyBufferFactory factory; - - public TinkerpopBufferPrimitiveCodec(DseNettyBufferFactory factory) { - this.factory = factory; - } - - @Override - public Buffer allocate(int size) { - // Note: we use io() here to match up to what ByteBufPrimitiveCodec does, but be warned that - // ByteBufs created in this way don't support the array() method used elsewhere in this codec - // (readString() specifically). As such usage of this method to create Buffer instances is - // discouraged; we have a factory for that. - return this.factory.io(size, size); - } - - @Override - public void release(Buffer toRelease) { - toRelease.release(); - } - - @Override - public int sizeOf(Buffer toMeasure) { - return toMeasure.readableBytes(); - } - - // TODO - @Override - public Buffer concat(Buffer left, Buffer right) { - boolean leftReadable = left.readableBytes() > 0; - boolean rightReadable = right.readableBytes() > 0; - if (!(leftReadable || rightReadable)) { - return factory.heap(); - } - if (!leftReadable) { - return right; - } - if (!rightReadable) { - return left; - } - Buffer rv = factory.composite(left, right); - // c.readerIndex() is 0, which is the first readable byte in left - rv.writerIndex( - left.writerIndex() - left.readerIndex() + right.writerIndex() - right.readerIndex()); - return rv; - } - - @Override - public void markReaderIndex(Buffer source) { - throw new UnsupportedOperationException(); - } - - @Override - public void resetReaderIndex(Buffer source) { - throw new UnsupportedOperationException(); - } - - @Override - public byte readByte(Buffer source) { - return source.readByte(); - } 
- - @Override - public int readInt(Buffer source) { - return source.readInt(); - } - - @Override - public int readInt(Buffer source, int offset) { - throw new UnsupportedOperationException(); - } - - @Override - public InetAddress readInetAddr(Buffer source) { - int length = readByte(source) & 0xFF; - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return newInetAddress(bytes); - } - - @Override - public long readLong(Buffer source) { - return source.readLong(); - } - - @Override - public int readUnsignedShort(Buffer source) { - return source.readShort() & 0xFFFF; - } - - @Override - public ByteBuffer readBytes(Buffer source) { - int length = readInt(source); - if (length < 0) return null; - return source.nioBuffer(source.readerIndex(), length); - } - - @Override - public byte[] readShortBytes(Buffer source) { - try { - int length = readUnsignedShort(source); - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return bytes; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read a byte array preceded by its 2 bytes length"); - } - } - - // Copy of PrimitiveCodec impl - @Override - public String readString(Buffer source) { - int length = readUnsignedShort(source); - return readString(source, length); - } - - @Override - public String readLongString(Buffer source) { - int length = readInt(source); - return readString(source, length); - } - - @Override - public Buffer readRetainedSlice(Buffer source, int sliceLength) { - throw new UnsupportedOperationException(); - } - - @Override - public void updateCrc(Buffer source, CRC32 crc) { - throw new UnsupportedOperationException(); - } - - @Override - public void writeByte(byte b, Buffer dest) { - dest.writeByte(b); - } - - @Override - public void writeInt(int i, Buffer dest) { - dest.writeInt(i); - } - - @Override - public void writeInetAddr(InetAddress address, Buffer dest) { - byte[] bytes = address.getAddress(); - writeByte((byte) 
bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLong(long l, Buffer dest) { - dest.writeLong(l); - } - - @Override - public void writeUnsignedShort(int i, Buffer dest) { - dest.writeShort(i); - } - - // Copy of PrimitiveCodec impl - @Override - public void writeString(String s, Buffer dest) { - - byte[] bytes = s.getBytes(Charsets.UTF_8); - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLongString(String s, Buffer dest) { - byte[] bytes = s.getBytes(Charsets.UTF_8); - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeBytes(ByteBuffer bytes, Buffer dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.remaining(), dest); - dest.writeBytes(bytes.duplicate()); - } - } - - @Override - public void writeBytes(byte[] bytes, Buffer dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - } - - @Override - public void writeShortBytes(byte[] bytes, Buffer dest) { - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - // Based on PrimitiveCodec impl, although that method leverages some - // Netty built-ins which we have to do manually here - private static String readString(Buffer buff, int length) { - try { - - // Basically what io.netty.buffer.ByteBufUtil.decodeString() does minus some extra - // ByteBuf-specific ops - int offset; - byte[] bytes; - ByteBuffer byteBuff = buff.nioBuffer(); - if (byteBuff.hasArray()) { - - bytes = byteBuff.array(); - offset = byteBuff.arrayOffset(); - } else { - - bytes = new byte[length]; - byteBuff.get(bytes, 0, length); - offset = 0; - } - - String str = new String(bytes, offset, length, Charsets.UTF_8); - - // Ops against the NIO buffers don't impact the read/write indexes for he Buffer - // itself so we have to do that manually - buff.readerIndex(buff.readerIndex() + length); - return str; 
- } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read an UTF-8 serialized string of size " + length, e); - } - } - - // TODO: Code below copied directly from ByteBufPrimitiveCodec, probably want to consolidate this - // somewhere - private static InetAddress newInetAddress(byte[] bytes) { - try { - return InetAddress.getByAddress(bytes); - } catch (UnknownHostException e) { - // Per the Javadoc, the only way this can happen is if the length is illegal - throw new IllegalArgumentException( - String.format("Invalid address length: %d (%s)", bytes.length, Arrays.toString(bytes))); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java deleted file mode 100644 index 15e278260c5..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.search; - -import com.datastax.dse.driver.api.core.data.time.DateRangePrecision; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.text.ParseException; -import java.time.LocalDateTime; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoField; -import java.time.temporal.ChronoUnit; -import java.time.temporal.TemporalAdjusters; -import java.util.Calendar; -import java.util.Locale; -import java.util.Map; -import java.util.TimeZone; - -public class DateRangeUtil { - - /** Sets all the fields smaller than the given unit to their lowest possible value. */ - @NonNull - public static ZonedDateTime roundDown(@NonNull ZonedDateTime date, @NonNull ChronoUnit unit) { - switch (unit) { - case YEARS: - return date.with(TemporalAdjusters.firstDayOfYear()).truncatedTo(ChronoUnit.DAYS); - case MONTHS: - return date.with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS); - case DAYS: - case HOURS: - case MINUTES: - case SECONDS: - case MILLIS: - return date.truncatedTo(unit); - default: - throw new IllegalArgumentException("Unsupported unit for rounding: " + unit); - } - } - - /** Sets all the fields smaller than the given unit to their highest possible value. */ - @NonNull - public static ZonedDateTime roundUp(@NonNull ZonedDateTime date, @NonNull ChronoUnit unit) { - return roundDown(date, unit) - .plus(1, unit) - // Even though ZDT has nanosecond-precision, DSE only rounds to millisecond precision so be - // consistent with that - .minus(1, ChronoUnit.MILLIS); - } - - /** - * Parses the given string as a date in a range bound. - * - *

This method deliberately uses legacy time APIs, in order to be as close as possible to the - * server-side parsing logic. We want the client to behave exactly like the server, i.e. parsing a - * date locally and inlining it in a CQL query should always yield the same result as binding the - * date as a value. - */ - public static Calendar parseCalendar(String source) throws ParseException { - // The contents of this method are based on Lucene's DateRangePrefixTree#parseCalendar, released - // under the Apache License, Version 2.0. - // Following is the original notice from that file: - - // Licensed to the Apache Software Foundation (ASF) under one or more - // contributor license agreements. See the NOTICE file distributed with - // this work for additional information regarding copyright ownership. - // The ASF licenses this file to You under the Apache License, Version 2.0 - // (the "License"); you may not use this file except in compliance with - // the License. You may obtain a copy of the License at - // - // http://www.apache.org/licenses/LICENSE-2.0 - // - // Unless required by applicable law or agreed to in writing, software - // distributed under the License is distributed on an "AS IS" BASIS, - // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - // See the License for the specific language governing permissions and - // limitations under the License. - - if (source == null || source.isEmpty()) { - throw new IllegalArgumentException("Can't parse a null or blank string"); - } - - Calendar calendar = newCalendar(); - if (source.equals("*")) { - return calendar; - } - int offset = 0; // a pointer - try { - // year & era: - int lastOffset = - (source.charAt(source.length() - 1) == 'Z') ? 
source.length() - 1 : source.length(); - int hyphenIdx = source.indexOf('-', 1); // look past possible leading hyphen - if (hyphenIdx < 0) { - hyphenIdx = lastOffset; - } - int year = Integer.parseInt(source.substring(offset, hyphenIdx)); - calendar.set(Calendar.ERA, year <= 0 ? 0 : 1); - calendar.set(Calendar.YEAR, year <= 0 ? -1 * year + 1 : year); - offset = hyphenIdx + 1; - if (lastOffset < offset) { - return calendar; - } - - // NOTE: We aren't validating separator chars, and we unintentionally accept leading +/-. - // The str.substring()'s hopefully get optimized to be stack-allocated. - - // month: - calendar.set( - Calendar.MONTH, - Integer.parseInt(source.substring(offset, offset + 2)) - 1); // starts at 0 - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // day: - calendar.set(Calendar.DAY_OF_MONTH, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // hour: - calendar.set(Calendar.HOUR_OF_DAY, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // minute: - calendar.set(Calendar.MINUTE, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // second: - calendar.set(Calendar.SECOND, Integer.parseInt(source.substring(offset, offset + 2))); - offset += 3; - if (lastOffset < offset) { - return calendar; - } - // ms: - calendar.set(Calendar.MILLISECOND, Integer.parseInt(source.substring(offset, offset + 3))); - offset += 3; // last one, move to next char - if (lastOffset == offset) { - return calendar; - } - } catch (Exception e) { - ParseException pe = new ParseException("Improperly formatted date: " + source, offset); - pe.initCause(e); - throw pe; - } - throw new ParseException("Improperly formatted date: " + source, offset); - } - - private static Calendar newCalendar() { - Calendar calendar = Calendar.getInstance(UTC, 
Locale.ROOT); - calendar.clear(); - return calendar; - } - - private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); - - /** - * Returns the precision of a calendar obtained through {@link #parseCalendar(String)}, or {@code - * null} if no field was set. - */ - @Nullable - public static DateRangePrecision getPrecision(Calendar calendar) { - DateRangePrecision lastPrecision = null; - for (Map.Entry entry : FIELD_BY_PRECISION.entrySet()) { - DateRangePrecision precision = entry.getKey(); - int field = entry.getValue(); - if (calendar.isSet(field)) { - lastPrecision = precision; - } else { - break; - } - } - return lastPrecision; - } - - // Note: this could be a field on DateRangePrecision, but it's only used within this class so it's - // better not to expose it. - private static final ImmutableMap FIELD_BY_PRECISION = - ImmutableMap.builder() - .put(DateRangePrecision.YEAR, Calendar.YEAR) - .put(DateRangePrecision.MONTH, Calendar.MONTH) - .put(DateRangePrecision.DAY, Calendar.DAY_OF_MONTH) - .put(DateRangePrecision.HOUR, Calendar.HOUR_OF_DAY) - .put(DateRangePrecision.MINUTE, Calendar.MINUTE) - .put(DateRangePrecision.SECOND, Calendar.SECOND) - .put(DateRangePrecision.MILLISECOND, Calendar.MILLISECOND) - .build(); - - public static ZonedDateTime toZonedDateTime(Calendar calendar) { - int year = calendar.get(Calendar.YEAR); - if (calendar.get(Calendar.ERA) == 0) { - // BC era; 1 BC == 0 AD, 0 BD == -1 AD, etc - year -= 1; - if (year > 0) { - year = -year; - } - } - LocalDateTime localDateTime = - LocalDateTime.of( - year, - calendar.get(Calendar.MONTH) + 1, - calendar.get(Calendar.DAY_OF_MONTH), - calendar.get(Calendar.HOUR_OF_DAY), - calendar.get(Calendar.MINUTE), - calendar.get(Calendar.SECOND)); - localDateTime = - localDateTime.with(ChronoField.MILLI_OF_SECOND, calendar.get(Calendar.MILLISECOND)); - return ZonedDateTime.of(localDateTime, ZoneOffset.UTC); - } -} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java b/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java deleted file mode 100644 index 183f385aa4a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.SessionWrapper; -import net.jcip.annotations.ThreadSafe; - -/** - * @deprecated DSE functionality is now exposed directly on {@link CqlSession}. This class is - * preserved for backward compatibility, but {@link DefaultSession} should be used instead. 
- */ -@ThreadSafe -@Deprecated -public class DefaultDseSession extends SessionWrapper - implements com.datastax.dse.driver.api.core.DseSession { - - public DefaultDseSession(Session delegate) { - super(delegate); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java deleted file mode 100644 index 55da2a9475f..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec; - -import static com.datastax.oss.driver.internal.core.util.Dependency.ESRI; - -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DseTypeCodecsRegistrar { - - private static final Logger LOG = LoggerFactory.getLogger(DseTypeCodecsRegistrar.class); - - public static void registerDseCodecs(MutableCodecRegistry registry) { - registry.register(DseTypeCodecs.DATE_RANGE); - if (DefaultDependencyChecker.isPresent(ESRI)) { - registry.register(DseTypeCodecs.LINE_STRING, DseTypeCodecs.POINT, DseTypeCodecs.POLYGON); - } else { - LOG.debug("ESRI was not found on the classpath: geo codecs will not be available"); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java deleted file mode 100644 index afd8d6cf9f6..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec; - -import static com.datastax.oss.driver.internal.core.util.Dependency.ESRI; - -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.function.BooleanSupplier; - -@SuppressWarnings("unused") -public class DseTypeCodecsRegistrarSubstitutions { - - @TargetClass(value = DseTypeCodecsRegistrar.class, onlyWith = EsriMissing.class) - public static final class DseTypeCodecsRegistrarEsriMissing { - - @Substitute - public static void registerDseCodecs(MutableCodecRegistry registry) { - registry.register(DseTypeCodecs.DATE_RANGE); - } - } - - public static class EsriMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(ESRI); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java deleted file mode 100644 index f6309bc1860..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import static com.datastax.oss.driver.internal.core.util.Strings.isQuoted; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** Base class for geospatial type codecs. */ -@ThreadSafe -public abstract class GeometryCodec implements TypeCodec { - - @Nullable - @Override - public T decode(@Nullable ByteBuffer bb, @NonNull ProtocolVersion protocolVersion) { - return bb == null || bb.remaining() == 0 ? null : fromWellKnownBinary(bb.slice()); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable T geometry, @NonNull ProtocolVersion protocolVersion) { - return geometry == null ? 
null : toWellKnownBinary(geometry); - } - - @Nullable - @Override - public T parse(@Nullable String s) { - if (s == null) { - return null; - } - s = s.trim(); - if (s.isEmpty() || s.equalsIgnoreCase("NULL")) { - return null; - } - if (!isQuoted(s)) { - throw new IllegalArgumentException("Geometry values must be enclosed by single quotes"); - } - return fromWellKnownText(Strings.unquote(s)); - } - - @NonNull - @Override - public String format(@Nullable T geometry) throws IllegalArgumentException { - return geometry == null ? "NULL" : Strings.quote(toWellKnownText(geometry)); - } - - /** - * Creates an instance of this codec's geospatial type from its Well-known Text (WKT) representation. - * - * @param source the Well-known Text representation to parse. Cannot be null. - * @return A new instance of this codec's geospatial type. - * @throws IllegalArgumentException if the string does not contain a valid Well-known Text - * representation. - */ - @NonNull - protected abstract T fromWellKnownText(@NonNull String source); - - /** - * Creates an instance of a geospatial type from its Well-known Binary - * (WKB) representation. - * - * @param bb the Well-known Binary representation to parse. Cannot be null. - * @return A new instance of this codec's geospatial type. - * @throws IllegalArgumentException if the given {@link ByteBuffer} does not contain a valid - * Well-known Binary representation. - */ - @NonNull - protected abstract T fromWellKnownBinary(@NonNull ByteBuffer bb); - - /** - * Returns a Well-known Text (WKT) - * representation of the given geospatial object. - * - * @param geometry the geospatial object to convert. Cannot be null. - * @return A Well-known Text representation of the given object. - */ - @NonNull - protected abstract String toWellKnownText(@NonNull T geometry); - - /** - * Returns a Well-known - * Binary (WKB) representation of the given geospatial object. - * - * @param geometry the geospatial object to convert. Cannot be null. 
- * @return A Well-known Binary representation of the given object. - */ - @NonNull - protected abstract ByteBuffer toWellKnownBinary(@NonNull T geometry); -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java deleted file mode 100644 index bbec99a4103..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.esri.core.geometry.ogc.OGCLineString; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A custom type codec to use {@link LineString} instances in driver. - * - *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, - * it will automatically register this codec. - */ -@ThreadSafe -public class LineStringCodec extends GeometryCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(LineString.class); - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - protected LineString fromWellKnownText(@NonNull String source) { - return new DefaultLineString(DefaultGeometry.fromOgcWellKnownText(source, OGCLineString.class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == LineString.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof LineString; - } - - @NonNull - @Override - protected LineString fromWellKnownBinary(@NonNull ByteBuffer bb) { - return new DefaultLineString(DefaultGeometry.fromOgcWellKnownBinary(bb, OGCLineString.class)); - } - - @NonNull - @Override - protected String toWellKnownText(@NonNull LineString geometry) { - return geometry.asWellKnownText(); - } - - @NonNull - @Override - protected ByteBuffer toWellKnownBinary(@NonNull LineString geometry) { - return geometry.asWellKnownBinary(); - } - - @NonNull - @Override - public DataType getCqlType() { - return DseDataTypes.LINE_STRING; - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java deleted file mode 100644 index 5ebae64cbab..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.esri.core.geometry.ogc.OGCPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A custom type codec to use {@link Point} instances in the driver. - * - *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, - * it will automatically register this codec. - */ -@ThreadSafe -public class PointCodec extends GeometryCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(Point.class); - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DseDataTypes.POINT; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Point.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Point; - } - - @NonNull - @Override - protected String toWellKnownText(@NonNull Point geometry) { - return geometry.asWellKnownText(); - } - - @NonNull - @Override - protected ByteBuffer toWellKnownBinary(@NonNull Point geometry) { - return geometry.asWellKnownBinary(); - } - - @NonNull - @Override - protected Point fromWellKnownText(@NonNull String source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownText(source, OGCPoint.class)); - } - - @NonNull - @Override - protected Point fromWellKnownBinary(@NonNull ByteBuffer source) { - return new DefaultPoint(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPoint.class)); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java deleted file mode 100644 index 00a070a4b4a..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.esri.core.geometry.ogc.OGCPolygon; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A custom type codec to use {@link Polygon} instances in the driver. - * - *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, - * it will automatically register this codec. - */ -@ThreadSafe -public class PolygonCodec extends GeometryCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(Polygon.class); - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DseDataTypes.POLYGON; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Polygon.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Polygon; - } - - @NonNull - @Override - protected Polygon fromWellKnownText(@NonNull String source) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownText(source, OGCPolygon.class)); - } - - @NonNull - @Override - protected Polygon fromWellKnownBinary(@NonNull ByteBuffer bb) { - return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownBinary(bb, OGCPolygon.class)); - } - - @NonNull - @Override - protected String toWellKnownText(@NonNull Polygon geometry) { - return geometry.asWellKnownText(); - } - - @NonNull - @Override - protected ByteBuffer toWellKnownBinary(@NonNull Polygon geometry) { - return geometry.asWellKnownBinary(); - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java deleted file mode 100644 index e8a23e88848..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.time; - -import com.datastax.dse.driver.api.core.data.time.DateRange; -import com.datastax.dse.driver.api.core.data.time.DateRangeBound; -import com.datastax.dse.driver.api.core.data.time.DateRangePrecision; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.text.ParseException; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.Optional; - -public class DateRangeCodec implements TypeCodec { - - private static final GenericType JAVA_TYPE = GenericType.of(DateRange.class); - private static final DataType CQL_TYPE = DseDataTypes.DATE_RANGE; - - // e.g. [2001-01-01] - private static final byte DATE_RANGE_TYPE_SINGLE_DATE = 0x00; - // e.g. [2001-01-01 TO 2001-01-31] - private static final byte DATE_RANGE_TYPE_CLOSED_RANGE = 0x01; - // e.g. 
[2001-01-01 TO *] - private static final byte DATE_RANGE_TYPE_OPEN_RANGE_HIGH = 0x02; - // e.g. [* TO 2001-01-01] - private static final byte DATE_RANGE_TYPE_OPEN_RANGE_LOW = 0x03; - // [* TO *] - private static final byte DATE_RANGE_TYPE_BOTH_OPEN_RANGE = 0x04; - // * - private static final byte DATE_RANGE_TYPE_SINGLE_DATE_OPEN = 0x05; - - @NonNull - @Override - public GenericType getJavaType() { - return JAVA_TYPE; - } - - @NonNull - @Override - public DataType getCqlType() { - return CQL_TYPE; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == DateRange.class; - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable DateRange dateRange, @NonNull ProtocolVersion protocolVersion) { - if (dateRange == null) { - return null; - } - byte rangeType = encodeType(dateRange); - int bufferSize = 1; - DateRangeBound lowerBound = dateRange.getLowerBound(); - Optional maybeUpperBound = dateRange.getUpperBound(); - bufferSize += lowerBound.isUnbounded() ? 0 : 9; - bufferSize += maybeUpperBound.map(upperBound -> upperBound.isUnbounded() ? 0 : 9).orElse(0); - ByteBuffer buffer = ByteBuffer.allocate(bufferSize); - buffer.put(rangeType); - if (!lowerBound.isUnbounded()) { - put(buffer, lowerBound); - } - maybeUpperBound.ifPresent( - upperBound -> { - if (!upperBound.isUnbounded()) { - put(buffer, upperBound); - } - }); - return (ByteBuffer) buffer.flip(); - } - - private static byte encodeType(DateRange dateRange) { - if (dateRange.isSingleBounded()) { - return dateRange.getLowerBound().isUnbounded() - ? DATE_RANGE_TYPE_SINGLE_DATE_OPEN - : DATE_RANGE_TYPE_SINGLE_DATE; - } else { - DateRangeBound upperBound = - dateRange - .getUpperBound() - .orElseThrow( - () -> - new IllegalStateException("Upper bound should be set if !isSingleBounded()")); - if (dateRange.getLowerBound().isUnbounded()) { - return upperBound.isUnbounded() - ? 
DATE_RANGE_TYPE_BOTH_OPEN_RANGE - : DATE_RANGE_TYPE_OPEN_RANGE_LOW; - } else { - return upperBound.isUnbounded() - ? DATE_RANGE_TYPE_OPEN_RANGE_HIGH - : DATE_RANGE_TYPE_CLOSED_RANGE; - } - } - } - - private static void put(ByteBuffer buffer, DateRangeBound bound) { - buffer.putLong(bound.getTimestamp().toInstant().toEpochMilli()); - buffer.put(bound.getPrecision().getEncoding()); - } - - @Nullable - @Override - public DateRange decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - byte type = bytes.get(); - switch (type) { - case DATE_RANGE_TYPE_SINGLE_DATE: - return new DateRange(decodeLowerBound(bytes)); - case DATE_RANGE_TYPE_CLOSED_RANGE: - return new DateRange(decodeLowerBound(bytes), decodeUpperBound(bytes)); - case DATE_RANGE_TYPE_OPEN_RANGE_HIGH: - return new DateRange(decodeLowerBound(bytes), DateRangeBound.UNBOUNDED); - case DATE_RANGE_TYPE_OPEN_RANGE_LOW: - return new DateRange(DateRangeBound.UNBOUNDED, decodeUpperBound(bytes)); - case DATE_RANGE_TYPE_BOTH_OPEN_RANGE: - return new DateRange(DateRangeBound.UNBOUNDED, DateRangeBound.UNBOUNDED); - case DATE_RANGE_TYPE_SINGLE_DATE_OPEN: - return new DateRange(DateRangeBound.UNBOUNDED); - default: - throw new IllegalArgumentException("Unknown date range type: " + type); - } - } - - private static DateRangeBound decodeLowerBound(ByteBuffer bytes) { - long epochMilli = bytes.getLong(); - ZonedDateTime timestamp = - ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC); - DateRangePrecision precision = DateRangePrecision.fromEncoding(bytes.get()); - return DateRangeBound.lowerBound(timestamp, precision); - } - - private static DateRangeBound decodeUpperBound(ByteBuffer bytes) { - long epochMilli = bytes.getLong(); - ZonedDateTime timestamp = - ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC); - DateRangePrecision precision = DateRangePrecision.fromEncoding(bytes.get()); - 
return DateRangeBound.upperBound(timestamp, precision); - } - - @NonNull - @Override - public String format(@Nullable DateRange dateRange) { - return (dateRange == null) ? "NULL" : Strings.quote(dateRange.toString()); - } - - @Nullable - @Override - public DateRange parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - try { - return DateRange.parse(Strings.unquote(value)); - } catch (ParseException e) { - throw new IllegalArgumentException(String.format("Invalid date range literal: %s", value), e); - } - } -} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java b/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java deleted file mode 100644 index ea9ccd7d622..00000000000 --- a/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.util.concurrent; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Deque; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.atomic.AtomicReference; - -/** - * A concurrent queue with a limited size. - * - *

Once the queue is full, the insertion of the next element is delayed until space becomes - * available again; in the meantime, additional insertions are not allowed (in other words, there - * can be at most one "pending" element waiting on a full queue). - */ -public class BoundedConcurrentQueue { - - private final Deque elements = new ConcurrentLinkedDeque<>(); - private final AtomicReference state; - - public BoundedConcurrentQueue(int maxSize) { - this.state = new AtomicReference<>(new State(maxSize)); - } - - /** - * @return a stage that completes when the element is inserted. If there was still space in the - * queue, it will be already complete; if the queue was full, it will complete at a later - * point in time (triggered by a call to {@link #poll()}). This method must not be invoked - * again until the stage has completed. - * @throws IllegalStateException if the method is invoked before the stage returned by the - * previous call has completed. - */ - @NonNull - public CompletionStage offer(@NonNull ElementT element) { - while (true) { - State oldState = state.get(); - State newState = oldState.increment(); - if (state.compareAndSet(oldState, newState)) { - if (newState.spaceAvailable != null) { - return newState.spaceAvailable.thenApply( - (aVoid) -> { - elements.offer(element); - return element; - }); - } else { - elements.offer(element); - return CompletableFuture.completedFuture(element); - } - } - } - } - - @Nullable - public ElementT poll() { - while (true) { - State oldState = state.get(); - if (oldState.size == 0) { - return null; - } - State newState = oldState.decrement(); - if (state.compareAndSet(oldState, newState)) { - if (oldState.spaceAvailable != null) { - oldState.spaceAvailable.complete(null); - } - return elements.poll(); - } - } - } - - @Nullable - public ElementT peek() { - return elements.peek(); - } - - /** - * Note that this does not complete a pending call to {@link #offer(Object)}. 
We only use this - * method for terminal states where we want to dereference the contained elements. - */ - public void clear() { - elements.clear(); - } - - private static class State { - - private final int maxSize; - - final int size; // Number of elements in the queue, + 1 if one is waiting to get in - final CompletableFuture spaceAvailable; // Not null iff size == maxSize + 1 - - State(int maxSize) { - this(0, null, maxSize); - } - - private State(int size, CompletableFuture spaceAvailable, int maxSize) { - this.maxSize = maxSize; - this.size = size; - this.spaceAvailable = spaceAvailable; - } - - State increment() { - if (size > maxSize) { - throw new IllegalStateException( - "Can't call offer() until the stage returned by the previous offer() call has completed"); - } - int newSize = size + 1; - CompletableFuture newFuture = - (newSize == maxSize + 1) ? new CompletableFuture<>() : null; - return new State(newSize, newFuture, maxSize); - } - - State decrement() { - return new State(size - 1, null, maxSize); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java deleted file mode 100644 index b6f1bf93838..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.api.core;

import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
import com.datastax.oss.driver.shaded.guava.common.collect.Iterables;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

/**
 * Thrown when a query failed on all the coordinators it was tried on. This exception may wrap
 * multiple errors, that are available either as {@linkplain #getSuppressed() suppressed
 * exceptions}, or via {@link #getAllErrors()} where they are grouped by node.
 */
public class AllNodesFailedException extends DriverException {

  /** @deprecated Use {@link #fromErrors(List)} instead. */
  @NonNull
  @Deprecated
  public static AllNodesFailedException fromErrors(@Nullable Map<Node, Throwable> errors) {
    // An empty error map means no node was ever contacted.
    if (errors == null || errors.isEmpty()) {
      return new NoNodeAvailableException();
    } else {
      return new AllNodesFailedException(groupByNode(errors));
    }
  }

  @NonNull
  public static AllNodesFailedException fromErrors(
      @Nullable List<Entry<Node, Throwable>> errors) {
    if (errors == null || errors.isEmpty()) {
      return new NoNodeAvailableException();
    } else {
      return new AllNodesFailedException(groupByNode(errors));
    }
  }

  // All errors, grouped by the node they occurred on (immutable, insertion-ordered).
  private final Map<Node, List<Throwable>> errors;

  /** @deprecated Use {@link #AllNodesFailedException(String, ExecutionInfo, Iterable)} instead. */
  @Deprecated
  protected AllNodesFailedException(
      @NonNull String message,
      @Nullable ExecutionInfo executionInfo,
      @NonNull Map<Node, Throwable> errors) {
    super(message, executionInfo, null, true);
    this.errors = toDeepImmutableMap(groupByNode(errors));
    addSuppressedErrors();
  }

  protected AllNodesFailedException(
      @NonNull String message,
      @Nullable ExecutionInfo executionInfo,
      @NonNull Iterable<Entry<Node, List<Throwable>>> errors) {
    super(message, executionInfo, null, true);
    this.errors = toDeepImmutableMap(errors);
    addSuppressedErrors();
  }

  // Mirror every per-node error as a suppressed exception for standard stack traces.
  private void addSuppressedErrors() {
    for (List<Throwable> nodeErrors : this.errors.values()) {
      for (Throwable error : nodeErrors) {
        addSuppressed(error);
      }
    }
  }

  private AllNodesFailedException(Map<Node, List<Throwable>> errors) {
    this(
        buildMessage(
            String.format("All %d node(s) tried for the query failed", errors.size()), errors),
        null,
        errors.entrySet());
  }

  // Builds a message summarizing the first (up to 3) nodes' errors.
  private static String buildMessage(String baseMessage, Map<Node, List<Throwable>> errors) {
    int limit = Math.min(errors.size(), 3);
    Iterator<Entry<Node, List<Throwable>>> shownNodes =
        Iterables.limit(errors.entrySet(), limit).iterator();
    StringBuilder details = new StringBuilder();
    while (shownNodes.hasNext()) {
      Entry<Node, List<Throwable>> entry = shownNodes.next();
      details.append(entry.getKey()).append(": ").append(entry.getValue());
      if (shownNodes.hasNext()) {
        details.append(", ");
      }
    }
    return String.format(
        "%s (showing first %d nodes, use getAllErrors() for more): %s",
        baseMessage, limit, details);
  }

  /**
   * An immutable map containing the first error on each tried node.
   *
   * @deprecated Use {@link #getAllErrors()} instead.
   */
  @NonNull
  @Deprecated
  public Map<Node, Throwable> getErrors() {
    ImmutableMap.Builder<Node, Throwable> firstErrors = ImmutableMap.builder();
    for (Entry<Node, List<Throwable>> entry : errors.entrySet()) {
      List<Throwable> nodeErrors = entry.getValue();
      if (!nodeErrors.isEmpty()) {
        firstErrors.put(entry.getKey(), nodeErrors.get(0));
      }
    }
    return firstErrors.build();
  }

  /** An immutable map containing all errors on each tried node. */
  @NonNull
  public Map<Node, List<Throwable>> getAllErrors() {
    return errors;
  }

  @NonNull
  @Override
  public DriverException copy() {
    return new AllNodesFailedException(getMessage(), getExecutionInfo(), errors.entrySet());
  }

  /** Returns a copy of this exception with a different summary message. */
  @NonNull
  public AllNodesFailedException reword(String newMessage) {
    return new AllNodesFailedException(
        buildMessage(newMessage, errors), getExecutionInfo(), errors.entrySet());
  }

  private static Map<Node, List<Throwable>> groupByNode(Map<Node, Throwable> errors) {
    return groupByNode(errors.entrySet());
  }

  private static Map<Node, List<Throwable>> groupByNode(
      Iterable<Entry<Node, Throwable>> errors) {
    // no need for immutable collections here
    Map<Node, List<Throwable>> grouped = new LinkedHashMap<>();
    for (Entry<Node, Throwable> entry : errors) {
      grouped.computeIfAbsent(entry.getKey(), node -> new ArrayList<>()).add(entry.getValue());
    }
    return grouped;
  }

  private static Map<Node, List<Throwable>> toDeepImmutableMap(
      Map<Node, List<Throwable>> errors) {
    return toDeepImmutableMap(errors.entrySet());
  }

  private static Map<Node, List<Throwable>> toDeepImmutableMap(
      Iterable<Entry<Node, List<Throwable>>> errors) {
    ImmutableMap.Builder<Node, List<Throwable>> builder = ImmutableMap.builder();
    for (Entry<Node, List<Throwable>> entry : errors) {
      builder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue()));
    }
    return builder.build();
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.api.core;

import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation;
import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.concurrent.CompletionStage;

/**
 * An object that can be closed in an asynchronous, non-blocking manner.
 *
 * <p>For convenience, this extends the JDK's {@code AutoCloseable} in order to be usable in
 * try-with-resource blocks (in that case, the blocking {@link #close()} will be used).
 */
public interface AsyncAutoCloseable extends AutoCloseable {

  /**
   * Returns a stage that will complete when {@link #close()} or {@link #forceCloseAsync()} is
   * called, and the shutdown sequence completes.
   */
  @NonNull
  CompletionStage<Void> closeFuture();

  /**
   * Whether shutdown has completed.
   *
   * <p>This is a shortcut for {@code closeFuture().toCompletableFuture().isDone()}.
   */
  default boolean isClosed() {
    return closeFuture().toCompletableFuture().isDone();
  }

  /**
   * Initiates an orderly shutdown: no new requests are accepted, but all pending requests are
   * allowed to complete normally.
   *
   * @return a stage that will complete when the shutdown sequence is complete. Multiple calls to
   *     this method or {@link #forceCloseAsync()} always return the same instance.
   */
  @NonNull
  CompletionStage<Void> closeAsync();

  /**
   * Initiates a forced shutdown of this instance: no new requests are accepted, and all pending
   * requests will complete with an exception.
   *
   * @return a stage that will complete when the shutdown sequence is complete. Multiple calls to
   *     this method or {@link #close()} always return the same instance.
   */
  @NonNull
  CompletionStage<Void> forceCloseAsync();

  /**
   * {@inheritDoc}
   *
   * <p>This method is implemented by calling {@link #closeAsync()} and blocking on the result. This
   * should not be called on a driver thread.
   */
  @Override
  default void close() {
    // Blocking on an internal thread could deadlock the driver: fail fast instead.
    BlockingOperation.checkNotDriverThread();
    CompletableFutures.getUninterruptibly(closeAsync().toCompletableFuture());
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.api.core;

import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
import com.datastax.oss.driver.internal.core.AsyncPagingIterableWrapper;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.Iterator;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;

/**
 * An iterable of elements which are fetched asynchronously by the driver, possibly in multiple
 * requests.
 */
public interface AsyncPagingIterable<
    ElementT, SelfT extends AsyncPagingIterable<ElementT, SelfT>> {

  /** Metadata about the columns returned by the CQL request that was used to build this result. */
  @NonNull
  ColumnDefinitions getColumnDefinitions();

  /** Returns {@linkplain ExecutionInfo information about the execution} of this page of results. */
  @NonNull
  ExecutionInfo getExecutionInfo();

  /** How many rows are left before the current page is exhausted. */
  int remaining();

  /**
   * The elements in the current page. To keep iterating beyond that, use {@link #hasMorePages()}
   * and {@link #fetchNextPage()}.
   *
   * <p>Note that this method always returns the same object, and that that object can only be
   * iterated once: elements are "consumed" as they are read.
   */
  @NonNull
  Iterable<ElementT> currentPage();

  /**
   * Returns the next element, or {@code null} if the results are exhausted.
   *
   * <p>This is convenient for queries that are known to return exactly one element, for example
   * count queries.
   */
  @Nullable
  default ElementT one() {
    Iterator<ElementT> iterator = currentPage().iterator();
    return iterator.hasNext() ? iterator.next() : null;
  }

  /**
   * Whether there are more pages of results. If so, call {@link #fetchNextPage()} to fetch the next
   * one asynchronously.
   */
  boolean hasMorePages();

  /**
   * Fetch the next page of results asynchronously.
   *
   * @throws IllegalStateException if there are no more pages. Use {@link #hasMorePages()} to check
   *     if you can call this method.
   */
  @NonNull
  CompletionStage<SelfT> fetchNextPage() throws IllegalStateException;

  /**
   * If the query that produced this result was a CQL conditional update, indicate whether it was
   * successfully applied.
   *
   * <p>For consistency, this method always returns {@code true} for non-conditional queries
   * (although there is no reason to call the method in that case). This is also the case for
   * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF
   * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column.
   *
   * <p>Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (<a
   * href="https://issues.apache.org/jira/browse/CASSANDRA-7337">CASSANDRA-7337</a>) causes this
   * method to always return {@code true} for batches containing conditional queries.
   */
  boolean wasApplied();

  /**
   * Creates a new instance by transforming each element of this iterable with the provided
   * function.
   *
   * <p>Note that both instances share the same underlying data: consuming elements from the
   * transformed iterable will also consume them from this object, and vice-versa.
   */
  default <TargetElementT> MappedAsyncPagingIterable<TargetElementT> map(
      Function<? super ElementT, ? extends TargetElementT> elementMapper) {
    return new AsyncPagingIterableWrapper<>(this, elementMapper);
  }
}

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code ConsistencyLevel}s are {@link DefaultConsistencyLevel} instances. - */ -public interface ConsistencyLevel { - - ConsistencyLevel ANY = DefaultConsistencyLevel.ANY; - ConsistencyLevel ONE = DefaultConsistencyLevel.ONE; - ConsistencyLevel TWO = DefaultConsistencyLevel.TWO; - ConsistencyLevel THREE = DefaultConsistencyLevel.THREE; - ConsistencyLevel QUORUM = DefaultConsistencyLevel.QUORUM; - ConsistencyLevel ALL = DefaultConsistencyLevel.ALL; - ConsistencyLevel LOCAL_ONE = DefaultConsistencyLevel.LOCAL_ONE; - ConsistencyLevel LOCAL_QUORUM = DefaultConsistencyLevel.LOCAL_QUORUM; - ConsistencyLevel EACH_QUORUM = DefaultConsistencyLevel.EACH_QUORUM; - ConsistencyLevel SERIAL = DefaultConsistencyLevel.SERIAL; - ConsistencyLevel LOCAL_SERIAL = DefaultConsistencyLevel.LOCAL_SERIAL; - - /** The numerical value that the level is encoded to in protocol frames. */ - int getProtocolCode(); - - /** The textual representation of the level in configuration files. */ - @NonNull - String name(); - - /** Whether this consistency level applies to the local datacenter only. */ - boolean isDcLocal(); - - /** - * Whether this consistency level is serial, that is, applies only to the "paxos" phase of a lightweight - * transaction. - * - *

Serial consistency levels are only meaningful when executing conditional updates ({@code - * INSERT}, {@code UPDATE} or {@code DELETE} statements with an {@code IF} condition). - */ - boolean isSerial(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java deleted file mode 100644 index 82e4c2b30a6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Locale; -import net.jcip.annotations.Immutable; - -/** - * The identifier of CQL element (keyspace, table, column, etc). - * - *

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.api.core;

import com.datastax.oss.driver.internal.core.util.Strings;
import com.datastax.oss.driver.shaded.guava.common.base.Preconditions;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.util.Locale;
import net.jcip.annotations.Immutable;

/**
 * The identifier of CQL element (keyspace, table, column, etc).
 *
 * <p>It has two representations:
 *
 * <ul>
 *   <li>the "CQL" form, which is how you would type the identifier in a CQL query. It is
 *       case-insensitive unless enclosed in double quotation marks; in addition, identifiers that
 *       contain special characters (anything other than alphanumeric and underscore), or match CQL
 *       keywords, must be double-quoted (with inner double quotes escaped as {@code ""}).
 *   <li>the "internal" form, which is how the name is stored in Cassandra system tables. It is
 *       lower-case for case-insensitive identifiers, and in the exact case for case-sensitive
 *       identifiers.
 * </ul>
 *
 * <p>Examples:
 *
 * <table summary="examples">
 *   <tr><th>Create statement</th><th>Case-sensitive?</th><th>CQL id</th><th>Internal id</th></tr>
 *   <tr><td>CREATE TABLE t(foo int PRIMARY KEY)</td><td>No</td><td>foo</td><td>foo</td></tr>
 *   <tr><td>CREATE TABLE t(Foo int PRIMARY KEY)</td><td>No</td><td>foo</td><td>foo</td></tr>
 *   <tr><td>CREATE TABLE t("Foo" int PRIMARY KEY)</td><td>Yes</td><td>"Foo"</td><td>Foo</td></tr>
 *   <tr><td>CREATE TABLE t("foo bar" int PRIMARY KEY)</td><td>Yes</td><td>"foo bar"</td><td>foo
 *       bar</td></tr>
 *   <tr><td>CREATE TABLE t("foo""bar" int PRIMARY KEY)</td><td>Yes</td><td>"foo""bar"</td><td>foo"bar</td></tr>
 *   <tr><td>CREATE TABLE t("create" int PRIMARY KEY)</td><td>Yes (reserved
 *       keyword)</td><td>"create"</td><td>create</td></tr>
 * </table>
 *
 * <p>This class provides a common representation and avoids any ambiguity about which form the
 * identifier is in. Driver clients will generally want to create instances from the CQL form with
 * {@link #fromCql(String)}.
 *
 * <p>There is no internal caching; if you reuse the same identifiers often, consider caching them
 * in your application.
 */
@Immutable
public class CqlIdentifier implements Serializable {

  private static final long serialVersionUID = 1;

  // IMPLEMENTATION NOTES:
  // This is used internally, and for all API methods where the overhead of requiring the client to
  // create an instance is acceptable (metadata, statement.getKeyspace, etc.)
  // One exception is named getters, where we keep raw strings with the 3.x rules.

  /** Creates an identifier from its {@link CqlIdentifier CQL form}. */
  @NonNull
  public static CqlIdentifier fromCql(@NonNull String cql) {
    Preconditions.checkNotNull(cql, "cql must not be null");
    final String internal;
    if (Strings.isDoubleQuoted(cql)) {
      // Quoted: exact case, with the outer quotes stripped and inner "" unescaped.
      internal = Strings.unDoubleQuote(cql);
    } else {
      // Unquoted: case-insensitive, stored lower-case. Reject identifiers that would have
      // required quoting (special characters or reserved keywords).
      internal = cql.toLowerCase(Locale.ROOT);
      Preconditions.checkArgument(
          !Strings.needsDoubleQuotes(internal), "Invalid CQL form [%s]: needs double quotes", cql);
    }
    return fromInternal(internal);
  }

  /** Creates an identifier from its {@link CqlIdentifier internal form}. */
  @NonNull
  public static CqlIdentifier fromInternal(@NonNull String internal) {
    Preconditions.checkNotNull(internal, "internal must not be null");
    return new CqlIdentifier(internal);
  }

  /** @serial */
  private final String internal;

  private CqlIdentifier(String internal) {
    this.internal = internal;
  }

  /**
   * Returns the identifier in the "internal" format.
   *
   * @return the identifier in its exact case, unquoted.
   */
  @NonNull
  public String asInternal() {
    return this.internal;
  }

  /**
   * Returns the identifier in a format appropriate for concatenation in a CQL query.
   *
   * @param pretty if {@code true}, use the shortest possible representation: if the identifier is
   *     case-insensitive, an unquoted, lower-case string, otherwise the double-quoted form. If
   *     {@code false}, always use the double-quoted form (this is slightly more efficient since we
   *     don't need to inspect the string).
   */
  @NonNull
  public String asCql(boolean pretty) {
    if (pretty) {
      return Strings.needsDoubleQuotes(internal) ? Strings.doubleQuote(internal) : internal;
    } else {
      return Strings.doubleQuote(internal);
    }
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof CqlIdentifier) {
      CqlIdentifier that = (CqlIdentifier) other;
      return this.internal.equals(that.internal);
    } else {
      return false;
    }
  }

  @Override
  public int hashCode() {
    return internal.hashCode();
  }

  @Override
  public String toString() {
    return internal;
  }

  // Validate deserialized state: the internal form must never be null.
  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    Preconditions.checkNotNull(internal, "internal must not be null");
  }
}
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession; -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveSession; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveSession; -import com.datastax.dse.driver.api.core.graph.GraphSession; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphSession; -import com.datastax.oss.driver.api.core.cql.AsyncCqlSession; -import com.datastax.oss.driver.api.core.cql.SyncCqlSession; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The default session type built by the driver. - * - *

It provides user-friendly execution methods for: - * - *

    - *
  • CQL requests: synchronous, asynchronous or reactive mode; - *
  • requests specific to DataStax Enterprise: graph and continuous paging. - *
- * - * Client applications can use this interface even if they don't need all the features. In - * particular, it can be used with a regular Apache Cassandra ® cluster, as long as you don't - * call any of the DSE-specific execute methods. If you're in that situation, you might also want to - * exclude certain dependencies from your classpath (see the "Integration" page in the user manual). - * - *

Note that the name "CQL session" is no longer really accurate since this interface can now - * execute other request types; but it was preserved for backward compatibility with previous driver - * versions. - */ -public interface CqlSession - extends Session, - SyncCqlSession, - AsyncCqlSession, - ReactiveSession, - ContinuousSession, - GraphSession, - ContinuousReactiveSession, - ReactiveGraphSession { - - /** - * Returns a builder to create a new instance. - * - *

Note that this builder is mutable and not thread-safe. - * - * @return {@code CqlSessionBuilder} to create a new instance. - */ - @NonNull - static CqlSessionBuilder builder() { - return new CqlSessionBuilder(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java deleted file mode 100644 index 4598c078dca..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.NotThreadSafe; - -/** - * Helper class to build a {@link CqlSession} instance. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class CqlSessionBuilder extends SessionBuilder { - - @Override - protected CqlSession wrap(@NonNull CqlSession defaultSession) { - return defaultSession; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java deleted file mode 100644 index 2e5a4a6f022..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** A default consistency level supported by the driver out of the box. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.api.core;

import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
import com.datastax.oss.protocol.internal.ProtocolConstants;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.Map;

/** A default consistency level supported by the driver out of the box. */
public enum DefaultConsistencyLevel implements ConsistencyLevel {
  ANY(ProtocolConstants.ConsistencyLevel.ANY),
  ONE(ProtocolConstants.ConsistencyLevel.ONE),
  TWO(ProtocolConstants.ConsistencyLevel.TWO),
  THREE(ProtocolConstants.ConsistencyLevel.THREE),
  QUORUM(ProtocolConstants.ConsistencyLevel.QUORUM),
  ALL(ProtocolConstants.ConsistencyLevel.ALL),
  LOCAL_ONE(ProtocolConstants.ConsistencyLevel.LOCAL_ONE),
  LOCAL_QUORUM(ProtocolConstants.ConsistencyLevel.LOCAL_QUORUM),
  EACH_QUORUM(ProtocolConstants.ConsistencyLevel.EACH_QUORUM),

  SERIAL(ProtocolConstants.ConsistencyLevel.SERIAL),
  LOCAL_SERIAL(ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL),
  ;
  // Note that, for the sake of convenience, we also expose shortcuts to these constants on the
  // ConsistencyLevel interface. If you add a new enum constant, remember to update the interface as
  // well.

  private final int protocolCode;

  DefaultConsistencyLevel(int protocolCode) {
    this.protocolCode = protocolCode;
  }

  @Override
  public int getProtocolCode() {
    return protocolCode;
  }

  /**
   * Looks up the level matching a protocol code.
   *
   * @throws IllegalArgumentException if the code does not match any level.
   */
  @NonNull
  public static DefaultConsistencyLevel fromCode(int code) {
    DefaultConsistencyLevel level = BY_CODE.get(code);
    if (level == null) {
      throw new IllegalArgumentException("Unknown code: " + code);
    }
    return level;
  }

  @Override
  public boolean isDcLocal() {
    return this == LOCAL_ONE || this == LOCAL_QUORUM || this == LOCAL_SERIAL;
  }

  @Override
  public boolean isSerial() {
    return this == SERIAL || this == LOCAL_SERIAL;
  }

  private static final Map<Integer, DefaultConsistencyLevel> BY_CODE = mapByCode(values());

  private static Map<Integer, DefaultConsistencyLevel> mapByCode(
      DefaultConsistencyLevel[] levels) {
    ImmutableMap.Builder<Integer, DefaultConsistencyLevel> byCode = ImmutableMap.builder();
    for (DefaultConsistencyLevel level : levels) {
      byCode.put(level.protocolCode, level);
    }
    return byCode.build();
  }
}
b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java deleted file mode 100644 index 91b45fc506a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.protocol.internal.ProtocolConstants; - -/** - * A protocol version supported by default by the driver. - * - *

Legacy versions 1 (Cassandra 1.2) and 2 (Cassandra 2.0) are not supported anymore. - */ -public enum DefaultProtocolVersion implements ProtocolVersion { - - /** Version 3, supported by Cassandra 2.1 and above. */ - V3(ProtocolConstants.Version.V3, false), - - /** Version 4, supported by Cassandra 2.2 and above. */ - V4(ProtocolConstants.Version.V4, false), - - /** Version 5, supported by Cassandra 4.0 and above. */ - V5(ProtocolConstants.Version.V5, false), - - /** - * Version 6, currently supported as a beta preview in Cassandra 4.0 and above. - * - *

Do not use this in production. - * - * @see ProtocolVersion#isBeta() - */ - V6(ProtocolConstants.Version.V6, true), - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // ProtocolVersion interface. If you add a new enum constant, remember to update the interface as - // well. - - private final int code; - private final boolean beta; - - DefaultProtocolVersion(int code, boolean beta) { - this.code = code; - this.beta = beta; - } - - @Override - public int getCode() { - return code; - } - - @Override - public boolean isBeta() { - return beta; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java deleted file mode 100644 index f5cf76e29eb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Base class for all exceptions thrown by the driver. - * - *

Note that, for obvious programming errors, the driver might throw JDK runtime exceptions, such - * as {@link IllegalArgumentException} or {@link IllegalStateException}. In all other cases, it will - * be an instance of this class. - * - *

One special case is when the driver tried multiple nodes to complete a request, and they all - * failed; the error returned to the client will be an {@link AllNodesFailedException}, which wraps - * a map of errors per node. - * - *

Some implementations make the stack trace not writable to improve performance (see {@link - * Throwable#Throwable(String, Throwable, boolean, boolean)}). This is only done when the exception - * is thrown in a small number of well-known cases, and the stack trace wouldn't add any useful - * information (for example, server error responses). Instances returned by {@link #copy()} always - * have a stack trace. - */ -public abstract class DriverException extends RuntimeException { - - private transient volatile ExecutionInfo executionInfo; - - protected DriverException( - @Nullable String message, - @Nullable ExecutionInfo executionInfo, - @Nullable Throwable cause, - boolean writableStackTrace) { - super(message, cause, true, writableStackTrace); - this.executionInfo = executionInfo; - } - - /** - * Returns execution information about the request that led to this error. - * - *

This is similar to the information returned for a successful query in {@link ResultSet}, - * except that some fields may be absent: - * - *

    - *
  • {@link ExecutionInfo#getCoordinator()} may be null if the error occurred before any node - * was contacted; - *
  • {@link ExecutionInfo#getErrors()} will contain the errors encountered for other nodes, - * but not this error itself; - *
  • {@link ExecutionInfo#getSuccessfulExecutionIndex()} may be -1 if the error occurred - * before any execution was started; - *
  • {@link ExecutionInfo#getPagingState()} and {@link ExecutionInfo#getTracingId()} will - * always be null; - *
  • {@link ExecutionInfo#getWarnings()} and {@link ExecutionInfo#getIncomingPayload()} will - * always be empty; - *
  • {@link ExecutionInfo#isSchemaInAgreement()} will always be true; - *
  • {@link ExecutionInfo#getResponseSizeInBytes()} and {@link - * ExecutionInfo#getCompressedResponseSizeInBytes()} will always be -1. - *
- * - *

Note that this is only set for exceptions that are rethrown directly to the client from a - * session call. For example, individual node errors stored in {@link - * AllNodesFailedException#getAllErrors()} or {@link ExecutionInfo#getErrors()} do not contain - * their own execution info, and therefore return null from this method. - * - *

This method will also return null for low-level exceptions thrown directly from a driver - * channel, such as {@link com.datastax.oss.driver.api.core.connection.ConnectionInitException} or - * {@link com.datastax.oss.driver.api.core.connection.ClosedConnectionException}. - * - *

It will also be null if you serialize and deserialize an exception. - */ - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - /** This is for internal use by the driver, a client application has no reason to call it. */ - public void setExecutionInfo(ExecutionInfo executionInfo) { - this.executionInfo = executionInfo; - } - - /** - * Copy the exception. - * - *

This returns a new exception, equivalent to the original one, except that because a new - * object is created in the current thread, the top-most element in the stacktrace of the - * exception will refer to the current thread. The original exception may or may not be included - * as the copy's cause, depending on whether that is deemed useful (this is left to the discretion - * of each implementation). - * - *

This is intended for the synchronous wrapper methods of the driver, in order to produce a - * more user-friendly stack trace (that includes the line in the user code where the driver - * rethrew the error). - */ - @NonNull - public abstract DriverException copy(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java deleted file mode 100644 index 90ff875e375..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown by synchronous wrapper methods (such as {@link CqlSession#execute(Statement)}, when the - * underlying future was completed with a checked exception. - * - *

This exception should be rarely thrown (if ever). Most of the time, the driver uses unchecked - * exceptions, which will be rethrown directly instead of being wrapped in this class. - */ -public class DriverExecutionException extends DriverException { - public DriverExecutionException(Throwable cause) { - this(null, cause); - } - - private DriverExecutionException(ExecutionInfo executionInfo, Throwable cause) { - super(null, executionInfo, cause, true); - } - - @NonNull - @Override - public DriverException copy() { - return new DriverExecutionException(getExecutionInfo(), getCause()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java deleted file mode 100644 index 8b4cc5dc5bb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** Thrown when a driver request timed out. 
*/ -public class DriverTimeoutException extends DriverException { - public DriverTimeoutException(@NonNull String message) { - this(message, null); - } - - private DriverTimeoutException(String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new DriverTimeoutException(getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java b/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java deleted file mode 100644 index aa3f774800c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** Thrown when a session gets created with an invalid keyspace. 
*/ -public class InvalidKeyspaceException extends DriverException { - public InvalidKeyspaceException(@NonNull String message) { - this(message, null); - } - - private InvalidKeyspaceException(String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new InvalidKeyspaceException(getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java deleted file mode 100644 index b3902489a48..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import java.util.function.Function; - -/** The result of calling {@link #map(Function)} on another async iterable. 
*/ -public interface MappedAsyncPagingIterable - extends AsyncPagingIterable> {} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java b/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java deleted file mode 100644 index 3c3f18a5dc2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface MavenCoordinates { - - @NonNull - String getGroupId(); - - @NonNull - String getArtifactId(); - - @NonNull - Version getVersion(); - - @NonNull - String getName(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java deleted file mode 100644 index 9ef51fb99b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; - -/** - * Specialization of {@code AllNodesFailedException} when no coordinators were tried. - * - *

This can happen if all nodes are down, or if all the contact points provided at startup were - * invalid. - */ -public class NoNodeAvailableException extends AllNodesFailedException { - public NoNodeAvailableException() { - this(null); - } - - private NoNodeAvailableException(ExecutionInfo executionInfo) { - super("No node was available to execute the query", executionInfo, Collections.emptySet()); - } - - @NonNull - @Override - public DriverException copy() { - return new NoNodeAvailableException(getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java deleted file mode 100644 index 5303119844e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * Indicates that a {@link Node} was selected in a query plan, but it had no connection available. - * - *

A common reason to encounter this error is when the configured number of connections per node - * and requests per connection is not high enough to absorb the overall request rate. This can be - * mitigated by tuning the following options: - * - *

    - *
  • {@code advanced.connection.pool.local.size}; - *
  • {@code advanced.connection.pool.remote.size}; - *
  • {@code advanced.connection.max-requests-per-connection}. - *
- * - * See {@code reference.conf} for more details. - * - *

Another possibility is when you are trying to direct a request {@linkplain - * com.datastax.oss.driver.api.core.cql.Statement#setNode(Node) to a particular node}, but that node - * has no connections available. - */ -public class NodeUnavailableException extends DriverException { - - private final Node node; - - public NodeUnavailableException(Node node) { - super("No connection was available to " + node, null, null, true); - this.node = Objects.requireNonNull(node); - } - - @NonNull - public Node getNode() { - return node; - } - - @Override - @NonNull - public DriverException copy() { - return new NodeUnavailableException(node); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java deleted file mode 100644 index c2a81b554d0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.internal.core.PagingIterableWrapper; -import com.datastax.oss.driver.internal.core.cql.PagingIterableSpliterator; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Function; - -/** - * An iterable of elements which are fetched synchronously by the driver, possibly in multiple - * requests. - * - *

It uses asynchronous calls internally, but blocks on the results in order to provide a - * synchronous API to its clients. If the query is paged, only the first page will be fetched - * initially, and iteration will trigger background fetches of the next pages when necessary. - * - *

Note that this object can only be iterated once: elements are "consumed" as they are read, - * subsequent calls to {@code iterator()} will return the same iterator instance. - * - *

Implementations of this type are not thread-safe. They can only be iterated by the - * thread that invoked {@code session.execute}. - * - *

This is a generalization of {@link ResultSet}, replacing rows by an arbitrary element type. - */ -public interface PagingIterable extends Iterable { - - /** Metadata about the columns returned by the CQL request that was used to build this result. */ - @NonNull - ColumnDefinitions getColumnDefinitions(); - - /** - * The execution information for the last query performed for this iterable. - * - *

This is a shortcut for: - * - *

-   * getExecutionInfos().get(getExecutionInfos().size() - 1)
-   * 
- * - * @see #getExecutionInfos() - */ - @NonNull - default ExecutionInfo getExecutionInfo() { - List infos = getExecutionInfos(); - return infos.get(infos.size() - 1); - } - - /** - * The execution information for all the queries that have been performed so far to assemble this - * iterable. - * - *

This will have multiple elements if the query is paged, since the driver performs blocking - * background queries to fetch additional pages transparently as the result set is being iterated. - */ - @NonNull - List getExecutionInfos(); - - /** - * Returns the next element, or {@code null} if the iterable is exhausted. - * - *

This is convenient for queries that are known to return exactly one row, for example count - * queries. - */ - @Nullable - default ElementT one() { - Iterator iterator = iterator(); - return iterator.hasNext() ? iterator.next() : null; - } - - /** - * Returns all the remaining elements as a list; not recommended for queries that return a - * large number of elements. - * - *

Contrary to {@link #iterator()} or successive calls to {@link #one()}, this method forces - * fetching the full contents at once; in particular, this means that a large number of - * background queries might have to be run, and that all the data will be held in memory locally. - * Therefore it is crucial to only call this method for queries that are known to return a - * reasonable number of results. - */ - @NonNull - @SuppressWarnings("MixedMutabilityReturnType") - default List all() { - if (!iterator().hasNext()) { - return Collections.emptyList(); - } - // We can't know the actual size in advance since more pages could be fetched, but we can at - // least allocate for what we already have. - List result = Lists.newArrayListWithExpectedSize(getAvailableWithoutFetching()); - Iterables.addAll(result, this); - return result; - } - - /** - * Whether all pages have been fetched from the database. - * - *

If this is {@code false}, it means that more blocking background queries will be triggered - * as iteration continues. - */ - boolean isFullyFetched(); - - /** - * The number of elements that can be returned from this result set before a blocking background - * query needs to be performed to retrieve more results. In other words, this is the number of - * elements remaining in the current page. - * - *

This is useful if you use the paging state to pause the iteration and resume it later: after - * you've retrieved the state ({@link ExecutionInfo#getPagingState() - * getExecutionInfo().getPagingState()}), call this method and iterate the remaining elements; - * that way you're not leaving a gap between the last element and the position you'll restart from - * when you reinject the state in a new query. - */ - int getAvailableWithoutFetching(); - - /** - * If the query that produced this result was a CQL conditional update, indicate whether it was - * successfully applied. - * - *

For consistency, this method always returns {@code true} for non-conditional queries - * (although there is no reason to call the method in that case). This is also the case for - * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF - * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. - * - *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this - * method to always return {@code true} for batches containing conditional queries. - */ - boolean wasApplied(); - - /** - * Creates a new instance by transforming each element of this iterable with the provided - * function. - * - *

Note that both instances share the same underlying data: consuming elements from the - * transformed iterable will also consume them from this object, and vice-versa. - */ - @NonNull - default PagingIterable map( - Function elementMapper) { - return new PagingIterableWrapper<>(this, elementMapper); - } - - /** - * {@inheritDoc} - * - *

Default spliterators created by the driver will report the following characteristics: {@link - * Spliterator#ORDERED}, {@link Spliterator#IMMUTABLE}, {@link Spliterator#NONNULL}. Single-page - * result sets will also report {@link Spliterator#SIZED} and {@link Spliterator#SUBSIZED}, since - * the result set size is known. - * - *

This method should be called at most once. Spliterators share the same underlying data but - * do not support concurrent consumption; once a spliterator for this iterable is obtained, the - * iterable should not be consumed through calls to other methods such as {@link - * #iterator()}, {@link #one()} or {@link #all()}; doing so will result in unpredictable results. - */ - @NonNull - @Override - default Spliterator spliterator() { - return new PagingIterableSpliterator<>(this); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java b/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java deleted file mode 100644 index dd69f705453..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.detach.Detachable; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A version of the native protocol used by the driver to communicate with the server. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code ProtocolVersion}s are {@link DefaultProtocolVersion} instances. - */ -public interface ProtocolVersion { - - ProtocolVersion V3 = DefaultProtocolVersion.V3; - ProtocolVersion V4 = DefaultProtocolVersion.V4; - ProtocolVersion V5 = DefaultProtocolVersion.V5; - ProtocolVersion V6 = DefaultProtocolVersion.V6; - ProtocolVersion DSE_V1 = DseProtocolVersion.DSE_V1; - ProtocolVersion DSE_V2 = DseProtocolVersion.DSE_V2; - - /** The default version used for {@link Detachable detached} objects. */ - // Implementation note: we can't use the ProtocolVersionRegistry here, this has to be a - // compile-time constant. - ProtocolVersion DEFAULT = DefaultProtocolVersion.V5; - - /** - * A numeric code that uniquely identifies the version (this is the code used in network frames). - */ - int getCode(); - - /** A string representation of the version. */ - @NonNull - String name(); - - /** - * Whether the protocol version is in a beta status. - * - *

Beta versions are intended for Cassandra development. They should not be used in a regular - * application, as beta features may break at any point. - */ - boolean isBeta(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java b/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java deleted file mode 100644 index acf569d55f6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown if the session uses a request throttler, and it didn't allow the current request to - * execute. - * - *

This can happen either when the session is overloaded, or at shutdown for requests that had - * been enqueued. - */ -public class RequestThrottlingException extends DriverException { - - public RequestThrottlingException(@NonNull String message) { - this(message, null); - } - - private RequestThrottlingException(String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new RequestThrottlingException(getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java deleted file mode 100644 index 030984dc274..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.List; - -/** - * Indicates that we've attempted to connect to a Cassandra node with a protocol version that it - * cannot handle (e.g., connecting to a C* 2.1 node with protocol version 4). - * - *

The only time when this is returned directly to the client (wrapped in a {@link - * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged an the corresponding node is forced down. - */ -public class UnsupportedProtocolVersionException extends DriverException { - private static final long serialVersionUID = 0; - - private final EndPoint endPoint; - private final List attemptedVersions; - - @NonNull - public static UnsupportedProtocolVersionException forSingleAttempt( - @NonNull EndPoint endPoint, @NonNull ProtocolVersion attemptedVersion) { - String message = - String.format("[%s] Host does not support protocol version %s", endPoint, attemptedVersion); - return new UnsupportedProtocolVersionException( - endPoint, message, Collections.singletonList(attemptedVersion), null); - } - - @NonNull - public static UnsupportedProtocolVersionException forNegotiation( - @NonNull EndPoint endPoint, @NonNull List attemptedVersions) { - String message = - String.format( - "[%s] Protocol negotiation failed: could not find a common version (attempted: %s). " - + "Note that the driver does not support Cassandra 2.0 or lower.", - endPoint, attemptedVersions); - return new UnsupportedProtocolVersionException( - endPoint, message, ImmutableList.copyOf(attemptedVersions), null); - } - - public UnsupportedProtocolVersionException( - @Nullable EndPoint endPoint, // technically nullable, but should never be in real life - @NonNull String message, - @NonNull List attemptedVersions) { - this(endPoint, message, attemptedVersions, null); - } - - private UnsupportedProtocolVersionException( - EndPoint endPoint, - String message, - List attemptedVersions, - ExecutionInfo executionInfo) { - super(message, executionInfo, null, true); - this.endPoint = endPoint; - this.attemptedVersions = attemptedVersions; - } - - /** The address of the node that threw the error. 
*/ - @Nullable - public EndPoint getEndPoint() { - return endPoint; - } - - /** The versions that were attempted. */ - @NonNull - public List getAttemptedVersions() { - return attemptedVersions; - } - - @NonNull - @Override - public DriverException copy() { - return new UnsupportedProtocolVersionException( - endPoint, getMessage(), attemptedVersions, getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/Version.java b/core/src/main/java/com/datastax/oss/driver/api/core/Version.java deleted file mode 100644 index 52751e02984..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/Version.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import net.jcip.annotations.Immutable; - -/** - * A structured version number. - * - *

It is in the form X.Y.Z, with optional pre-release labels and build metadata. - * - *

Version numbers compare the usual way, the major number (X) is compared first, then the minor - * one (Y) and then the patch level one (Z). Lastly, versions with pre-release sorts before the - * versions that don't have one, and labels are sorted alphabetically if necessary. Build metadata - * are ignored for sorting versions. - */ -@Immutable -public class Version implements Comparable, Serializable { - - private static final long serialVersionUID = 1; - - private static final String VERSION_REGEXP = - "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; - - private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); - - @NonNull public static final Version V1_0_0 = Objects.requireNonNull(parse("1.0.0")); - @NonNull public static final Version V2_1_0 = Objects.requireNonNull(parse("2.1.0")); - @NonNull public static final Version V2_2_0 = Objects.requireNonNull(parse("2.2.0")); - @NonNull public static final Version V3_0_0 = Objects.requireNonNull(parse("3.0.0")); - @NonNull public static final Version V4_0_0 = Objects.requireNonNull(parse("4.0.0")); - @NonNull public static final Version V4_1_0 = Objects.requireNonNull(parse("4.1.0")); - @NonNull public static final Version V5_0_0 = Objects.requireNonNull(parse("5.0.0")); - @NonNull public static final Version V6_7_0 = Objects.requireNonNull(parse("6.7.0")); - @NonNull public static final Version V6_8_0 = Objects.requireNonNull(parse("6.8.0")); - @NonNull public static final Version V6_9_0 = Objects.requireNonNull(parse("6.9.0")); - - private final int major; - private final int minor; - private final int patch; - private final int dsePatch; - - private final String[] preReleases; - private final String build; - - private Version( - int major, int minor, int patch, int dsePatch, String[] preReleases, String build) { - this.major = major; - this.minor = minor; - this.patch = patch; - this.dsePatch = dsePatch; - this.preReleases = preReleases; - this.build = build; - } 
- - /** - * Parses a version from a string. - * - *

The version string should have primarily the form X.Y.Z to which can be appended one or more - * pre-release label after dashes (2.0.1-beta1, 2.1.4-rc1-SNAPSHOT) and an optional build label - * (2.1.0-beta1+a20ba.sha). Out of convenience, the "patch" version number, Z, can be omitted, in - * which case it is assumed to be 0. - * - * @param version the string to parse. - * @return the parsed version number. - * @throws IllegalArgumentException if the provided string does not represent a valid version. - */ - @Nullable - public static Version parse(@Nullable String version) { - if (version == null) { - return null; - } - - Matcher matcher = pattern.matcher(version); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid version number: " + version); - } - - try { - int major = Integer.parseInt(matcher.group(1)); - int minor = Integer.parseInt(matcher.group(2)); - - String pa = matcher.group(3); - int patch = - pa == null || pa.isEmpty() - ? 0 - : Integer.parseInt( - pa.substring(1)); // dropping the initial '.' since it's included this time - - String dse = matcher.group(4); - int dsePatch = - dse == null || dse.isEmpty() - ? -1 - : Integer.parseInt( - dse.substring(1)); // dropping the initial '.' since it's included this time - - String pr = matcher.group(5); - String[] preReleases = - pr == null || pr.isEmpty() - ? null - : pr.substring(1) - .split("-"); // drop initial '-' or '~' then split on the remaining ones - - String bl = matcher.group(6); - String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' - - return new Version(major, minor, patch, dsePatch, preReleases, build); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid version number: " + version); - } - } - - /** - * The major version number. - * - * @return the major version number, i.e. X in X.Y.Z. - */ - public int getMajor() { - return major; - } - - /** - * The minor version number. 
- * - * @return the minor version number, i.e. Y in X.Y.Z. - */ - public int getMinor() { - return minor; - } - - /** - * The patch version number. - * - * @return the patch version number, i.e. Z in X.Y.Z. - */ - public int getPatch() { - return patch; - } - - /** - * The DSE patch version number (will only be present for version of Cassandra in DSE). - * - *

DataStax Entreprise (DSE) adds a fourth number to the version number to track potential hot - * fixes and/or DSE specific patches that may have been applied to the Cassandra version. In that - * case, this method returns that fourth number. - * - * @return the DSE patch version number, i.e. D in X.Y.Z.D, or -1 if the version number is not - * from DSE. - */ - public int getDSEPatch() { - return dsePatch; - } - - /** - * The pre-release labels if relevant, i.e. label1 and label2 in X.Y.Z-label1-lable2. - * - * @return the pre-release labels. The return list will be {@code null} if the version number - * doesn't have any. - */ - public List getPreReleaseLabels() { - return preReleases == null ? null : Collections.unmodifiableList(Arrays.asList(preReleases)); - } - - /** - * The build label if there is one. - * - * @return the build label or {@code null} if the version number doesn't have one. - */ - public String getBuildLabel() { - return build; - } - - /** - * The next stable version, i.e. the version stripped of its pre-release labels and build - * metadata. - * - *

This is mostly used during our development stage, where we test the driver against - * pre-release versions of Cassandra like 2.1.0-rc7-SNAPSHOT, but need to compare to the stable - * version 2.1.0 when testing for native protocol compatibility, etc. - * - * @return the next stable version. - */ - public Version nextStable() { - return new Version(major, minor, patch, dsePatch, null, null); - } - - @Override - public int compareTo(@NonNull Version other) { - if (major < other.major) { - return -1; - } - if (major > other.major) { - return 1; - } - - if (minor < other.minor) { - return -1; - } - if (minor > other.minor) { - return 1; - } - - if (patch < other.patch) { - return -1; - } - if (patch > other.patch) { - return 1; - } - - if (dsePatch < 0) { - if (other.dsePatch >= 0) { - return -1; - } - } else { - if (other.dsePatch < 0) { - return 1; - } - - // Both are >= 0 - if (dsePatch < other.dsePatch) { - return -1; - } - if (dsePatch > other.dsePatch) { - return 1; - } - } - - if (preReleases == null) { - return other.preReleases == null ? 0 : 1; - } - if (other.preReleases == null) { - return -1; - } - - for (int i = 0; i < Math.min(preReleases.length, other.preReleases.length); i++) { - int cmp = preReleases[i].compareTo(other.preReleases[i]); - if (cmp != 0) { - return cmp; - } - } - - return Integer.compare(preReleases.length, other.preReleases.length); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Version) { - Version that = (Version) other; - return this.major == that.major - && this.minor == that.minor - && this.patch == that.patch - && this.dsePatch == that.dsePatch - && (this.preReleases == null - ? 
that.preReleases == null - : Arrays.equals(this.preReleases, that.preReleases)) - && Objects.equals(this.build, that.build); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(major, minor, patch, dsePatch, Arrays.hashCode(preReleases), build); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(major).append('.').append(minor).append('.').append(patch); - if (dsePatch >= 0) { - sb.append('.').append(dsePatch); - } - if (preReleases != null) { - for (String preRelease : preReleases) { - sb.append('-').append(preRelease); - } - } - if (build != null) { - sb.append('+').append(build); - } - return sb.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java deleted file mode 100644 index 47ce62f1461..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.addresstranslation; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; - -/** - * Translates IP addresses received from Cassandra nodes into locally queriable addresses. - * - *

The driver auto-detects new Cassandra nodes added to the cluster through server side pushed - * notifications and system table queries. For each node, the address the driver will receive will - * correspond to the address set as {@code broadcast_rpc_address} in the node's YAML file. In most - * cases, this is the correct address to use by the driver, and that is what is used by default. - * However, sometimes the addresses received through this mechanism will either not be reachable - * directly by the driver, or should not be the preferred address to use to reach the node (for - * instance, the {@code broadcast_rpc_address} set on Cassandra nodes might be a private IP, but - * some clients may have to use a public IP, or go through a router to reach that node). This - * interface addresses such cases, by allowing to translate an address as sent by a Cassandra node - * into another address to be used by the driver for connection. - * - *

The contact point addresses provided at driver initialization are considered translated - * already; in other words, they will be used as-is, without being processed by this component. - */ -public interface AddressTranslator extends AutoCloseable { - - /** - * Translates an address reported by a Cassandra node into the address that the driver will use to - * connect. - */ - @NonNull - InetSocketAddress translate(@NonNull InetSocketAddress address); - - /** Called when the cluster that this translator is associated with closes. */ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java deleted file mode 100644 index c73c3e4fb67..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Provides {@link Authenticator} instances to use when connecting to Cassandra nodes. - * - *

See {@link PlainTextAuthProvider} for an implementation which uses SASL PLAIN mechanism to - * authenticate using username/password strings. - */ -public interface AuthProvider extends AutoCloseable { - - /** - * The authenticator to use when connecting to {@code host}. - * - * @param endPoint the Cassandra host to connect to. - * @param serverAuthenticator the configured authenticator on the host. - * @return the authentication implementation to use. - */ - @NonNull - Authenticator newAuthenticator(@NonNull EndPoint endPoint, @NonNull String serverAuthenticator) - throws AuthenticationException; - - /** - * What to do if the server does not send back an authentication challenge (in other words, lets - * the client connect without any form of authentication). - * - *

This is suspicious because having authentication enabled on the client but not on the server - * is probably a configuration mistake. - * - *

Provider implementations are free to handle this however they want; typical approaches are: - * - *

    - *
  • ignoring; - *
  • logging a warning; - *
  • throwing an {@link AuthenticationException} to abort the connection (but note that it - * will be retried according to the {@link ReconnectionPolicy}). - *
- */ - void onMissingChallenge(@NonNull EndPoint endPoint) throws AuthenticationException; -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java deleted file mode 100644 index 28dde2123cb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates an error during the authentication phase while connecting to a node. - * - *

The only time when this is returned directly to the client (wrapped in a {@link - * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged and the connection will be reattempted. - */ -public class AuthenticationException extends RuntimeException { - private static final long serialVersionUID = 0; - - private final EndPoint endPoint; - - public AuthenticationException(@NonNull EndPoint endPoint, @NonNull String message) { - this(endPoint, message, null); - } - - public AuthenticationException( - @NonNull EndPoint endPoint, @NonNull String message, @Nullable Throwable cause) { - super(String.format("Authentication error on node %s: %s", endPoint, message), cause); - this.endPoint = endPoint; - } - - /** The address of the node that encountered the error. */ - @NonNull - public EndPoint getEndPoint() { - return endPoint; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java deleted file mode 100644 index 150a1dfb63f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; - -/** - * Handles SASL authentication with Cassandra servers. - * - *

Each time a new connection is created and the server requires authentication, a new instance - * of this class will be created by the corresponding {@link AuthProvider} to handle that - * authentication. The lifecycle of that new {@code Authenticator} will be: - * - *

    - *
  1. the {@link #initialResponse} method will be called. The initial return value will be sent - * to the server to initiate the handshake. - *
  2. the server will respond to each client response by either issuing a challenge or indicating - * that the authentication is complete (successfully or not). If a new challenge is issued, - * the authenticator's {@link #evaluateChallenge} method will be called to produce a response - * that will be sent to the server. This challenge/response negotiation will continue until - * the server responds that authentication is successful (or an {@link - * AuthenticationException} is raised). - *
  3. When the server indicates that authentication is successful, the {@link - * #onAuthenticationSuccess} method will be called with the last information that the server - * may optionally have sent. - *
- * - * The exact nature of the negotiation between client and server is specific to the authentication - * mechanism configured server side. - * - *

Note that, since the methods in this interface will be invoked on a driver I/O thread, they - * all return asynchronous results. If your implementation performs heavy computations or blocking - * calls, you'll want to schedule them on a separate executor, and return a {@code CompletionStage} - * that represents their future completion. If your implementation is fast, lightweight and does not - * perform blocking operations, it might be acceptable to run it on I/O threads directly; in that - * case, implement {@link SyncAuthenticator} instead of this interface. - */ -public interface Authenticator { - - /** - * Obtain an initial response token for initializing the SASL handshake. - * - * @return a completion stage that will complete with the initial response to send to the server - * (which may be {@code null}). Note that, if the returned byte buffer is writable, the driver - * will clear its contents immediately after use (to avoid keeping sensitive - * information in memory); do not reuse the same buffer across multiple invocations. - * Alternatively, if the contents are not sensitive, you can make the buffer {@linkplain - * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @NonNull - CompletionStage initialResponse(); - - /** - * Evaluate a challenge received from the server. Generally, this method should return null when - * authentication is complete from the client perspective. - * - * @param challenge the server's SASL challenge. - * @return a completion stage that will complete with the updated SASL token (which may be null to - * indicate the client requires no further action). Note that, if the returned byte buffer is - * writable, the driver will clear its contents immediately after use (to avoid keeping - * sensitive information in memory); do not reuse the same buffer across multiple invocations. 
- * Alternatively, if the contents are not sensitive, you can make the buffer {@linkplain - * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @NonNull - CompletionStage evaluateChallenge(@Nullable ByteBuffer challenge); - - /** - * Called when authentication is successful with the last information optionally sent by the - * server. - * - * @param token the information sent by the server with the authentication successful message. - * This will be {@code null} if the server sends no particular information on authentication - * success. - * @return a completion stage that completes when the authenticator is done processing this - * response. - */ - @NonNull - CompletionStage onAuthenticationSuccess(@Nullable ByteBuffer token); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java deleted file mode 100644 index fb85797af9e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.dse.driver.api.core.auth.BaseDseAuthenticator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Common infrastructure for plain text auth providers. - * - *

This can be reused to write an implementation that retrieves the credentials from another - * source than the configuration. The driver offers one built-in implementation: {@link - * ProgrammaticPlainTextAuthProvider}. - */ -@ThreadSafe -public abstract class PlainTextAuthProviderBase implements AuthProvider { - - private static final Logger LOG = LoggerFactory.getLogger(PlainTextAuthProviderBase.class); - - private final String logPrefix; - - /** - * @param logPrefix a string that will get prepended to the logs (this is used for discrimination - * when you have multiple driver instances executing in the same JVM). Built-in - * implementations fill this with {@link Session#getName()}. - */ - protected PlainTextAuthProviderBase(@NonNull String logPrefix) { - this.logPrefix = Objects.requireNonNull(logPrefix); - } - - /** - * Retrieves the credentials from the underlying source. - * - *

This is invoked every time the driver opens a new connection. - * - * @param endPoint The endpoint being contacted. - * @param serverAuthenticator The authenticator class sent by the endpoint. - */ - @NonNull - protected abstract Credentials getCredentials( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator); - - @NonNull - @Override - public Authenticator newAuthenticator( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) - throws AuthenticationException { - return new PlainTextAuthenticator( - getCredentials(endPoint, serverAuthenticator), endPoint, serverAuthenticator); - } - - @Override - public void onMissingChallenge(@NonNull EndPoint endPoint) { - LOG.warn( - "[{}] {} did not send an authentication challenge; " - + "This is suspicious because the driver expects authentication", - logPrefix, - endPoint); - } - - @Override - public void close() { - // nothing to do - } - - public static class Credentials { - - private final char[] username; - private final char[] password; - private final char[] authorizationId; - - /** - * Builds an instance for username/password authentication, and proxy authentication with the - * given authorizationId. - * - *

This feature is only available with DataStax Enterprise. If the target server is Apache - * Cassandra, the authorizationId will be ignored. - */ - public Credentials( - @NonNull char[] username, @NonNull char[] password, @NonNull char[] authorizationId) { - this.username = Objects.requireNonNull(username); - this.password = Objects.requireNonNull(password); - this.authorizationId = Objects.requireNonNull(authorizationId); - } - - /** Builds an instance for simple username/password authentication. */ - public Credentials(@NonNull char[] username, @NonNull char[] password) { - this(username, password, new char[0]); - } - - @NonNull - public char[] getUsername() { - return username; - } - - /** - * @deprecated this method only exists for backward compatibility. It is a synonym for {@link - * #getUsername()}, which should be used instead. - */ - @Deprecated - @NonNull - public char[] getAuthenticationId() { - return username; - } - - @NonNull - public char[] getPassword() { - return password; - } - - @NonNull - public char[] getAuthorizationId() { - return authorizationId; - } - - /** Clears the credentials from memory when they're no longer needed. */ - protected void clear() { - // Note: this is a bit irrelevant with the built-in provider, because the config already - // caches the credentials in memory. But it might be useful for a custom implementation that - // retrieves the credentials from a different source. - Arrays.fill(getUsername(), (char) 0); - Arrays.fill(getPassword(), (char) 0); - Arrays.fill(getAuthorizationId(), (char) 0); - } - } - - // Implementation note: BaseDseAuthenticator is backward compatible with Cassandra authenticators. - // This will work with both Cassandra (as long as no authorizationId is set) and DSE. 
- protected static class PlainTextAuthenticator extends BaseDseAuthenticator { - - private static final ByteBuffer MECHANISM = - ByteBuffer.wrap("PLAIN".getBytes(StandardCharsets.UTF_8)).asReadOnlyBuffer(); - - private static final ByteBuffer SERVER_INITIAL_CHALLENGE = - ByteBuffer.wrap("PLAIN-START".getBytes(StandardCharsets.UTF_8)).asReadOnlyBuffer(); - - private static final EndPoint DUMMY_END_POINT = - new EndPoint() { - @NonNull - @Override - public SocketAddress resolve() { - return new InetSocketAddress("127.0.0.1", 9042); - } - - @NonNull - @Override - public String asMetricPrefix() { - return ""; // will never be used - } - }; - - private final ByteBuffer encodedCredentials; - private final EndPoint endPoint; - - protected PlainTextAuthenticator( - @NonNull Credentials credentials, - @NonNull EndPoint endPoint, - @NonNull String serverAuthenticator) { - super(serverAuthenticator); - - Objects.requireNonNull(credentials); - Objects.requireNonNull(endPoint); - - ByteBuffer authorizationId = toUtf8Bytes(credentials.getAuthorizationId()); - ByteBuffer username = toUtf8Bytes(credentials.getUsername()); - ByteBuffer password = toUtf8Bytes(credentials.getPassword()); - - this.encodedCredentials = - ByteBuffer.allocate( - authorizationId.remaining() + username.remaining() + password.remaining() + 2); - encodedCredentials.put(authorizationId); - encodedCredentials.put((byte) 0); - encodedCredentials.put(username); - encodedCredentials.put((byte) 0); - encodedCredentials.put(password); - encodedCredentials.flip(); - - clear(authorizationId); - clear(username); - clear(password); - - this.endPoint = endPoint; - } - - /** - * @deprecated Preserved for backward compatibility, implementors should use the 3-arg - * constructor {@code PlainTextAuthenticator(Credentials, EndPoint, String)} instead. 
- */ - @Deprecated - protected PlainTextAuthenticator(@NonNull Credentials credentials) { - this( - credentials, - // It's unlikely that this class was ever extended by third parties, but if it was, assume - // that it was not written for DSE: - // - dummy end point because we should never need to build an auth exception - DUMMY_END_POINT, - // - default OSS authenticator name (the only thing that matters is how this string - // compares to "DseAuthenticator") - "org.apache.cassandra.auth.PasswordAuthenticator"); - } - - private static ByteBuffer toUtf8Bytes(char[] charArray) { - CharBuffer charBuffer = CharBuffer.wrap(charArray); - return Charsets.UTF_8.encode(charBuffer); - } - - private static void clear(ByteBuffer buffer) { - buffer.rewind(); - while (buffer.remaining() > 0) { - buffer.put((byte) 0); - } - } - - @NonNull - @Override - public ByteBuffer getMechanism() { - return MECHANISM; - } - - @NonNull - @Override - public ByteBuffer getInitialServerChallenge() { - return SERVER_INITIAL_CHALLENGE; - } - - @Nullable - @Override - public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge) { - if (SERVER_INITIAL_CHALLENGE.equals(challenge)) { - return encodedCredentials; - } - throw new AuthenticationException(endPoint, "Incorrect challenge from server"); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java deleted file mode 100644 index d991f5c5cb5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; - -/** - * A simple plaintext {@link AuthProvider} that receives the credentials programmatically instead of - * pulling them from the configuration. - * - *

To use this class, create an instance with the appropriate credentials to use and pass it to - * your session builder: - * - *

- * AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("...", "...");
- * CqlSession session =
- *     CqlSession.builder()
- *         .addContactEndPoints(...)
- *         .withAuthProvider(authProvider)
- *         .build();
- * 
- * - *

It also offers the possibility of changing the credentials at runtime. The new credentials - * will be used for all connections initiated after the change. - * - *

Implementation Note: this implementation is not particularly suited for highly-sensitive - * applications: it stores the credentials to use as private fields, and even if the fields are char - * arrays rather than strings to make it difficult to dump their contents, they are never cleared - * until the provider itself is garbage-collected, which typically only happens when the session is - * closed. - * - * @see SessionBuilder#withAuthProvider(AuthProvider) - * @see SessionBuilder#withAuthCredentials(String, String) - * @see SessionBuilder#withAuthCredentials(String, String, String) - */ -@ThreadSafe -public class ProgrammaticPlainTextAuthProvider extends PlainTextAuthProviderBase { - - private volatile char[] username; - private volatile char[] password; - private volatile char[] authorizationId; - - /** Builds an instance for simple username/password authentication. */ - public ProgrammaticPlainTextAuthProvider(@NonNull String username, @NonNull String password) { - this(username, password, ""); - } - - /** - * Builds an instance for username/password authentication, and proxy authentication with the - * given authorizationId. - * - *

This feature is only available with DataStax Enterprise. If the target server is Apache - * Cassandra, use {@link #ProgrammaticPlainTextAuthProvider(String, String)} instead, or set the - * authorizationId to an empty string. - */ - public ProgrammaticPlainTextAuthProvider( - @NonNull String username, @NonNull String password, @NonNull String authorizationId) { - // This will typically be built before the session so we don't know the log prefix yet. Pass an - // empty string, it's only used in one log message. - super(""); - this.username = Strings.requireNotEmpty(username, "username").toCharArray(); - this.password = Strings.requireNotEmpty(password, "password").toCharArray(); - this.authorizationId = - Objects.requireNonNull(authorizationId, "authorizationId cannot be null").toCharArray(); - } - - /** - * Changes the username. - * - *

The new credentials will be used for all connections initiated after this method was called. - * - * @param username the new name. - */ - public void setUsername(@NonNull String username) { - this.username = Strings.requireNotEmpty(username, "username").toCharArray(); - } - - /** - * Changes the password. - * - *

The new credentials will be used for all connections initiated after this method was called. - * - * @param password the new password. - */ - public void setPassword(@NonNull String password) { - this.password = Strings.requireNotEmpty(password, "password").toCharArray(); - } - - /** - * Changes the authorization id. - * - *

The new credentials will be used for all connections initiated after this method was called. - * - *

This feature is only available with DataStax Enterprise. If the target server is Apache - * Cassandra, this method should not be used. - * - * @param authorizationId the new authorization id. - */ - public void setAuthorizationId(@NonNull String authorizationId) { - this.authorizationId = - Objects.requireNonNull(authorizationId, "authorizationId cannot be null").toCharArray(); - } - - /** - * {@inheritDoc} - * - *

This implementation disregards the endpoint being connected to as well as the authenticator - * class sent by the server, and always returns the same credentials. - */ - @NonNull - @Override - protected Credentials getCredentials( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - return new Credentials(username.clone(), password.clone(), authorizationId.clone()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java deleted file mode 100644 index 016ac25680b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; - -/** - * An authenticator that performs all of its operations synchronously, on the calling thread. - * - *

This is intended for simple implementations that are fast and lightweight enough, and do not - * perform any blocking operations. - */ -public interface SyncAuthenticator extends Authenticator { - - /** - * Obtain an initial response token for initializing the SASL handshake. - * - *

{@link #initialResponse()} calls this and wraps the result in an immediately completed - * future. - * - * @return The initial response to send to the server (which may be {@code null}). Note that, if - * the returned byte buffer is writable, the driver will clear its contents immediately - * after use (to avoid keeping sensitive information in memory); do not reuse the same buffer - * across multiple invocations. Alternatively, if the contents are not sensitive, you can make - * the buffer {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @Nullable - ByteBuffer initialResponseSync(); - - /** - * Evaluate a challenge received from the server. - * - *

{@link #evaluateChallenge(ByteBuffer)} calls this and wraps the result in an immediately - * completed future. - * - * @param challenge the server's SASL challenge; may be {@code null}. - * @return The updated SASL token (which may be {@code null} to indicate the client requires no - * further action). Note that, if the returned byte buffer is writable, the driver will - * clear its contents immediately after use (to avoid keeping sensitive information in - * memory); do not reuse the same buffer across multiple invocations. Alternatively, if the - * contents are not sensitive, you can make the buffer {@linkplain - * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. - */ - @Nullable - ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge); - - /** - * Called when authentication is successful with the last information optionally sent by the - * server. - * - *

{@link #onAuthenticationSuccess(ByteBuffer)} calls this, and then returns an immediately - * completed future. - * - * @param token the information sent by the server with the authentication successful message. - * This will be {@code null} if the server sends no particular information on authentication - * success. - */ - void onAuthenticationSuccessSync(@Nullable ByteBuffer token); - - @NonNull - @Override - default CompletionStage initialResponse() { - return CompletableFutures.wrap(this::initialResponseSync); - } - - @NonNull - @Override - default CompletionStage evaluateChallenge(@Nullable ByteBuffer challenge) { - return CompletableFutures.wrap(() -> evaluateChallengeSync(challenge)); - } - - @NonNull - @Override - default CompletionStage onAuthenticationSuccess(@Nullable ByteBuffer token) { - return CompletableFutures.wrap( - () -> { - onAuthenticationSuccessSync(token); - return null; - }); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java deleted file mode 100644 index b265b9ba463..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Support for authentication between the driver and Cassandra nodes. - * - *

Authentication is performed on each newly open connection. It is customizable via the {@link - * com.datastax.oss.driver.api.core.auth.AuthProvider} interface. - */ -package com.datastax.oss.driver.api.core.auth; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java deleted file mode 100644 index 60c44193577..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java +++ /dev/null @@ -1,1057 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Built-in driver options for the core driver. - * - *

Refer to {@code reference.conf} in the driver codebase for a full description of each option. - */ -public enum DefaultDriverOption implements DriverOption { - /** - * The contact points to use for the initial connection to the cluster. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - CONTACT_POINTS("basic.contact-points"), - /** - * A name that uniquely identifies the driver instance. - * - *

Value-type: {@link String} - */ - SESSION_NAME("basic.session-name"), - /** - * The name of the keyspace that the session should initially be connected to. - * - *

Value-type: {@link String} - */ - SESSION_KEYSPACE("basic.session-keyspace"), - /** - * How often the driver tries to reload the configuration. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONFIG_RELOAD_INTERVAL("basic.config-reload-interval"), - - /** - * How long the driver waits for a request to complete. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_TIMEOUT("basic.request.timeout"), - /** - * The consistency level. - * - *

Value-Type: {@link String} - */ - REQUEST_CONSISTENCY("basic.request.consistency"), - /** - * The page size. - * - *

Value-Type: int - */ - REQUEST_PAGE_SIZE("basic.request.page-size"), - /** - * The serial consistency level. - * - *

Value-type: {@link String} - */ - REQUEST_SERIAL_CONSISTENCY("basic.request.serial-consistency"), - /** - * The default idempotence of a request. - * - *

Value-type: boolean - */ - REQUEST_DEFAULT_IDEMPOTENCE("basic.request.default-idempotence"), - - // LOAD_BALANCING_POLICY is a collection of sub-properties - LOAD_BALANCING_POLICY("basic.load-balancing-policy"), - /** - * The class of the load balancing policy. - * - *

Value-type: {@link String} - */ - LOAD_BALANCING_POLICY_CLASS("basic.load-balancing-policy.class"), - /** - * The datacenter that is considered "local". - * - *

Value-type: {@link String} - */ - LOAD_BALANCING_LOCAL_DATACENTER("basic.load-balancing-policy.local-datacenter"), - /** - * A custom filter to include/exclude nodes. - * - *

Value-Type: {@link String} - * - * @deprecated use {@link #LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS} instead. - */ - @Deprecated - LOAD_BALANCING_FILTER_CLASS("basic.load-balancing-policy.filter.class"), - - /** - * The timeout to use for internal queries that run as part of the initialization process - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONNECTION_INIT_QUERY_TIMEOUT("advanced.connection.init-query-timeout"), - /** - * The timeout to use when the driver changes the keyspace on a connection at runtime. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONNECTION_SET_KEYSPACE_TIMEOUT("advanced.connection.set-keyspace-timeout"), - /** - * The maximum number of requests that can be executed concurrently on a connection - * - *

Value-type: int - */ - CONNECTION_MAX_REQUESTS("advanced.connection.max-requests-per-connection"), - /** - * The maximum number of "orphaned" requests before a connection gets closed automatically. - * - *

Value-type: int - */ - CONNECTION_MAX_ORPHAN_REQUESTS("advanced.connection.max-orphan-requests"), - /** - * Whether to log non-fatal errors when the driver tries to open a new connection. - * - *

Value-type: boolean - */ - CONNECTION_WARN_INIT_ERROR("advanced.connection.warn-on-init-error"), - /** - * The number of connections in the LOCAL pool. - * - *

Value-type: int - */ - CONNECTION_POOL_LOCAL_SIZE("advanced.connection.pool.local.size"), - /** - * The number of connections in the REMOTE pool. - * - *

Value-type: int - */ - CONNECTION_POOL_REMOTE_SIZE("advanced.connection.pool.remote.size"), - - /** - * Whether to schedule reconnection attempts if all contact points are unreachable on the first - * initialization attempt. - * - *

Value-type: boolean - */ - RECONNECT_ON_INIT("advanced.reconnect-on-init"), - - /** - * The class of the reconnection policy. - * - *

Value-type: {@link String} - */ - RECONNECTION_POLICY_CLASS("advanced.reconnection-policy.class"), - /** - * Base delay for computing time between reconnection attempts. - * - *

Value-type: {@link java.time.Duration Duration} - */ - RECONNECTION_BASE_DELAY("advanced.reconnection-policy.base-delay"), - /** - * Maximum delay between reconnection attempts. - * - *

Value-type: {@link java.time.Duration Duration} - */ - RECONNECTION_MAX_DELAY("advanced.reconnection-policy.max-delay"), - - // RETRY_POLICY is a collection of sub-properties - RETRY_POLICY("advanced.retry-policy"), - /** - * The class of the retry policy. - * - *

Value-type: {@link String} - */ - RETRY_POLICY_CLASS("advanced.retry-policy.class"), - - // SPECULATIVE_EXECUTION_POLICY is a collection of sub-properties - SPECULATIVE_EXECUTION_POLICY("advanced.speculative-execution-policy"), - /** - * The class of the speculative execution policy. - * - *

Value-type: {@link String} - */ - SPECULATIVE_EXECUTION_POLICY_CLASS("advanced.speculative-execution-policy.class"), - /** - * The maximum number of executions. - * - *

Value-type: int - */ - SPECULATIVE_EXECUTION_MAX("advanced.speculative-execution-policy.max-executions"), - /** - * The delay between each execution. - * - *

Value-type: {@link java.time.Duration Duration} - */ - SPECULATIVE_EXECUTION_DELAY("advanced.speculative-execution-policy.delay"), - - /** - * The class of the authentication provider. - * - *

Value-type: {@link String} - */ - AUTH_PROVIDER_CLASS("advanced.auth-provider.class"), - /** - * Plain text auth provider username. - * - *

Value-type: {@link String} - */ - AUTH_PROVIDER_USER_NAME("advanced.auth-provider.username"), - /** - * Plain text auth provider password. - * - *

Value-type: {@link String} - */ - AUTH_PROVIDER_PASSWORD("advanced.auth-provider.password"), - - /** - * The class of the SSL Engine Factory. - * - *

Value-type: {@link String} - */ - SSL_ENGINE_FACTORY_CLASS("advanced.ssl-engine-factory.class"), - /** - * The cipher suites to enable when creating an SSLEngine for a connection. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - SSL_CIPHER_SUITES("advanced.ssl-engine-factory.cipher-suites"), - /** - * Whether or not to require validation that the hostname of the server certificate's common name - * matches the hostname of the server being connected to. - * - *

Value-type: boolean - */ - SSL_HOSTNAME_VALIDATION("advanced.ssl-engine-factory.hostname-validation"), - /** - * The location of the keystore file. - * - *

Value-type: {@link String} - */ - SSL_KEYSTORE_PATH("advanced.ssl-engine-factory.keystore-path"), - /** - * The keystore password. - * - *

Value-type: {@link String} - */ - SSL_KEYSTORE_PASSWORD("advanced.ssl-engine-factory.keystore-password"), - /** - * The location of the truststore file. - * - *

Value-type: {@link String} - */ - SSL_TRUSTSTORE_PATH("advanced.ssl-engine-factory.truststore-path"), - /** - * The truststore password. - * - *

Value-type: {@link String} - */ - SSL_TRUSTSTORE_PASSWORD("advanced.ssl-engine-factory.truststore-password"), - - /** - * The class of the generator that assigns a microsecond timestamp to each request. - * - *

Value-type: {@link String} - */ - TIMESTAMP_GENERATOR_CLASS("advanced.timestamp-generator.class"), - /** - * Whether to force the driver to use Java's millisecond-precision system clock. - * - *

Value-type: boolean - */ - TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK("advanced.timestamp-generator.force-java-clock"), - /** - * How far in the future timestamps are allowed to drift before the warning is logged. - * - *

Value-type: {@link java.time.Duration Duration} - */ - TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD( - "advanced.timestamp-generator.drift-warning.threshold"), - /** - * How often the warning will be logged if timestamps keep drifting above the threshold. - * - *

Value-type: {@link java.time.Duration Duration} - */ - TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL("advanced.timestamp-generator.drift-warning.interval"), - - /** - * The class of a session-wide component that tracks the outcome of requests. - * - *

Value-type: {@link String} - * - * @deprecated Use {@link #REQUEST_TRACKER_CLASSES} instead. - */ - @Deprecated - REQUEST_TRACKER_CLASS("advanced.request-tracker.class"), - /** - * Whether to log successful requests. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_SUCCESS_ENABLED("advanced.request-tracker.logs.success.enabled"), - /** - * The threshold to classify a successful request as "slow". - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_LOGGER_SLOW_THRESHOLD("advanced.request-tracker.logs.slow.threshold"), - /** - * Whether to log slow requests. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_SLOW_ENABLED("advanced.request-tracker.logs.slow.enabled"), - /** - * Whether to log failed requests. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_ERROR_ENABLED("advanced.request-tracker.logs.error.enabled"), - /** - * The maximum length of the query string in the log message. - * - *

Value-type: int - */ - REQUEST_LOGGER_MAX_QUERY_LENGTH("advanced.request-tracker.logs.max-query-length"), - /** - * Whether to log bound values in addition to the query string. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_VALUES("advanced.request-tracker.logs.show-values"), - /** - * The maximum length for bound values in the log message. - * - *

Value-type: int - */ - REQUEST_LOGGER_MAX_VALUE_LENGTH("advanced.request-tracker.logs.max-value-length"), - /** - * The maximum number of bound values to log. - * - *

Value-type: int - */ - REQUEST_LOGGER_MAX_VALUES("advanced.request-tracker.logs.max-values"), - /** - * Whether to log stack traces for failed queries. - * - *

Value-type: boolean - */ - REQUEST_LOGGER_STACK_TRACES("advanced.request-tracker.logs.show-stack-traces"), - - /** - * The class of a session-wide component that controls the rate at which requests are executed. - * - *

Value-type: {@link String} - */ - REQUEST_THROTTLER_CLASS("advanced.throttler.class"), - /** - * The maximum number of requests that are allowed to execute in parallel. - * - *

Value-type: int - */ - REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS("advanced.throttler.max-concurrent-requests"), - /** - * The maximum allowed request rate. - * - *

Value-type: int - */ - REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND("advanced.throttler.max-requests-per-second"), - /** - * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - * - *

Value-type: int - */ - REQUEST_THROTTLER_MAX_QUEUE_SIZE("advanced.throttler.max-queue-size"), - /** - * How often the throttler attempts to dequeue requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_THROTTLER_DRAIN_INTERVAL("advanced.throttler.drain-interval"), - - /** - * The class of a session-wide component that listens for node state changes. - * - *

Value-type: {@link String} - * - * @deprecated Use {@link #METADATA_NODE_STATE_LISTENER_CLASSES} instead. - */ - @Deprecated - METADATA_NODE_STATE_LISTENER_CLASS("advanced.node-state-listener.class"), - - /** - * The class of a session-wide component that listens for schema changes. - * - *

Value-type: {@link String} - * - * @deprecated Use {@link #METADATA_SCHEMA_CHANGE_LISTENER_CLASSES} instead. - */ - @Deprecated - METADATA_SCHEMA_CHANGE_LISTENER_CLASS("advanced.schema-change-listener.class"), - - /** - * The class of the address translator to use to convert the addresses sent by Cassandra nodes - * into ones that the driver uses to connect. - * - *

Value-type: {@link String} - */ - ADDRESS_TRANSLATOR_CLASS("advanced.address-translator.class"), - - /** - * The native protocol version to use. - * - *

Value-type: {@link String} - */ - PROTOCOL_VERSION("advanced.protocol.version"), - /** - * The name of the algorithm used to compress protocol frames. - * - *

Value-type: {@link String} - */ - PROTOCOL_COMPRESSION("advanced.protocol.compression"), - /** - * The maximum length, in bytes, of the frames supported by the driver. - * - *

Value-type: long - */ - PROTOCOL_MAX_FRAME_LENGTH("advanced.protocol.max-frame-length"), - - /** - * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - * keyspace. - * - *

Value-type: boolean - */ - REQUEST_WARN_IF_SET_KEYSPACE("advanced.request.warn-if-set-keyspace"), - /** - * How many times the driver will attempt to fetch the query trace if it is not ready yet. - * - *

Value-type: int - */ - REQUEST_TRACE_ATTEMPTS("advanced.request.trace.attempts"), - /** - * The interval between each attempt. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REQUEST_TRACE_INTERVAL("advanced.request.trace.interval"), - /** - * The consistency level to use for trace queries. - * - *

Value-type: {@link String} - */ - REQUEST_TRACE_CONSISTENCY("advanced.request.trace.consistency"), - - /** - * List of enabled session-level metrics. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - METRICS_SESSION_ENABLED("advanced.metrics.session.enabled"), - /** - * List of enabled node-level metrics. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - METRICS_NODE_ENABLED("advanced.metrics.node.enabled"), - /** - * The largest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_HIGHEST("advanced.metrics.session.cql-requests.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - * - *

Value-type: int - */ - METRICS_SESSION_CQL_REQUESTS_DIGITS("advanced.metrics.session.cql-requests.significant-digits"), - /** - * The interval at which percentile data is refreshed for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_INTERVAL("advanced.metrics.session.cql-requests.refresh-interval"), - /** - * The largest latency that we expect to record for throttling. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_HIGHEST("advanced.metrics.session.throttling.delay.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * throttling. - * - *

Value-type: int - */ - METRICS_SESSION_THROTTLING_DIGITS("advanced.metrics.session.throttling.delay.significant-digits"), - /** - * The interval at which percentile data is refreshed for throttling. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_INTERVAL("advanced.metrics.session.throttling.delay.refresh-interval"), - /** - * The largest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_HIGHEST("advanced.metrics.node.cql-messages.highest-latency"), - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - * - *

Value-type: int - */ - METRICS_NODE_CQL_MESSAGES_DIGITS("advanced.metrics.node.cql-messages.significant-digits"), - /** - * The interval at which percentile data is refreshed for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_INTERVAL("advanced.metrics.node.cql-messages.refresh-interval"), - - /** - * Whether or not to disable the Nagle algorithm. - * - *

Value-type: boolean - */ - SOCKET_TCP_NODELAY("advanced.socket.tcp-no-delay"), - /** - * Whether or not to enable TCP keep-alive probes. - * - *

Value-type: boolean - */ - SOCKET_KEEP_ALIVE("advanced.socket.keep-alive"), - /** - * Whether or not to allow address reuse. - * - *

Value-type: boolean - */ - SOCKET_REUSE_ADDRESS("advanced.socket.reuse-address"), - /** - * Sets the linger interval. - * - *

Value-type: int - */ - SOCKET_LINGER_INTERVAL("advanced.socket.linger-interval"), - /** - * Sets a hint to the size of the underlying buffers for incoming network I/O. - * - *

Value-type: int - */ - SOCKET_RECEIVE_BUFFER_SIZE("advanced.socket.receive-buffer-size"), - /** - * Sets a hint to the size of the underlying buffers for outgoing network I/O. - * - *

Value-type: int - */ - SOCKET_SEND_BUFFER_SIZE("advanced.socket.send-buffer-size"), - - /** - * The connection heartbeat interval. - * - *

Value-type: {@link java.time.Duration Duration} - */ - HEARTBEAT_INTERVAL("advanced.heartbeat.interval"), - /** - * How long the driver waits for the response to a heartbeat. - * - *

Value-type: {@link java.time.Duration Duration} - */ - HEARTBEAT_TIMEOUT("advanced.heartbeat.timeout"), - - /** - * How long the driver waits to propagate a Topology event. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METADATA_TOPOLOGY_WINDOW("advanced.metadata.topology-event-debouncer.window"), - /** - * The maximum number of events that can accumulate. - * - *

Value-type: int - */ - METADATA_TOPOLOGY_MAX_EVENTS("advanced.metadata.topology-event-debouncer.max-events"), - /** - * Whether schema metadata is enabled. - * - *

Value-type: boolean - */ - METADATA_SCHEMA_ENABLED("advanced.metadata.schema.enabled"), - /** - * The timeout for the requests to the schema tables. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METADATA_SCHEMA_REQUEST_TIMEOUT("advanced.metadata.schema.request-timeout"), - /** - * The page size for the requests to the schema tables. - * - *

Value-type: int - */ - METADATA_SCHEMA_REQUEST_PAGE_SIZE("advanced.metadata.schema.request-page-size"), - /** - * The list of keyspaces for which schema and token metadata should be maintained. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - METADATA_SCHEMA_REFRESHED_KEYSPACES("advanced.metadata.schema.refreshed-keyspaces"), - /** - * How long the driver waits to apply a refresh. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METADATA_SCHEMA_WINDOW("advanced.metadata.schema.debouncer.window"), - /** - * The maximum number of refreshes that can accumulate. - * - *

Value-type: int - */ - METADATA_SCHEMA_MAX_EVENTS("advanced.metadata.schema.debouncer.max-events"), - /** - * Whether token metadata is enabled. - * - *

Value-type: boolean - */ - METADATA_TOKEN_MAP_ENABLED("advanced.metadata.token-map.enabled"), - - /** - * How long the driver waits for responses to control queries. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTROL_CONNECTION_TIMEOUT("advanced.control-connection.timeout"), - /** - * The interval between each schema agreement check attempt. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTROL_CONNECTION_AGREEMENT_INTERVAL("advanced.control-connection.schema-agreement.interval"), - /** - * The timeout after which schema agreement fails. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONTROL_CONNECTION_AGREEMENT_TIMEOUT("advanced.control-connection.schema-agreement.timeout"), - /** - * Whether to log a warning if schema agreement fails. - * - *

Value-type: boolean - */ - CONTROL_CONNECTION_AGREEMENT_WARN("advanced.control-connection.schema-agreement.warn-on-failure"), - - /** - * Whether `Session.prepare` calls should be sent to all nodes in the cluster. - * - *

Value-type: boolean - */ - PREPARE_ON_ALL_NODES("advanced.prepared-statements.prepare-on-all-nodes"), - /** - * Whether the driver tries to prepare on new nodes at all. - * - *

Value-type: boolean - */ - REPREPARE_ENABLED("advanced.prepared-statements.reprepare-on-up.enabled"), - /** - * Whether to check `system.prepared_statements` on the target node before repreparing. - * - *

Value-type: boolean - */ - REPREPARE_CHECK_SYSTEM_TABLE("advanced.prepared-statements.reprepare-on-up.check-system-table"), - /** - * The maximum number of statements that should be reprepared. - * - *

Value-type: int - */ - REPREPARE_MAX_STATEMENTS("advanced.prepared-statements.reprepare-on-up.max-statements"), - /** - * The maximum number of concurrent requests when repreparing. - * - *

Value-type: int - */ - REPREPARE_MAX_PARALLELISM("advanced.prepared-statements.reprepare-on-up.max-parallelism"), - /** - * The request timeout when repreparing. - * - *

Value-type: {@link java.time.Duration Duration} - */ - REPREPARE_TIMEOUT("advanced.prepared-statements.reprepare-on-up.timeout"), - - /** - * The number of threads in the I/O group. - * - *

Value-type: int - */ - NETTY_IO_SIZE("advanced.netty.io-group.size"), - /** - * Quiet period for I/O group shutdown. - * - *

Value-type: int - */ - NETTY_IO_SHUTDOWN_QUIET_PERIOD("advanced.netty.io-group.shutdown.quiet-period"), - /** - * Max time to wait for I/O group shutdown. - * - *

Value-type: int - */ - NETTY_IO_SHUTDOWN_TIMEOUT("advanced.netty.io-group.shutdown.timeout"), - /** - * Units for I/O group quiet period and timeout. - * - *

Value-type: {@link String} - */ - NETTY_IO_SHUTDOWN_UNIT("advanced.netty.io-group.shutdown.unit"), - /** - * The number of threads in the Admin group. - * - *

Value-type: int - */ - NETTY_ADMIN_SIZE("advanced.netty.admin-group.size"), - /** - * Quiet period for admin group shutdown. - * - *

Value-type: int - */ - NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD("advanced.netty.admin-group.shutdown.quiet-period"), - /** - * Max time to wait for admin group shutdown. - * - *

Value-type: {@link String} - */ - NETTY_ADMIN_SHUTDOWN_TIMEOUT("advanced.netty.admin-group.shutdown.timeout"), - /** - * Units for admin group quite period and timeout. - * - *

Value-type: {@link String} - */ - NETTY_ADMIN_SHUTDOWN_UNIT("advanced.netty.admin-group.shutdown.unit"), - - /** @deprecated This option was removed in version 4.6.1. */ - @Deprecated - COALESCER_MAX_RUNS("advanced.coalescer.max-runs-with-no-work"), - /** - * The coalescer reschedule interval. - * - *

Value-type: {@link java.time.Duration Duration} - */ - COALESCER_INTERVAL("advanced.coalescer.reschedule-interval"), - - /** - * Whether to resolve the addresses passed to `basic.contact-points`. - * - *

Value-type: boolean - */ - RESOLVE_CONTACT_POINTS("advanced.resolve-contact-points"), - - /** - * This is how frequent the timer should wake up to check for timed-out tasks or speculative - * executions. - * - *

Value-type: {@link java.time.Duration Duration} - */ - NETTY_TIMER_TICK_DURATION("advanced.netty.timer.tick-duration"), - /** - * Number of ticks in the Timer wheel. - * - *

Value-type: int - */ - NETTY_TIMER_TICKS_PER_WHEEL("advanced.netty.timer.ticks-per-wheel"), - - /** - * Whether logging of server warnings generated during query execution should be disabled by the - * driver. - * - *

Value-type: boolean - */ - REQUEST_LOG_WARNINGS("advanced.request.log-warnings"), - - /** - * Whether the threads created by the driver should be daemon threads. - * - *

Value-type: boolean - */ - NETTY_DAEMON("advanced.netty.daemon"), - - /** - * The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a - * service. - * - *

Value-type: {@link String} - */ - CLOUD_SECURE_CONNECT_BUNDLE("basic.cloud.secure-connect-bundle"), - - /** - * Whether the slow replica avoidance should be enabled in the default LBP. - * - *

Value-type: boolean - */ - LOAD_BALANCING_POLICY_SLOW_AVOIDANCE("basic.load-balancing-policy.slow-replica-avoidance"), - - /** - * The timeout to use when establishing driver connections. - * - *

Value-type: {@link java.time.Duration Duration} - */ - CONNECTION_CONNECT_TIMEOUT("advanced.connection.connect-timeout"), - - /** - * The maximum number of live sessions that are allowed to coexist in a given VM. - * - *

Value-type: int - */ - SESSION_LEAK_THRESHOLD("advanced.session-leak.threshold"), - /** - * The period of inactivity after which the node level metrics will be evicted. The eviction will - * happen only if none of the enabled node-level metrics is updated for a given node within this - * time window. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_EXPIRE_AFTER("advanced.metrics.node.expire-after"), - - /** - * The classname of the desired MetricsFactory implementation. - * - *

Value-type: {@link String} - */ - METRICS_FACTORY_CLASS("advanced.metrics.factory.class"), - - /** - * The maximum number of nodes from remote DCs to include in query plans. - * - *

Value-Type: int - */ - LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC( - "advanced.load-balancing-policy.dc-failover.max-nodes-per-remote-dc"), - /** - * Whether to consider nodes from remote DCs if the request's consistency level is local. - * - *

Value-Type: boolean - */ - LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS( - "advanced.load-balancing-policy.dc-failover.allow-for-local-consistency-levels"), - - /** - * The classname of the desired {@code MetricIdGenerator} implementation. - * - *

Value-type: {@link String} - */ - METRICS_ID_GENERATOR_CLASS("advanced.metrics.id-generator.class"), - - /** - * The value of the prefix to prepend to all metric names. - * - *

Value-type: {@link String} - */ - METRICS_ID_GENERATOR_PREFIX("advanced.metrics.id-generator.prefix"), - - /** - * The class name of a custom {@link - * com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator}. - * - *

Value-Type: {@link String} - */ - LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS("basic.load-balancing-policy.evaluator.class"), - - /** - * The shortest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_LOWEST("advanced.metrics.session.cql-requests.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: List of {@link java.time.Duration Duration} - */ - METRICS_SESSION_CQL_REQUESTS_SLO("advanced.metrics.session.cql-requests.slo"), - - /** - * The shortest latency that we expect to record for throttling. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_LOWEST("advanced.metrics.session.throttling.delay.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: List of {@link java.time.Duration Duration} - */ - METRICS_SESSION_THROTTLING_SLO("advanced.metrics.session.throttling.delay.slo"), - - /** - * The shortest latency that we expect to record for requests. - * - *

Value-type: {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_LOWEST("advanced.metrics.node.cql-messages.lowest-latency"), - /** - * Optional service-level objectives to meet, as a list of latencies to track. - * - *

Value-type: List of {@link java.time.Duration Duration} - */ - METRICS_NODE_CQL_MESSAGES_SLO("advanced.metrics.node.cql-messages.slo"), - - /** - * Whether the prepared statements cache use weak values. - * - *

Value-type: boolean - */ - PREPARED_CACHE_WEAK_VALUES("advanced.prepared-statements.prepared-cache.weak-values"), - - /** - * The classes of session-wide components that track the outcome of requests. - * - *

Value-type: List of {@link String} - */ - REQUEST_TRACKER_CLASSES("advanced.request-tracker.classes"), - - /** - * The classes of session-wide components that listen for node state changes. - * - *

Value-type: List of {@link String} - */ - METADATA_NODE_STATE_LISTENER_CLASSES("advanced.node-state-listener.classes"), - - /** - * The classes of session-wide components that listen for schema changes. - * - *

Value-type: List of {@link String} - */ - METADATA_SCHEMA_CHANGE_LISTENER_CLASSES("advanced.schema-change-listener.classes"), - /** - * Optional list of percentiles to publish for cql-requests metric. Produces an additional time - * series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES( - "advanced.metrics.session.cql-requests.publish-percentiles"), - /** - * Optional list of percentiles to publish for node cql-messages metric. Produces an additional - * time series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES( - "advanced.metrics.node.cql-messages.publish-percentiles"), - /** - * Optional list of percentiles to publish for throttling delay metric.Produces an additional time - * series for each requested percentile. This percentile is computed locally, and so can't be - * aggregated with percentiles computed across other dimensions (e.g. in a different instance). - * - *

Value type: {@link java.util.List List}<{@link Double}> - */ - METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES( - "advanced.metrics.session.throttling.delay.publish-percentiles"), - /** - * Adds histogram buckets used to generate aggregable percentile approximations in monitoring - * systems that have query facilities to do so (e.g. Prometheus histogram_quantile, Atlas - * percentiles). - * - *

Value-type: boolean - */ - METRICS_GENERATE_AGGREGABLE_HISTOGRAMS("advanced.metrics.histograms.generate-aggregable"), - /** - * The duration between attempts to reload the keystore. - * - *

Value-type: {@link java.time.Duration} - */ - SSL_KEYSTORE_RELOAD_INTERVAL("advanced.ssl-engine-factory.keystore-reload-interval"), - /** - * Ordered preference list of remote dcs optionally supplied for automatic failover. - * - *

Value type: {@link java.util.List List}<{@link String}> - */ - LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS( - "advanced.load-balancing-policy.dc-failover.preferred-remote-dcs"), - /** - * Whether or not to do a DNS reverse-lookup of provided server addresses for SAN addresses. - * - *

Value-type: boolean - */ - SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN("advanced.ssl-engine-factory.allow-dns-reverse-lookup-san"), - /** - * The class of session-wide component that generates request IDs. - * - *

Value-type: {@link String} - */ - REQUEST_ID_GENERATOR_CLASS("advanced.request-id.generator.class"), - /** - * An address to always translate all node addresses to that same proxy hostname no matter what IP - * address a node has, but still using its native transport port. - * - *

Value-Type: {@link String} - */ - ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME("advanced.address-translator.advertised-hostname"), - /** - * A map of Cassandra node subnets (CIDR notations) to target addresses, for example (note quoted - * keys): - * - *

-   * advanced.address-translator.subnet-addresses {
-   *   "100.64.0.0/15" = "cassandra.datacenter1.com:9042"
-   *   "100.66.0.0/15" = "cassandra.datacenter2.com:9042"
-   *   # IPv6 example:
-   *   # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042"
-   *   # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042"
-   * }
-   * 
- * - * Note: subnets must be represented as prefix blocks, see {@link - * inet.ipaddr.Address#isPrefixBlock()}. - * - *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> - */ - ADDRESS_TRANSLATOR_SUBNET_ADDRESSES("advanced.address-translator.subnet-addresses"), - /** - * A default address to fallback to if Cassandra node IP isn't contained in any of the configured - * subnets. - * - *

Value-Type: {@link String} - */ - ADDRESS_TRANSLATOR_DEFAULT_ADDRESS("advanced.address-translator.default-address"), - /** - * Whether to resolve the addresses on initialization (if true) or on each node (re-)connection - * (if false). Defaults to false. - * - *

Value-Type: boolean - */ - ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES("advanced.address-translator.resolve-addresses"); - - private final String path; - - DefaultDriverOption(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java deleted file mode 100644 index 88519c82a22..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** - * The configuration of the driver. - * - *

It is composed of options, that are organized into profiles. There is a default profile that - * is always present, and additional, named profiles, that can override part of the options. - * Profiles can be used to categorize queries that use the same parameters (for example, an - * "analytics" profile vs. a "transactional" profile). - */ -public interface DriverConfig { - - /** - * Alias to get the default profile, which is stored under the name {@link - * DriverExecutionProfile#DEFAULT_NAME} and always present. - */ - @NonNull - default DriverExecutionProfile getDefaultProfile() { - return getProfile(DriverExecutionProfile.DEFAULT_NAME); - } - - /** @throws IllegalArgumentException if there is no profile with this name. */ - @NonNull - DriverExecutionProfile getProfile(@NonNull String profileName); - - /** Returns an immutable view of all named profiles (including the default profile). */ - @NonNull - Map getProfiles(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java deleted file mode 100644 index 15fae232d17..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java +++ /dev/null @@ -1,386 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.config.composite.CompositeDriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.map.MapBasedDriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultProgrammaticDriverConfigLoaderBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.File; -import java.net.URL; -import java.nio.file.Path; -import java.util.concurrent.CompletionStage; - -/** - * Manages the initialization, and optionally the periodic reloading, of the driver configuration. - * - * @see SessionBuilder#withConfigLoader(DriverConfigLoader) - */ -public interface DriverConfigLoader extends AutoCloseable { - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config) except - * that application-specific classpath resources will be located using the provided {@link - * ClassLoader} instead of {@linkplain Thread#getContextClassLoader() the current thread's context - * class loader}. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromDefaults(@NonNull ClassLoader appClassLoader) { - return new DefaultDriverConfigLoader(appClassLoader); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from a classpath resource with a custom name. - * - *

The class loader used to locate application-specific classpath resources is {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. This might not be - * suitable for OSGi deployments, which should use {@link #fromClasspath(String, ClassLoader)} - * instead. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • {@code .conf} (all resources on classpath with this name) - *
  • {@code .json} (all resources on classpath with this name) - *
  • {@code .properties} (all resources on classpath with this name) - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { - return fromClasspath(resourceBaseName, Thread.currentThread().getContextClassLoader()); - } - - /** - * Just like {@link #fromClasspath(java.lang.String)} except that application-specific classpath - * resources will be located using the provided {@link ClassLoader} instead of {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. - */ - @NonNull - static DriverConfigLoader fromClasspath( - @NonNull String resourceBaseName, @NonNull ClassLoader appClassLoader) { - return DefaultDriverConfigLoader.fromClasspath(resourceBaseName, appClassLoader); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from the given path. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the contents of {@code file} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromPath(@NonNull Path file) { - return fromFile(file.toFile()); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from the given file. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the contents of {@code file} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromFile(@NonNull File file) { - return DefaultDriverConfigLoader.fromFile(file); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are loaded from the given URL. - * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the contents of {@code url} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The returned loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - */ - @NonNull - static DriverConfigLoader fromUrl(@NonNull URL url) { - return DefaultDriverConfigLoader.fromUrl(url); - } - - /** - * Builds an instance using the driver's default implementation (based on Typesafe config), except - * that application-specific options are parsed from the given string. - * - *

The string must be in HOCON format and contain a {@code datastax-java-driver} section. - * Options must be separated by line breaks: - * - *

-   * DriverConfigLoader.fromString(
-   *         "datastax-java-driver.basic { session-name = my-app\nrequest.timeout = 1 millisecond }")
-   * 
- * - *

More precisely, configuration properties are loaded and merged from the following - * (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • the config in {@code contents} - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - *

This loader does not support runtime reloading. - */ - @NonNull - static DriverConfigLoader fromString(@NonNull String contents) { - return DefaultDriverConfigLoader.fromString(contents); - } - - /** - * Starts a builder that allows configuration options to be overridden programmatically. - * - *

Note that {@link #fromMap(OptionsMap)} provides an alternative approach for programmatic - * configuration, that might be more convenient if you wish to completely bypass Typesafe config. - * - *

For example: - * - *

{@code
-   * DriverConfigLoader loader =
-   *     DriverConfigLoader.programmaticBuilder()
-   *         .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5))
-   *         .startProfile("slow")
-   *         .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
-   *         .endProfile()
-   *         .build();
-   * }
- * - * produces the same overrides as: - * - *
-   * datastax-java-driver {
-   *   basic.request.timeout = 5 seconds
-   *   profiles {
-   *     slow {
-   *       basic.request.timeout = 30 seconds
-   *     }
-   *   }
-   * }
-   * 
- * - * The resulting loader still uses the driver's default implementation (based on Typesafe config), - * except that the programmatic configuration takes precedence. More precisely, configuration - * properties are loaded and merged from the following (first-listed are higher priority): - * - *
    - *
  • system properties - *
  • properties that were provided programmatically - *
  • {@code application.conf} (all resources on classpath with this name) - *
  • {@code application.json} (all resources on classpath with this name) - *
  • {@code application.properties} (all resources on classpath with this name) - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * Note that {@code application.*} is entirely optional, you may choose to only rely on the - * driver's built-in {@code reference.conf} and programmatic overrides. - * - *

The class loader used to locate application-specific classpath resources is {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. This might not be - * suitable for OSGi deployments, which should use {@link #programmaticBuilder(ClassLoader)} - * instead. - * - *

The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

The loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - * - *

Note that the returned builder is not thread-safe. - * - * @see #fromMap(OptionsMap) - */ - @NonNull - static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { - return new DefaultProgrammaticDriverConfigLoaderBuilder(); - } - - /** - * Just like {@link #programmaticBuilder()} except that application-specific classpath resources - * will be located using the provided {@link ClassLoader} instead of {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. - */ - @NonNull - static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder( - @NonNull ClassLoader appClassLoader) { - return new DefaultProgrammaticDriverConfigLoaderBuilder(appClassLoader); - } - - /** - * Builds an instance backed by an {@link OptionsMap}, which holds all options in memory. - * - *

This is the simplest implementation. It is intended for clients who wish to completely - * bypass Typesafe config, and instead manage the configuration programmatically. A typical - * example is a third-party tool that already has its own configuration file, and doesn't want to - * introduce a separate mechanism for driver options. - * - *

With this loader, the driver's built-in {@code reference.conf} file is ignored, the provided - * {@link OptionsMap} must explicitly provide all mandatory options. Note however that {@link - * OptionsMap#driverDefaults()} allows you to initialize an instance with the same default values - * as {@code reference.conf}. - * - *

-   * // This creates a configuration equivalent to the built-in reference.conf:
-   * OptionsMap map = OptionsMap.driverDefaults();
-   *
-   * // Customize an option:
-   * map.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5));
-   *
-   * DriverConfigLoader loader = DriverConfigLoader.fromMap(map);
-   * CqlSession session = CqlSession.builder()
-   *     .withConfigLoader(loader)
-   *     .build();
-   * 
- * - *

If the {@link OptionsMap} is modified at runtime, this will be reflected immediately in the - * configuration, you don't need to call {@link #reload()}. Note however that, depending on the - * option, the driver might not react to a configuration change immediately, or ever (this is - * documented in {@code reference.conf}). - * - * @since 4.6.0 - */ - @NonNull - static DriverConfigLoader fromMap(@NonNull OptionsMap source) { - return new MapBasedDriverConfigLoader(source, source.asRawMap()); - } - - /** - * Composes two existing config loaders to form a new one. - * - *

When the driver reads an option, the "primary" config will be queried first. If the option - * is missing, then it will be looked up in the "fallback" config. - * - *

All execution profiles will be surfaced in the new config. If a profile is defined both in - * the primary and the fallback config, its options will be merged using the same precedence rules - * as described above. - * - *

The new config is reloadable if at least one of the input configs is. If you invoke {@link - * DriverConfigLoader#reload()} on the new loader, it will reload whatever is reloadable, or fail - * if nothing is. If the input loaders have periodic reloading built-in, each one will reload at - * its own pace, and the changes will be reflected in the new config. - */ - @NonNull - static DriverConfigLoader compose( - @NonNull DriverConfigLoader primaryConfigLoader, - @NonNull DriverConfigLoader fallbackConfigLoader) { - return new CompositeDriverConfigLoader(primaryConfigLoader, fallbackConfigLoader); - } - - /** - * Loads the first configuration that will be used to initialize the driver. - * - *

If this loader {@linkplain #supportsReloading() supports reloading}, this object should be - * mutable and reflect later changes when the configuration gets reloaded. - */ - @NonNull - DriverConfig getInitialConfig(); - - /** - * Called when the driver initializes. For loaders that periodically check for configuration - * updates, this is a good time to grab an internal executor and schedule a recurring task. - */ - void onDriverInit(@NonNull DriverContext context); - - /** - * Triggers an immediate reload attempt and returns a stage that completes once the attempt is - * finished, with a boolean indicating whether the configuration changed as a result of this - * reload. - * - *

If so, it's also guaranteed that internal driver components have been notified by that time; - * note however that some react to the notification asynchronously, so they may not have - * completely applied all resulting changes yet. - * - *

If this loader does not support programmatic reloading — which you can check by - * calling {@link #supportsReloading()} before this method — the returned stage should fail - * immediately with an {@link UnsupportedOperationException}. The default implementation of this - * interface does support programmatic reloading however, and never returns a failed stage. - */ - @NonNull - CompletionStage reload(); - - /** - * Whether this implementation supports programmatic reloading with the {@link #reload()} method. - * - *

The default implementation of this interface does support programmatic reloading and always - * returns true. - */ - boolean supportsReloading(); - - /** - * Called when the session closes. This is a good time to release any external resource, for - * example cancel a scheduled reloading task. - */ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java deleted file mode 100644 index 89c28f0f521..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.oss.driver.internal.core.config.DerivedExecutionProfile; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; - -/** - * A profile in the driver's configuration. - * - *

It is a collection of typed options. - * - *

Getters (such as {@link #getBoolean(DriverOption)}) are self-explanatory. - * - *

{@code withXxx} methods (such as {@link #withBoolean(DriverOption, boolean)}) create a - * "derived" profile, which is an on-the-fly copy of the profile with the new value (which - * might be a new option, or overwrite an existing one). If the original configuration is reloaded, - * all derived profiles get updated as well. For best performance, such derived profiles should be - * used sparingly; it is better to have built-in profiles for common scenarios. - * - * @see DriverConfig - */ -public interface DriverExecutionProfile extends OngoingConfigOptions { - - /** - * The name of the default profile (the string {@value}). - * - *

Named profiles can't use this name. If you try to declare such a profile, a runtime error - * will be thrown. - */ - String DEFAULT_NAME = "default"; - - /** - * The name of the profile in the configuration. - * - *

Derived profiles inherit the name of their parent. - */ - @NonNull - String getName(); - - boolean isDefined(@NonNull DriverOption option); - - boolean getBoolean(@NonNull DriverOption option); - - default boolean getBoolean(@NonNull DriverOption option, boolean defaultValue) { - return isDefined(option) ? getBoolean(option) : defaultValue; - } - - @NonNull - List getBooleanList(@NonNull DriverOption option); - - @Nullable - default List getBooleanList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getBooleanList(option) : defaultValue; - } - - int getInt(@NonNull DriverOption option); - - default int getInt(@NonNull DriverOption option, int defaultValue) { - return isDefined(option) ? getInt(option) : defaultValue; - } - - @NonNull - List getIntList(@NonNull DriverOption option); - - @Nullable - default List getIntList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getIntList(option) : defaultValue; - } - - long getLong(@NonNull DriverOption option); - - default long getLong(@NonNull DriverOption option, long defaultValue) { - return isDefined(option) ? getLong(option) : defaultValue; - } - - @NonNull - List getLongList(@NonNull DriverOption option); - - @Nullable - default List getLongList(@NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getLongList(option) : defaultValue; - } - - double getDouble(@NonNull DriverOption option); - - default double getDouble(@NonNull DriverOption option, double defaultValue) { - return isDefined(option) ? getDouble(option) : defaultValue; - } - - @NonNull - List getDoubleList(@NonNull DriverOption option); - - @Nullable - default List getDoubleList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? 
getDoubleList(option) : defaultValue; - } - - @NonNull - String getString(@NonNull DriverOption option); - - @Nullable - default String getString(@NonNull DriverOption option, @Nullable String defaultValue) { - return isDefined(option) ? getString(option) : defaultValue; - } - - @NonNull - List getStringList(@NonNull DriverOption option); - - @Nullable - default List getStringList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getStringList(option) : defaultValue; - } - - @NonNull - Map getStringMap(@NonNull DriverOption option); - - @Nullable - default Map getStringMap( - @NonNull DriverOption option, @Nullable Map defaultValue) { - return isDefined(option) ? getStringMap(option) : defaultValue; - } - - /** - * @return a size in bytes. This is separate from {@link #getLong(DriverOption)}, in case - * implementations want to allow users to provide sizes in a more human-readable way, for - * example "256 MB". - */ - long getBytes(@NonNull DriverOption option); - - default long getBytes(@NonNull DriverOption option, long defaultValue) { - return isDefined(option) ? getBytes(option) : defaultValue; - } - - /** @see #getBytes(DriverOption) */ - @NonNull - List getBytesList(DriverOption option); - - @Nullable - default List getBytesList(DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getBytesList(option) : defaultValue; - } - - @NonNull - Duration getDuration(@NonNull DriverOption option); - - @Nullable - default Duration getDuration(@NonNull DriverOption option, @Nullable Duration defaultValue) { - return isDefined(option) ? getDuration(option) : defaultValue; - } - - @NonNull - List getDurationList(@NonNull DriverOption option); - - @Nullable - default List getDurationList( - @NonNull DriverOption option, @Nullable List defaultValue) { - return isDefined(option) ? getDurationList(option) : defaultValue; - } - - /** - * Returns a representation of all the child options under a given option. 
- * - *

This is used by the driver at initialization time, to compare profiles and determine if it - * must create per-profile policies. For example, if two profiles have the same options in the - * {@code basic.load-balancing-policy} section, they will share the same policy instance. But if - * their options differ, two separate instances will be created. - * - *

The runtime return type does not matter, as long as identical sections (same options with - * same values, regardless of order) compare as equal and have the same {@code hashCode()}. The - * default implementation builds a map based on the entries from {@link #entrySet()}, it should be - * good for most cases. - */ - @NonNull - default Object getComparisonKey(@NonNull DriverOption option) { - // This method is only used during driver initialization, performance is not crucial - String prefix = option.getPath(); - ImmutableMap.Builder childOptions = ImmutableMap.builder(); - for (Map.Entry entry : entrySet()) { - if (entry.getKey().startsWith(prefix)) { - childOptions.put(entry.getKey(), entry.getValue()); - } - } - return childOptions.build(); - } - - /** - * Enumerates all the entries in this profile, including those that were inherited from another - * profile. - * - *

The keys are raw strings that match {@link DriverOption#getPath()}. - * - *

The values are implementation-dependent. With the driver's default implementation, the - * possible types are {@code String}, {@code Number}, {@code Boolean}, {@code Map}, - * {@code List}, or {@code null}. - */ - @NonNull - SortedSet> entrySet(); - - @NonNull - @Override - default DriverExecutionProfile withBoolean(@NonNull DriverOption option, boolean value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withBooleanList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withInt(@NonNull DriverOption option, int value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withIntList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withLong(@NonNull DriverOption option, long value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withLongList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDouble(@NonNull DriverOption option, double value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDoubleList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withString(@NonNull DriverOption option, @NonNull String value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withStringList( - @NonNull 
DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withStringMap( - @NonNull DriverOption option, @NonNull Map value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withBytes(@NonNull DriverOption option, long value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withBytesList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDuration( - @NonNull DriverOption option, @NonNull Duration value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile withDurationList( - @NonNull DriverOption option, @NonNull List value) { - return DerivedExecutionProfile.with(this, option, value); - } - - @NonNull - @Override - default DriverExecutionProfile without(@NonNull DriverOption option) { - return DerivedExecutionProfile.without(this, option); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java deleted file mode 100644 index 2f15b701f36..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Describes an option in the driver's configuration. - * - *

This is just a thin wrapper around the option's path, to make it easier to find where it is - * referenced in the code. We recommend using enums for implementations. - */ -public interface DriverOption { - - /** - * The option's path. Paths are hierarchical and each segment is separated by a dot, e.g. {@code - * metadata.schema.enabled}. - */ - @NonNull - String getPath(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java deleted file mode 100644 index 2c931bbfa91..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** An object where config options can be set programmatically. 
*/ -public interface OngoingConfigOptions> { - - @NonNull - SelfT withBoolean(@NonNull DriverOption option, boolean value); - - @NonNull - SelfT withBooleanList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withInt(@NonNull DriverOption option, int value); - - @NonNull - SelfT withIntList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withLong(@NonNull DriverOption option, long value); - - @NonNull - SelfT withLongList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withDouble(@NonNull DriverOption option, double value); - - @NonNull - SelfT withDoubleList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withString(@NonNull DriverOption option, @NonNull String value); - - /** - * Note that this is just a shortcut to call {@link #withString(DriverOption, String)} with {@code - * value.getName()}. - */ - @NonNull - default SelfT withClass(@NonNull DriverOption option, @NonNull Class value) { - return withString(option, value.getName()); - } - - /** - * Note that this is just a shortcut to call {@link #withStringList(DriverOption, List)} with - * class names obtained from {@link Class#getName()}. 
- */ - @NonNull - default SelfT withClassList(@NonNull DriverOption option, @NonNull List> values) { - return withStringList(option, values.stream().map(Class::getName).collect(Collectors.toList())); - } - - @NonNull - SelfT withStringList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withStringMap(@NonNull DriverOption option, @NonNull Map value); - - @NonNull - SelfT withBytes(@NonNull DriverOption option, long value); - - @NonNull - SelfT withBytesList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT withDuration(@NonNull DriverOption option, @NonNull Duration value); - - @NonNull - SelfT withDurationList(@NonNull DriverOption option, @NonNull List value); - - @NonNull - SelfT without(@NonNull DriverOption option); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java deleted file mode 100644 index 98faf3e590c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.Immutable; -import net.jcip.annotations.ThreadSafe; - -/** - * An in-memory repository of config options, for use with {@link - * DriverConfigLoader#fromMap(OptionsMap)}. - * - *

This class is intended for clients who wish to assemble the driver configuration in memory, - * instead of loading it from configuration files. Note that {@link #driverDefaults()} can be used - * to pre-initialize the map with the driver's built-in defaults. - * - *

It functions like a two-dimensional map indexed by execution profile and option. All methods - * have a profile-less variant that applies to the default profile, for example {@link #get(String, - * TypedDriverOption)} and {@link #get(TypedDriverOption)}. Options are represented by {@link - * TypedDriverOption}, which allows this class to enforce additional type-safety guarantees (an - * option can only be set to a value of its intended type). - * - *

This class is mutable and thread-safe. Live changes are reflected in real time to the driver - * session(s) that use this configuration. - * - * @since 4.6.0 - */ -@ThreadSafe -public class OptionsMap implements Serializable { - - private static final long serialVersionUID = 1; - - /** - * Creates a new instance that contains the driver's default configuration. - * - *

This will produce a configuration that is equivalent to the {@code reference.conf} file - * bundled with the driver (however, this method does not load any file, and doesn't require - * Typesafe config in the classpath). - */ - @NonNull - public static OptionsMap driverDefaults() { - OptionsMap source = new OptionsMap(); - fillWithDriverDefaults(source); - return source; - } - - private final ConcurrentHashMap> map; - - private final List> changeListeners = new CopyOnWriteArrayList<>(); - - public OptionsMap() { - this(new ConcurrentHashMap<>()); - } - - private OptionsMap(ConcurrentHashMap> map) { - this.map = map; - } - - /** - * Associates the specified value for the specified option, in the specified execution profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT put( - @NonNull String profile, @NonNull TypedDriverOption option, @NonNull ValueT value) { - Objects.requireNonNull(option, "option"); - Objects.requireNonNull(value, "value"); - Object previous = getProfileMap(profile).put(option.getRawOption(), value); - if (!value.equals(previous)) { - for (Consumer listener : changeListeners) { - listener.accept(this); - } - } - return cast(previous); - } - - /** - * Associates the specified value for the specified option, in the default execution profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT put(@NonNull TypedDriverOption option, @NonNull ValueT value) { - return put(DriverExecutionProfile.DEFAULT_NAME, option, value); - } - - /** - * Returns the value to which the specified option is mapped in the specified profile, or {@code - * null} if the option is not defined. 
- */ - @Nullable - public ValueT get(@NonNull String profile, @NonNull TypedDriverOption option) { - Objects.requireNonNull(option, "option"); - Object result = getProfileMap(profile).get(option.getRawOption()); - return cast(result); - } - - /** - * Returns the value to which the specified option is mapped in the default profile, or {@code - * null} if the option is not defined. - */ - @Nullable - public ValueT get(@NonNull TypedDriverOption option) { - return get(DriverExecutionProfile.DEFAULT_NAME, option); - } - - /** - * Removes the specified option from the specified profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT remove( - @NonNull String profile, @NonNull TypedDriverOption option) { - Objects.requireNonNull(option, "option"); - Object previous = getProfileMap(profile).remove(option.getRawOption()); - if (previous != null) { - for (Consumer listener : changeListeners) { - listener.accept(this); - } - } - return cast(previous); - } - - /** - * Removes the specified option from the default profile. - * - * @return the previous value associated with {@code option}, or {@code null} if the option was - * not defined. - */ - @Nullable - public ValueT remove(@NonNull TypedDriverOption option) { - return remove(DriverExecutionProfile.DEFAULT_NAME, option); - } - - /** - * Registers a listener that will get notified when this object changes. - * - *

This is mostly for internal use by the driver. Note that listeners are transient, and not - * taken into account by {@link #equals(Object)} and {@link #hashCode()}. - */ - public void addChangeListener(@NonNull Consumer listener) { - changeListeners.add(Objects.requireNonNull(listener)); - } - - /** - * Unregisters a listener that was previously registered with {@link - * #addChangeListener(Consumer)}. - * - * @return {@code true} if the listener was indeed registered for this object. - */ - public boolean removeChangeListener(@NonNull Consumer listener) { - return changeListeners.remove(Objects.requireNonNull(listener)); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof OptionsMap) { - OptionsMap that = (OptionsMap) other; - return this.map.equals(that.map); - } else { - return false; - } - } - - @Override - public int hashCode() { - return map.hashCode(); - } - - /** - * Returns a live view of this object, using the driver's untyped {@link DriverOption}. - * - *

This is intended for internal usage by the driver. Modifying the resulting map is strongly - * discouraged, as it could break the type-safety guarantees provided by the public methods. - */ - @NonNull - protected Map> asRawMap() { - return map; - } - - @NonNull - private Map getProfileMap(@NonNull String profile) { - Objects.requireNonNull(profile, "profile"); - return map.computeIfAbsent(profile, p -> new ConcurrentHashMap<>()); - } - - // Isolate the suppressed warning for retrieval. The cast should always succeed unless the user - // messes with asMap() directly. - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - @Nullable - private ValueT cast(@Nullable Object value) { - return (ValueT) value; - } - - /** - * This object gets replaced by an internal proxy for serialization. - * - * @serialData the serialized form of the {@code Map>} used to - * store options internally (listeners are transient). - */ - private Object writeReplace() { - return new SerializationProxy(this.map); - } - - // Should never be called since we serialize a proxy - @SuppressWarnings("UnusedVariable") - private void readObject(ObjectInputStream stream) throws InvalidObjectException { - throw new InvalidObjectException("Proxy required"); - } - - protected static void fillWithDriverDefaults(OptionsMap map) { - Duration initQueryTimeout = Duration.ofSeconds(5); - Duration requestTimeout = Duration.ofSeconds(2); - int requestPageSize = 5000; - int continuousMaxPages = 0; - int continuousMaxPagesPerSecond = 0; - int continuousMaxEnqueuedPages = 4; - - // Sorted by order of appearance in reference.conf: - - // Skip CONFIG_RELOAD_INTERVAL because the map-based config doesn't need periodic reloading - map.put(TypedDriverOption.REQUEST_TIMEOUT, requestTimeout); - map.put(TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_ONE"); - map.put(TypedDriverOption.REQUEST_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.REQUEST_SERIAL_CONSISTENCY, "SERIAL"); - 
map.put(TypedDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, false); - map.put(TypedDriverOption.GRAPH_TRAVERSAL_SOURCE, "g"); - map.put(TypedDriverOption.LOAD_BALANCING_POLICY_CLASS, "DefaultLoadBalancingPolicy"); - map.put(TypedDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true); - map.put(TypedDriverOption.SESSION_LEAK_THRESHOLD, 4); - map.put(TypedDriverOption.CONNECTION_CONNECT_TIMEOUT, Duration.ofSeconds(5)); - map.put(TypedDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - map.put(TypedDriverOption.CONNECTION_POOL_REMOTE_SIZE, 1); - map.put(TypedDriverOption.CONNECTION_MAX_REQUESTS, 1024); - map.put(TypedDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS, 256); - map.put(TypedDriverOption.CONNECTION_WARN_INIT_ERROR, true); - map.put(TypedDriverOption.RECONNECT_ON_INIT, false); - map.put(TypedDriverOption.RECONNECTION_POLICY_CLASS, "ExponentialReconnectionPolicy"); - map.put(TypedDriverOption.RECONNECTION_BASE_DELAY, Duration.ofSeconds(1)); - map.put(TypedDriverOption.RECONNECTION_MAX_DELAY, Duration.ofSeconds(60)); - map.put(TypedDriverOption.RETRY_POLICY_CLASS, "DefaultRetryPolicy"); - map.put(TypedDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, "NoSpeculativeExecutionPolicy"); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_CLASS, "AtomicTimestampGenerator"); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ofSeconds(1)); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL, Duration.ofSeconds(10)); - map.put(TypedDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, false); - map.put(TypedDriverOption.REQUEST_THROTTLER_CLASS, "PassThroughRequestThrottler"); - map.put(TypedDriverOption.ADDRESS_TRANSLATOR_CLASS, "PassThroughAddressTranslator"); - map.put(TypedDriverOption.RESOLVE_CONTACT_POINTS, true); - map.put(TypedDriverOption.PROTOCOL_MAX_FRAME_LENGTH, 256L * 
1024 * 1024); - map.put(TypedDriverOption.REQUEST_WARN_IF_SET_KEYSPACE, true); - map.put(TypedDriverOption.REQUEST_TRACE_ATTEMPTS, 5); - map.put(TypedDriverOption.REQUEST_TRACE_INTERVAL, Duration.ofMillis(3)); - map.put(TypedDriverOption.REQUEST_TRACE_CONSISTENCY, "ONE"); - map.put(TypedDriverOption.REQUEST_LOG_WARNINGS, true); - map.put(TypedDriverOption.GRAPH_PAGING_ENABLED, "AUTO"); - map.put(TypedDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, continuousMaxPages); - map.put( - TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, - continuousMaxPagesPerSecond); - map.put( - TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, continuousMaxEnqueuedPages); - map.put(TypedDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, false); - map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, continuousMaxPages); - map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, continuousMaxPagesPerSecond); - map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, continuousMaxEnqueuedPages); - map.put(TypedDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(2)); - map.put(TypedDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(1)); - map.put(TypedDriverOption.MONITOR_REPORTING_ENABLED, true); - map.put(TypedDriverOption.METRICS_SESSION_ENABLED, Collections.emptyList()); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, 3); - map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_LOWEST, 
Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_DIGITS, 3); - map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_INTERVAL, Duration.ofMinutes(5)); - map.put( - TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - Duration.ofMinutes(2)); - map.put( - TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, - Duration.ofMillis(10)); - map.put(TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, 3); - map.put( - TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL, - Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_FACTORY_CLASS, "DefaultMetricsFactory"); - map.put(TypedDriverOption.METRICS_ID_GENERATOR_CLASS, "DefaultMetricIdGenerator"); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, Duration.ofSeconds(12)); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, 3); - map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_NODE_ENABLED, Collections.emptyList()); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, 3); - map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, Duration.ofSeconds(3)); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, Duration.ofMillis(1)); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, 3); - map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL, Duration.ofMinutes(5)); - map.put(TypedDriverOption.METRICS_NODE_EXPIRE_AFTER, Duration.ofHours(1)); - map.put(TypedDriverOption.SOCKET_TCP_NODELAY, true); - 
map.put(TypedDriverOption.HEARTBEAT_INTERVAL, Duration.ofSeconds(30)); - map.put(TypedDriverOption.HEARTBEAT_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.METADATA_TOPOLOGY_WINDOW, Duration.ofSeconds(1)); - map.put(TypedDriverOption.METADATA_TOPOLOGY_MAX_EVENTS, 20); - map.put(TypedDriverOption.METADATA_SCHEMA_ENABLED, true); - map.put( - TypedDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - ImmutableList.of("!system", "!/^system_.*/", "!/^dse_.*/", "!solr_admin", "!OpsCenter")); - map.put(TypedDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT, requestTimeout); - map.put(TypedDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE, requestPageSize); - map.put(TypedDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(1)); - map.put(TypedDriverOption.METADATA_SCHEMA_MAX_EVENTS, 20); - map.put(TypedDriverOption.METADATA_TOKEN_MAP_ENABLED, true); - map.put(TypedDriverOption.CONTROL_CONNECTION_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL, Duration.ofMillis(200)); - map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, Duration.ofSeconds(10)); - map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN, true); - map.put(TypedDriverOption.PREPARE_ON_ALL_NODES, true); - map.put(TypedDriverOption.REPREPARE_ENABLED, true); - map.put(TypedDriverOption.REPREPARE_CHECK_SYSTEM_TABLE, false); - map.put(TypedDriverOption.REPREPARE_MAX_STATEMENTS, 0); - map.put(TypedDriverOption.REPREPARE_MAX_PARALLELISM, 100); - map.put(TypedDriverOption.REPREPARE_TIMEOUT, initQueryTimeout); - map.put(TypedDriverOption.NETTY_DAEMON, false); - map.put(TypedDriverOption.NETTY_IO_SIZE, 0); - map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD, 2); - map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT, 15); - map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_UNIT, "SECONDS"); - map.put(TypedDriverOption.NETTY_ADMIN_SIZE, 2); - map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD, 2); - 
map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT, 15); - map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT, "SECONDS"); - map.put(TypedDriverOption.NETTY_TIMER_TICK_DURATION, Duration.ofMillis(100)); - map.put(TypedDriverOption.NETTY_TIMER_TICKS_PER_WHEEL, 2048); - map.put(TypedDriverOption.COALESCER_INTERVAL, Duration.of(10, ChronoUnit.MICROS)); - map.put(TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, 0); - map.put(TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, false); - map.put(TypedDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS, true); - map.put( - TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS, ImmutableList.of("")); - } - - @Immutable - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1L; - - private final ConcurrentHashMap> map; - - private SerializationProxy(ConcurrentHashMap> map) { - this.map = map; - } - - private Object readResolve() { - return new OptionsMap(map); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java deleted file mode 100644 index c3ae1d1bf5b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A builder that allows the creation of a config loader where options are overridden - * programmatically. - * - * @see DriverConfigLoader#programmaticBuilder() - */ -public interface ProgrammaticDriverConfigLoaderBuilder - extends OngoingConfigOptions { - - /** - * Starts the definition of a new profile. - * - *

All options set after this call, and before the next call to this method or {@link - * #endProfile()}, will apply to the given profile. - */ - @NonNull - ProgrammaticDriverConfigLoaderBuilder startProfile(@NonNull String profileName); - - /** - * Ends the definition of a profile. - * - *

All options set after this call, and before the next call to {@link #startProfile(String)}, - * will apply to the default profile. - */ - @NonNull - ProgrammaticDriverConfigLoaderBuilder endProfile(); - - @NonNull - DriverConfigLoader build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java deleted file mode 100644 index 182753300e7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java +++ /dev/null @@ -1,944 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.config; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.StringJoiner; - -/** - * A type-safe wrapper around {@link DriverOption}, that encodes the intended value type of each - * option. - * - *

This type was introduced in conjunction with {@link DriverConfigLoader#fromMap(OptionsMap)}. - * Unfortunately, for backward compatibility reasons, it wasn't possible to retrofit the rest of the - * driver to use it; therefore the APIs used to read the configuration, such as {@link DriverConfig} - * and {@link DriverExecutionProfile}, still use the untyped {@link DriverOption}. - * - * @since 4.6.0 - */ -public class TypedDriverOption { - - private static volatile Iterable> builtInValues; - - /** - * Returns the list of all built-in options known to the driver codebase; in other words, all the - * {@link TypedDriverOption} constants defined on this class. - * - *

Note that 3rd-party driver extensions might define their own {@link TypedDriverOption} - * constants for custom options. - * - *

This method uses reflection to introspect all the constants on this class; the result is - * computed lazily on the first invocation, and then cached for future calls. - */ - public static Iterable> builtInValues() { - if (builtInValues == null) { - builtInValues = introspectBuiltInValues(); - } - return builtInValues; - } - - private final DriverOption rawOption; - private final GenericType expectedType; - - public TypedDriverOption( - @NonNull DriverOption rawOption, @NonNull GenericType expectedType) { - this.rawOption = Objects.requireNonNull(rawOption); - this.expectedType = Objects.requireNonNull(expectedType); - } - - @NonNull - public DriverOption getRawOption() { - return rawOption; - } - - @NonNull - public GenericType getExpectedType() { - return expectedType; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TypedDriverOption) { - TypedDriverOption that = (TypedDriverOption) other; - return this.rawOption.equals(that.rawOption) && this.expectedType.equals(that.expectedType); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(rawOption, expectedType); - } - - @Override - public String toString() { - return new StringJoiner(", ", TypedDriverOption.class.getSimpleName() + "[", "]") - .add("rawOption=" + rawOption) - .add("expectedType=" + expectedType) - .toString(); - } - - /** The contact points to use for the initial connection to the cluster. */ - public static final TypedDriverOption> CONTACT_POINTS = - new TypedDriverOption<>(DefaultDriverOption.CONTACT_POINTS, GenericType.listOf(String.class)); - /** A name that uniquely identifies the driver instance. */ - public static final TypedDriverOption SESSION_NAME = - new TypedDriverOption<>(DefaultDriverOption.SESSION_NAME, GenericType.STRING); - /** The name of the keyspace that the session should initially be connected to. 
*/ - public static final TypedDriverOption SESSION_KEYSPACE = - new TypedDriverOption<>(DefaultDriverOption.SESSION_KEYSPACE, GenericType.STRING); - /** How often the driver tries to reload the configuration. */ - public static final TypedDriverOption CONFIG_RELOAD_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.CONFIG_RELOAD_INTERVAL, GenericType.DURATION); - /** How long the driver waits for a request to complete. */ - public static final TypedDriverOption REQUEST_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TIMEOUT, GenericType.DURATION); - /** The consistency level. */ - public static final TypedDriverOption REQUEST_CONSISTENCY = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_CONSISTENCY, GenericType.STRING); - /** The page size. */ - public static final TypedDriverOption REQUEST_PAGE_SIZE = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_PAGE_SIZE, GenericType.INTEGER); - /** The serial consistency level. */ - public static final TypedDriverOption REQUEST_SERIAL_CONSISTENCY = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY, GenericType.STRING); - /** The default idempotence of a request. */ - public static final TypedDriverOption REQUEST_DEFAULT_IDEMPOTENCE = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, GenericType.BOOLEAN); - /** The class of the load balancing policy. */ - public static final TypedDriverOption LOAD_BALANCING_POLICY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, GenericType.STRING); - /** The datacenter that is considered "local". */ - public static final TypedDriverOption LOAD_BALANCING_LOCAL_DATACENTER = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, GenericType.STRING); - /** - * A custom filter to include/exclude nodes. - * - * @deprecated Use {@link #LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS} instead. 
- */ - @Deprecated - public static final TypedDriverOption LOAD_BALANCING_FILTER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, GenericType.STRING); - /** - * The class name of a custom {@link - * com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator}. - */ - public static final TypedDriverOption LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS, GenericType.STRING); - /** The timeout to use for internal queries that run as part of the initialization process. */ - public static final TypedDriverOption CONNECTION_INIT_QUERY_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, GenericType.DURATION); - /** The timeout to use when the driver changes the keyspace on a connection at runtime. */ - public static final TypedDriverOption CONNECTION_SET_KEYSPACE_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT, GenericType.DURATION); - /** The maximum number of requests that can be executed concurrently on a connection. */ - public static final TypedDriverOption CONNECTION_MAX_REQUESTS = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_MAX_REQUESTS, GenericType.INTEGER); - /** The maximum number of "orphaned" requests before a connection gets closed automatically. */ - public static final TypedDriverOption CONNECTION_MAX_ORPHAN_REQUESTS = - new TypedDriverOption<>( - DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS, GenericType.INTEGER); - /** Whether to log non-fatal errors when the driver tries to open a new connection. */ - public static final TypedDriverOption CONNECTION_WARN_INIT_ERROR = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR, GenericType.BOOLEAN); - /** The number of connections in the LOCAL pool. 
*/ - public static final TypedDriverOption CONNECTION_POOL_LOCAL_SIZE = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, GenericType.INTEGER); - /** The number of connections in the REMOTE pool. */ - public static final TypedDriverOption CONNECTION_POOL_REMOTE_SIZE = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE, GenericType.INTEGER); - /** - * Whether to schedule reconnection attempts if all contact points are unreachable on the first - * initialization attempt. - */ - public static final TypedDriverOption RECONNECT_ON_INIT = - new TypedDriverOption<>(DefaultDriverOption.RECONNECT_ON_INIT, GenericType.BOOLEAN); - /** The class of the reconnection policy. */ - public static final TypedDriverOption RECONNECTION_POLICY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_POLICY_CLASS, GenericType.STRING); - /** Base delay for computing time between reconnection attempts. */ - public static final TypedDriverOption RECONNECTION_BASE_DELAY = - new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_BASE_DELAY, GenericType.DURATION); - /** Maximum delay between reconnection attempts. */ - public static final TypedDriverOption RECONNECTION_MAX_DELAY = - new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_MAX_DELAY, GenericType.DURATION); - /** The class of the retry policy. */ - public static final TypedDriverOption RETRY_POLICY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.RETRY_POLICY_CLASS, GenericType.STRING); - /** The class of the speculative execution policy. */ - public static final TypedDriverOption SPECULATIVE_EXECUTION_POLICY_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, GenericType.STRING); - /** The maximum number of executions. */ - public static final TypedDriverOption SPECULATIVE_EXECUTION_MAX = - new TypedDriverOption<>(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, GenericType.INTEGER); - /** The delay between each execution. 
*/ - public static final TypedDriverOption SPECULATIVE_EXECUTION_DELAY = - new TypedDriverOption<>( - DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, GenericType.DURATION); - /** The class of the authentication provider. */ - public static final TypedDriverOption AUTH_PROVIDER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_CLASS, GenericType.STRING); - /** Plain text auth provider username. */ - public static final TypedDriverOption AUTH_PROVIDER_USER_NAME = - new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, GenericType.STRING); - /** Plain text auth provider password. */ - public static final TypedDriverOption AUTH_PROVIDER_PASSWORD = - new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, GenericType.STRING); - /** The class of the SSL Engine Factory. */ - public static final TypedDriverOption SSL_ENGINE_FACTORY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, GenericType.STRING); - /** The cipher suites to enable when creating an SSLEngine for a connection. */ - public static final TypedDriverOption> SSL_CIPHER_SUITES = - new TypedDriverOption<>( - DefaultDriverOption.SSL_CIPHER_SUITES, GenericType.listOf(String.class)); - /** - * Whether or not to require validation that the hostname of the server certificate's common name - * matches the hostname of the server being connected to. - */ - public static final TypedDriverOption SSL_HOSTNAME_VALIDATION = - new TypedDriverOption<>(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, GenericType.BOOLEAN); - - public static final TypedDriverOption SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN = - new TypedDriverOption<>( - DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, GenericType.BOOLEAN); - /** The location of the keystore file. */ - public static final TypedDriverOption SSL_KEYSTORE_PATH = - new TypedDriverOption<>(DefaultDriverOption.SSL_KEYSTORE_PATH, GenericType.STRING); - /** The keystore password. 
*/ - public static final TypedDriverOption SSL_KEYSTORE_PASSWORD = - new TypedDriverOption<>(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, GenericType.STRING); - - /** The duration between attempts to reload the keystore. */ - public static final TypedDriverOption SSL_KEYSTORE_RELOAD_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, GenericType.DURATION); - - /** The location of the truststore file. */ - public static final TypedDriverOption SSL_TRUSTSTORE_PATH = - new TypedDriverOption<>(DefaultDriverOption.SSL_TRUSTSTORE_PATH, GenericType.STRING); - /** The truststore password. */ - public static final TypedDriverOption SSL_TRUSTSTORE_PASSWORD = - new TypedDriverOption<>(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, GenericType.STRING); - /** The class of the generator that assigns a microsecond timestamp to each request. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS, GenericType.STRING); - /** Whether to force the driver to use Java's millisecond-precision system clock. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK = - new TypedDriverOption<>( - DefaultDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, GenericType.BOOLEAN); - /** How far in the future timestamps are allowed to drift before the warning is logged. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD = - new TypedDriverOption<>( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, GenericType.DURATION); - /** How often the warning will be logged if timestamps keep drifting above the threshold. */ - public static final TypedDriverOption TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL, GenericType.DURATION); - - /** - * The class of a session-wide component that tracks the outcome of requests. 
- * - * @deprecated Use {@link #REQUEST_TRACKER_CLASSES} instead. - */ - @Deprecated - public static final TypedDriverOption REQUEST_TRACKER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACKER_CLASS, GenericType.STRING); - - /** The classes of session-wide components that track the outcome of requests. */ - public static final TypedDriverOption> REQUEST_TRACKER_CLASSES = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_TRACKER_CLASSES, GenericType.listOf(String.class)); - - /** The class of a session-wide component that generates request IDs. */ - public static final TypedDriverOption REQUEST_ID_GENERATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, GenericType.STRING); - - /** Whether to log successful requests. */ - public static final TypedDriverOption REQUEST_LOGGER_SUCCESS_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, GenericType.BOOLEAN); - /** The threshold to classify a successful request as "slow". */ - public static final TypedDriverOption REQUEST_LOGGER_SLOW_THRESHOLD = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, GenericType.DURATION); - /** Whether to log slow requests. */ - public static final TypedDriverOption REQUEST_LOGGER_SLOW_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, GenericType.BOOLEAN); - /** Whether to log failed requests. */ - public static final TypedDriverOption REQUEST_LOGGER_ERROR_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, GenericType.BOOLEAN); - /** The maximum length of the query string in the log message. */ - public static final TypedDriverOption REQUEST_LOGGER_MAX_QUERY_LENGTH = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, GenericType.INTEGER); - /** Whether to log bound values in addition to the query string. 
*/ - public static final TypedDriverOption REQUEST_LOGGER_VALUES = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_VALUES, GenericType.BOOLEAN); - /** The maximum length for bound values in the log message. */ - public static final TypedDriverOption REQUEST_LOGGER_MAX_VALUE_LENGTH = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, GenericType.INTEGER); - /** The maximum number of bound values to log. */ - public static final TypedDriverOption REQUEST_LOGGER_MAX_VALUES = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, GenericType.INTEGER); - /** Whether to log stack traces for failed queries. */ - public static final TypedDriverOption REQUEST_LOGGER_STACK_TRACES = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, GenericType.BOOLEAN); - /** - * The class of a session-wide component that controls the rate at which requests are executed. - */ - public static final TypedDriverOption REQUEST_THROTTLER_CLASS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_THROTTLER_CLASS, GenericType.STRING); - /** The maximum number of requests that are allowed to execute in parallel. */ - public static final TypedDriverOption REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS, GenericType.INTEGER); - /** The maximum allowed request rate. */ - public static final TypedDriverOption REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND, GenericType.INTEGER); - /** - * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - */ - public static final TypedDriverOption REQUEST_THROTTLER_MAX_QUEUE_SIZE = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE, GenericType.INTEGER); - /** How often the throttler attempts to dequeue requests. 
*/ - public static final TypedDriverOption REQUEST_THROTTLER_DRAIN_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL, GenericType.DURATION); - - /** - * The class of a session-wide component that listens for node state changes. - * - * @deprecated Use {@link #METADATA_NODE_STATE_LISTENER_CLASSES} instead. - */ - @Deprecated - public static final TypedDriverOption METADATA_NODE_STATE_LISTENER_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS, GenericType.STRING); - - /** - * The class of a session-wide component that listens for schema changes. - * - * @deprecated Use {@link #METADATA_SCHEMA_CHANGE_LISTENER_CLASSES} instead. - */ - @Deprecated - public static final TypedDriverOption METADATA_SCHEMA_CHANGE_LISTENER_CLASS = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS, GenericType.STRING); - - /** The classes of session-wide components that listen for node state changes. */ - public static final TypedDriverOption> METADATA_NODE_STATE_LISTENER_CLASSES = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES, - GenericType.listOf(String.class)); - - /** The classes of session-wide components that listen for schema changes. */ - public static final TypedDriverOption> METADATA_SCHEMA_CHANGE_LISTENER_CLASSES = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES, - GenericType.listOf(String.class)); - - /** - * The class of the address translator to use to convert the addresses sent by Cassandra nodes - * into ones that the driver uses to connect. - */ - public static final TypedDriverOption ADDRESS_TRANSLATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS, GenericType.STRING); - /** The native protocol version to use. 
*/ - public static final TypedDriverOption PROTOCOL_VERSION = - new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_VERSION, GenericType.STRING); - /** The name of the algorithm used to compress protocol frames. */ - public static final TypedDriverOption PROTOCOL_COMPRESSION = - new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_COMPRESSION, GenericType.STRING); - /** The maximum length, in bytes, of the frames supported by the driver. */ - public static final TypedDriverOption PROTOCOL_MAX_FRAME_LENGTH = - new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH, GenericType.LONG); - /** - * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - * keyspace. - */ - public static final TypedDriverOption REQUEST_WARN_IF_SET_KEYSPACE = - new TypedDriverOption<>( - DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE, GenericType.BOOLEAN); - /** How many times the driver will attempt to fetch the query trace if it is not ready yet. */ - public static final TypedDriverOption REQUEST_TRACE_ATTEMPTS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS, GenericType.INTEGER); - /** The interval between each attempt. */ - public static final TypedDriverOption REQUEST_TRACE_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_INTERVAL, GenericType.DURATION); - /** The consistency level to use for trace queries. */ - public static final TypedDriverOption REQUEST_TRACE_CONSISTENCY = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY, GenericType.STRING); - /** Whether or not to publish aggregable histogram for metrics */ - public static final TypedDriverOption METRICS_GENERATE_AGGREGABLE_HISTOGRAMS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS, GenericType.BOOLEAN); - /** List of enabled session-level metrics. 
*/ - public static final TypedDriverOption> METRICS_SESSION_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_ENABLED, GenericType.listOf(String.class)); - /** List of enabled node-level metrics. */ - public static final TypedDriverOption> METRICS_NODE_ENABLED = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_ENABLED, GenericType.listOf(String.class)); - /** The largest latency that we expect to record for requests. */ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_HIGHEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for requests. */ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_LOWEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_SESSION_CQL_REQUESTS_SLO = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of cql requests to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - */ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_DIGITS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for requests. 
*/ - public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL, GenericType.DURATION); - /** The largest latency that we expect to record for throttling. */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_HIGHEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for throttling. */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_LOWEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_SESSION_THROTTLING_SLO = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of throttling delay to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * throttling. - */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_DIGITS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for throttling. */ - public static final TypedDriverOption METRICS_SESSION_THROTTLING_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_SESSION_THROTTLING_INTERVAL, GenericType.DURATION); - /** The largest latency that we expect to record for requests. 
*/ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_HIGHEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for requests. */ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_LOWEST = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_NODE_CQL_MESSAGES_SLO = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of node cql messages to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * requests. - */ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_DIGITS = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for requests. */ - public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL, GenericType.DURATION); - /** Whether or not to disable the Nagle algorithm. */ - public static final TypedDriverOption SOCKET_TCP_NODELAY = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_TCP_NODELAY, GenericType.BOOLEAN); - /** Whether or not to enable TCP keep-alive probes. 
*/ - public static final TypedDriverOption SOCKET_KEEP_ALIVE = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_KEEP_ALIVE, GenericType.BOOLEAN); - /** Whether or not to allow address reuse. */ - public static final TypedDriverOption SOCKET_REUSE_ADDRESS = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_REUSE_ADDRESS, GenericType.BOOLEAN); - /** Sets the linger interval. */ - public static final TypedDriverOption SOCKET_LINGER_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_LINGER_INTERVAL, GenericType.INTEGER); - /** Sets a hint to the size of the underlying buffers for incoming network I/O. */ - public static final TypedDriverOption SOCKET_RECEIVE_BUFFER_SIZE = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE, GenericType.INTEGER); - /** Sets a hint to the size of the underlying buffers for outgoing network I/O. */ - public static final TypedDriverOption SOCKET_SEND_BUFFER_SIZE = - new TypedDriverOption<>(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE, GenericType.INTEGER); - /** The connection heartbeat interval. */ - public static final TypedDriverOption HEARTBEAT_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.HEARTBEAT_INTERVAL, GenericType.DURATION); - /** How long the driver waits for the response to a heartbeat. */ - public static final TypedDriverOption HEARTBEAT_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.HEARTBEAT_TIMEOUT, GenericType.DURATION); - /** How long the driver waits to propagate a Topology event. */ - public static final TypedDriverOption METADATA_TOPOLOGY_WINDOW = - new TypedDriverOption<>(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW, GenericType.DURATION); - /** The maximum number of events that can accumulate. */ - public static final TypedDriverOption METADATA_TOPOLOGY_MAX_EVENTS = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS, GenericType.INTEGER); - /** Whether schema metadata is enabled. 
*/ - public static final TypedDriverOption METADATA_SCHEMA_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_ENABLED, GenericType.BOOLEAN); - /** The timeout for the requests to the schema tables. */ - public static final TypedDriverOption METADATA_SCHEMA_REQUEST_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT, GenericType.DURATION); - /** The page size for the requests to the schema tables. */ - public static final TypedDriverOption METADATA_SCHEMA_REQUEST_PAGE_SIZE = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE, GenericType.INTEGER); - /** The list of keyspaces for which schema and token metadata should be maintained. */ - public static final TypedDriverOption> METADATA_SCHEMA_REFRESHED_KEYSPACES = - new TypedDriverOption<>( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - GenericType.listOf(String.class)); - /** How long the driver waits to apply a refresh. */ - public static final TypedDriverOption METADATA_SCHEMA_WINDOW = - new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_WINDOW, GenericType.DURATION); - /** The maximum number of refreshes that can accumulate. */ - public static final TypedDriverOption METADATA_SCHEMA_MAX_EVENTS = - new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS, GenericType.INTEGER); - /** Whether token metadata is enabled. */ - public static final TypedDriverOption METADATA_TOKEN_MAP_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED, GenericType.BOOLEAN); - /** How long the driver waits for responses to control queries. */ - public static final TypedDriverOption CONTROL_CONNECTION_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT, GenericType.DURATION); - /** The interval between each schema agreement check attempt. 
*/ - public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_INTERVAL = - new TypedDriverOption<>( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL, GenericType.DURATION); - /** The timeout after which schema agreement fails. */ - public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, GenericType.DURATION); - /** Whether to log a warning if schema agreement fails. */ - public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_WARN = - new TypedDriverOption<>( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN, GenericType.BOOLEAN); - /** Whether `Session.prepare` calls should be sent to all nodes in the cluster. */ - public static final TypedDriverOption PREPARE_ON_ALL_NODES = - new TypedDriverOption<>(DefaultDriverOption.PREPARE_ON_ALL_NODES, GenericType.BOOLEAN); - /** Whether the driver tries to prepare on new nodes at all. */ - public static final TypedDriverOption REPREPARE_ENABLED = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_ENABLED, GenericType.BOOLEAN); - /** Whether to check `system.prepared_statements` on the target node before repreparing. */ - public static final TypedDriverOption REPREPARE_CHECK_SYSTEM_TABLE = - new TypedDriverOption<>( - DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE, GenericType.BOOLEAN); - /** The maximum number of statements that should be reprepared. */ - public static final TypedDriverOption REPREPARE_MAX_STATEMENTS = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_MAX_STATEMENTS, GenericType.INTEGER); - /** The maximum number of concurrent requests when repreparing. */ - public static final TypedDriverOption REPREPARE_MAX_PARALLELISM = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_MAX_PARALLELISM, GenericType.INTEGER); - /** The request timeout when repreparing. 
*/ - public static final TypedDriverOption REPREPARE_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.REPREPARE_TIMEOUT, GenericType.DURATION); - /** Whether the prepared statements cache use weak values. */ - public static final TypedDriverOption PREPARED_CACHE_WEAK_VALUES = - new TypedDriverOption<>(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, GenericType.BOOLEAN); - /** The number of threads in the I/O group. */ - public static final TypedDriverOption NETTY_IO_SIZE = - new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SIZE, GenericType.INTEGER); - /** Quiet period for I/O group shutdown. */ - public static final TypedDriverOption NETTY_IO_SHUTDOWN_QUIET_PERIOD = - new TypedDriverOption<>( - DefaultDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD, GenericType.INTEGER); - /** Max time to wait for I/O group shutdown. */ - public static final TypedDriverOption NETTY_IO_SHUTDOWN_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT, GenericType.INTEGER); - /** Units for I/O group quiet period and timeout. */ - public static final TypedDriverOption NETTY_IO_SHUTDOWN_UNIT = - new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SHUTDOWN_UNIT, GenericType.STRING); - /** The number of threads in the Admin group. */ - public static final TypedDriverOption NETTY_ADMIN_SIZE = - new TypedDriverOption<>(DefaultDriverOption.NETTY_ADMIN_SIZE, GenericType.INTEGER); - /** Quiet period for admin group shutdown. */ - public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD = - new TypedDriverOption<>( - DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD, GenericType.INTEGER); - /** Max time to wait for admin group shutdown. */ - public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_TIMEOUT = - new TypedDriverOption<>( - DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT, GenericType.INTEGER); - /** Units for admin group quiet period and timeout. 
*/ - public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_UNIT = - new TypedDriverOption<>(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT, GenericType.STRING); - /** @deprecated This option was removed in version 4.6.1. */ - @Deprecated - public static final TypedDriverOption COALESCER_MAX_RUNS = - new TypedDriverOption<>(DefaultDriverOption.COALESCER_MAX_RUNS, GenericType.INTEGER); - /** The coalescer reschedule interval. */ - public static final TypedDriverOption COALESCER_INTERVAL = - new TypedDriverOption<>(DefaultDriverOption.COALESCER_INTERVAL, GenericType.DURATION); - /** Whether to resolve the addresses passed to `basic.contact-points`. */ - public static final TypedDriverOption RESOLVE_CONTACT_POINTS = - new TypedDriverOption<>(DefaultDriverOption.RESOLVE_CONTACT_POINTS, GenericType.BOOLEAN); - /** - * This is how frequent the timer should wake up to check for timed-out tasks or speculative - * executions. - */ - public static final TypedDriverOption NETTY_TIMER_TICK_DURATION = - new TypedDriverOption<>(DefaultDriverOption.NETTY_TIMER_TICK_DURATION, GenericType.DURATION); - /** Number of ticks in the Timer wheel. */ - public static final TypedDriverOption NETTY_TIMER_TICKS_PER_WHEEL = - new TypedDriverOption<>(DefaultDriverOption.NETTY_TIMER_TICKS_PER_WHEEL, GenericType.INTEGER); - /** - * Whether logging of server warnings generated during query execution should be disabled by the - * driver. - */ - public static final TypedDriverOption REQUEST_LOG_WARNINGS = - new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOG_WARNINGS, GenericType.BOOLEAN); - /** Whether the threads created by the driver should be daemon threads. */ - public static final TypedDriverOption NETTY_DAEMON = - new TypedDriverOption<>(DefaultDriverOption.NETTY_DAEMON, GenericType.BOOLEAN); - /** - * The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a - * service. 
- */ - public static final TypedDriverOption CLOUD_SECURE_CONNECT_BUNDLE = - new TypedDriverOption<>(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, GenericType.STRING); - /** Whether the slow replica avoidance should be enabled in the default LBP. */ - public static final TypedDriverOption LOAD_BALANCING_POLICY_SLOW_AVOIDANCE = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, GenericType.BOOLEAN); - /** The timeout to use when establishing driver connections. */ - public static final TypedDriverOption CONNECTION_CONNECT_TIMEOUT = - new TypedDriverOption<>(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT, GenericType.DURATION); - /** The maximum number of live sessions that are allowed to coexist in a given VM. */ - public static final TypedDriverOption SESSION_LEAK_THRESHOLD = - new TypedDriverOption<>(DefaultDriverOption.SESSION_LEAK_THRESHOLD, GenericType.INTEGER); - - /** The name of the application using the session. */ - public static final TypedDriverOption APPLICATION_NAME = - new TypedDriverOption<>(DseDriverOption.APPLICATION_NAME, GenericType.STRING); - /** The version of the application using the session. */ - public static final TypedDriverOption APPLICATION_VERSION = - new TypedDriverOption<>(DseDriverOption.APPLICATION_VERSION, GenericType.STRING); - /** Proxy authentication for GSSAPI authentication: allows to login as another user or role. */ - public static final TypedDriverOption AUTH_PROVIDER_AUTHORIZATION_ID = - new TypedDriverOption<>(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, GenericType.STRING); - /** Service name for GSSAPI authentication. */ - public static final TypedDriverOption AUTH_PROVIDER_SERVICE = - new TypedDriverOption<>(DseDriverOption.AUTH_PROVIDER_SERVICE, GenericType.STRING); - /** Login configuration for GSSAPI authentication. 
*/ - public static final TypedDriverOption AUTH_PROVIDER_LOGIN_CONFIGURATION = - new TypedDriverOption<>( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, GenericType.STRING); - /** Internal SASL properties, if any, such as QOP, for GSSAPI authentication. */ - public static final TypedDriverOption> AUTH_PROVIDER_SASL_PROPERTIES = - new TypedDriverOption<>( - DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, - GenericType.mapOf(GenericType.STRING, GenericType.STRING)); - /** The page size for continuous paging. */ - public static final TypedDriverOption CONTINUOUS_PAGING_PAGE_SIZE = - new TypedDriverOption<>(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, GenericType.INTEGER); - /** - * Whether {@link #CONTINUOUS_PAGING_PAGE_SIZE} should be interpreted in number of rows or bytes. - */ - public static final TypedDriverOption CONTINUOUS_PAGING_PAGE_SIZE_BYTES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, GenericType.BOOLEAN); - /** The maximum number of continuous pages to return. */ - public static final TypedDriverOption CONTINUOUS_PAGING_MAX_PAGES = - new TypedDriverOption<>(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, GenericType.INTEGER); - /** The maximum number of continuous pages per second. */ - public static final TypedDriverOption CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, GenericType.INTEGER); - /** The maximum number of continuous pages that can be stored in the local queue. */ - public static final TypedDriverOption CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, GenericType.INTEGER); - /** How long to wait for the coordinator to send the first continuous page. 
*/ - public static final TypedDriverOption CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, GenericType.DURATION); - /** How long to wait for the coordinator to send subsequent continuous pages. */ - public static final TypedDriverOption CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, GenericType.DURATION); - /** The largest latency that we expect to record for continuous requests. */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - GenericType.DURATION); - /** The shortest latency that we expect to record for continuous requests. */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, - GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO, - GenericType.listOf(GenericType.DURATION)); - /** - * Optional pre-defined percentile of continuous paging cql requests to publish, as a list of - * percentiles . - */ - public static final TypedDriverOption> - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for - * continuous requests. 
- */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, - GenericType.INTEGER); - /** The interval at which percentile data is refreshed for continuous requests. */ - public static final TypedDriverOption - CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL = - new TypedDriverOption<>( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL, - GenericType.DURATION); - /** The read consistency level to use for graph statements. */ - public static final TypedDriverOption GRAPH_READ_CONSISTENCY_LEVEL = - new TypedDriverOption<>(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, GenericType.STRING); - /** The write consistency level to use for graph statements. */ - public static final TypedDriverOption GRAPH_WRITE_CONSISTENCY_LEVEL = - new TypedDriverOption<>(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, GenericType.STRING); - /** The traversal source to use for graph statements. */ - public static final TypedDriverOption GRAPH_TRAVERSAL_SOURCE = - new TypedDriverOption<>(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, GenericType.STRING); - /** - * The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra - * native protocol. - */ - public static final TypedDriverOption GRAPH_SUB_PROTOCOL = - new TypedDriverOption<>(DseDriverOption.GRAPH_SUB_PROTOCOL, GenericType.STRING); - /** Whether a script statement represents a system query. */ - public static final TypedDriverOption GRAPH_IS_SYSTEM_QUERY = - new TypedDriverOption<>(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, GenericType.BOOLEAN); - /** The name of the graph targeted by graph statements. */ - public static final TypedDriverOption GRAPH_NAME = - new TypedDriverOption<>(DseDriverOption.GRAPH_NAME, GenericType.STRING); - /** How long the driver waits for a graph request to complete. 
*/ - public static final TypedDriverOption GRAPH_TIMEOUT = - new TypedDriverOption<>(DseDriverOption.GRAPH_TIMEOUT, GenericType.DURATION); - /** Whether to send events for Insights monitoring. */ - public static final TypedDriverOption MONITOR_REPORTING_ENABLED = - new TypedDriverOption<>(DseDriverOption.MONITOR_REPORTING_ENABLED, GenericType.BOOLEAN); - /** Whether to enable paging for Graph queries. */ - public static final TypedDriverOption GRAPH_PAGING_ENABLED = - new TypedDriverOption<>(DseDriverOption.GRAPH_PAGING_ENABLED, GenericType.STRING); - /** The page size for Graph continuous paging. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_PAGE_SIZE = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, GenericType.INTEGER); - /** The maximum number of Graph continuous pages to return. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_MAX_PAGES = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, GenericType.INTEGER); - /** The maximum number of Graph continuous pages per second. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, GenericType.INTEGER); - /** The maximum number of Graph continuous pages that can be stored in the local queue. */ - public static final TypedDriverOption GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES = - new TypedDriverOption<>( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, GenericType.INTEGER); - /** The largest latency that we expect to record for graph requests. */ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_HIGHEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for graph requests. 
*/ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_LOWEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. */ - public static final TypedDriverOption> METRICS_SESSION_GRAPH_REQUESTS_SLO = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO, - GenericType.listOf(GenericType.DURATION)); - /** Optional pre-defined percentile of graph requests to publish, as a list of percentiles . */ - public static final TypedDriverOption> - METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - */ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_DIGITS = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for graph requests. */ - public static final TypedDriverOption METRICS_SESSION_GRAPH_REQUESTS_INTERVAL = - new TypedDriverOption<>( - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL, GenericType.DURATION); - /** The largest latency that we expect to record for graph requests. */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_HIGHEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, GenericType.DURATION); - /** The shortest latency that we expect to record for graph requests. */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_LOWEST = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, GenericType.DURATION); - /** Optional service-level objectives to meet, as a list of latencies to track. 
*/ - public static final TypedDriverOption> METRICS_NODE_GRAPH_MESSAGES_SLO = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO, - GenericType.listOf(GenericType.DURATION)); - /** - * Optional pre-defined percentile of node graph requests to publish, as a list of percentiles . - */ - public static final TypedDriverOption> - METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES, - GenericType.listOf(GenericType.DOUBLE)); - /** - * The number of significant decimal digits to which internal structures will maintain for graph - * requests. - */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_DIGITS = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, GenericType.INTEGER); - /** The interval at which percentile data is refreshed for graph requests. */ - public static final TypedDriverOption METRICS_NODE_GRAPH_MESSAGES_INTERVAL = - new TypedDriverOption<>( - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL, GenericType.DURATION); - - /** The time after which the node level metrics will be evicted. */ - public static final TypedDriverOption METRICS_NODE_EXPIRE_AFTER = - new TypedDriverOption<>(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, GenericType.DURATION); - - /** The classname of the desired MetricsFactory implementation. */ - public static final TypedDriverOption METRICS_FACTORY_CLASS = - new TypedDriverOption<>(DefaultDriverOption.METRICS_FACTORY_CLASS, GenericType.STRING); - - /** The classname of the desired {@code MetricIdGenerator} implementation. */ - public static final TypedDriverOption METRICS_ID_GENERATOR_CLASS = - new TypedDriverOption<>(DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, GenericType.STRING); - - /** The value of the prefix to prepend to all metric names. 
*/ - public static final TypedDriverOption METRICS_ID_GENERATOR_PREFIX = - new TypedDriverOption<>(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, GenericType.STRING); - - /** The maximum number of nodes from remote DCs to include in query plans. */ - public static final TypedDriverOption - LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, - GenericType.INTEGER); - /** Whether to consider nodes from remote DCs if the request's consistency level is local. */ - public static final TypedDriverOption - LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, - GenericType.BOOLEAN); - - public static final TypedDriverOption ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME, GenericType.STRING); - public static final TypedDriverOption> ADDRESS_TRANSLATOR_SUBNET_ADDRESSES = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES, - GenericType.mapOf(GenericType.STRING, GenericType.STRING)); - public static final TypedDriverOption ADDRESS_TRANSLATOR_DEFAULT_ADDRESS = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, GenericType.STRING); - public static final TypedDriverOption ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES = - new TypedDriverOption<>( - DefaultDriverOption.ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES, GenericType.BOOLEAN); - - /** - * Ordered preference list of remote dcs optionally supplied for automatic failover and included - * in query plan. This feature is enabled only when max-nodes-per-remote-dc is greater than 0. 
- */ - public static final TypedDriverOption> - LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS = - new TypedDriverOption<>( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS, - GenericType.listOf(String.class)); - - private static Iterable> introspectBuiltInValues() { - try { - ImmutableList.Builder> result = ImmutableList.builder(); - for (Field field : TypedDriverOption.class.getFields()) { - if ((field.getModifiers() & PUBLIC_STATIC_FINAL) == PUBLIC_STATIC_FINAL - && field.getType() == TypedDriverOption.class) { - TypedDriverOption typedOption = (TypedDriverOption) field.get(null); - result.add(typedOption); - } - } - return result.build(); - } catch (IllegalAccessException e) { - throw new IllegalStateException("Unexpected error while introspecting built-in values", e); - } - } - - private static final int PUBLIC_STATIC_FINAL = Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL; -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java deleted file mode 100644 index a751d983e70..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The configuration of the driver. - * - *

The public API is completely agnostic to the underlying implementation (where the - * configuration is loaded from, what framework is used...). - */ -package com.datastax.oss.driver.api.core.config; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java deleted file mode 100644 index 8069474612a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Indicates that a write was attempted on a connection that already handles too many simultaneous - * requests. - * - *

This might happen under heavy load. The driver will automatically try the next node in the - * query plan. Therefore, the only way that the client can observe this exception is as part of a - * {@link AllNodesFailedException}. - */ -public class BusyConnectionException extends DriverException { - - // Note: the driver doesn't use this constructor anymore, it is preserved only for backward - // compatibility. - @SuppressWarnings("unused") - public BusyConnectionException(int maxAvailableIds) { - this( - String.format( - "Connection has exceeded its maximum of %d simultaneous requests", maxAvailableIds), - null, - false); - } - - public BusyConnectionException(String message) { - this(message, null, false); - } - - private BusyConnectionException( - String message, ExecutionInfo executionInfo, boolean writableStackTrace) { - super(message, executionInfo, null, writableStackTrace); - } - - @Override - @NonNull - public DriverException copy() { - return new BusyConnectionException(getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java deleted file mode 100644 index a192e2c5efc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when the connection on which a request was executing is closed due to an unrelated event. - * - *

For example, this can happen if the node is unresponsive and a heartbeat query failed, or if - * the node was forced down. - * - *

The driver will retry these requests on the next node transparently, unless the request is not - * idempotent. Therefore, this exception is usually observed as part of an {@link - * AllNodesFailedException}. - */ -public class ClosedConnectionException extends DriverException { - - public ClosedConnectionException(@NonNull String message) { - this(message, null, false); - } - - public ClosedConnectionException(@NonNull String message, @Nullable Throwable cause) { - this(message, cause, false); - } - - private ClosedConnectionException( - @NonNull String message, @Nullable Throwable cause, boolean writableStackTrace) { - super(message, null, cause, writableStackTrace); - } - - @Override - @NonNull - public DriverException copy() { - return new ClosedConnectionException(getMessage(), getCause(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java deleted file mode 100644 index 519624e8d5d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates a generic error while initializing a connection. - * - *

The only time when this is returned directly to the client (wrapped in a {@link - * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged and the connection is reattempted. - */ -public class ConnectionInitException extends DriverException { - public ConnectionInitException(@NonNull String message, @Nullable Throwable cause) { - super(message, null, cause, true); - } - - private ConnectionInitException(String message, ExecutionInfo executionInfo, Throwable cause) { - super(message, executionInfo, cause, true); - } - - @NonNull - @Override - public DriverException copy() { - return new ConnectionInitException(getMessage(), getExecutionInfo(), getCause()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java deleted file mode 100644 index d0fc8fc3b73..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.DriverException; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown when the checksums in a server response don't match (protocol v5 or above). - * - *

This indicates a data corruption issue, either due to a hardware issue on the client, or on - * the network between the server and the client. It is not recoverable: the driver will drop the - * connection. - */ -public class CrcMismatchException extends DriverException { - - public CrcMismatchException(@NonNull String message) { - super(message, null, null, true); - } - - @NonNull - @Override - public DriverException copy() { - return new CrcMismatchException(getMessage()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java deleted file mode 100644 index 9954aefb3d4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.SocketAddress; - -/** - * Thrown when an incoming or outgoing protocol frame exceeds the limit defined by {@code - * protocol.max-frame-length} in the configuration. - * - *

This error is always rethrown directly to the client, without any retry attempt. - */ -public class FrameTooLongException extends DriverException { - - private final SocketAddress address; - - public FrameTooLongException(@NonNull SocketAddress address, @NonNull String message) { - this(address, message, null); - } - - private FrameTooLongException( - SocketAddress address, String message, ExecutionInfo executionInfo) { - super(message, executionInfo, null, false); - this.address = address; - } - - /** The address of the node that encountered the error. */ - @NonNull - public SocketAddress getAddress() { - return address; - } - - @NonNull - @Override - public DriverException copy() { - return new FrameTooLongException(address, getMessage(), getExecutionInfo()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java deleted file mode 100644 index 60c3d60a69d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.SocketAddress; - -/** - * Thrown when a heartbeat query fails. - * - *

Heartbeat queries are sent automatically on idle connections, to ensure that they are still - * alive. If a heartbeat query fails, the connection is closed, and all pending queries are aborted. - * The exception will be passed to {@link RetryPolicy#onRequestAbortedVerdict(Request, Throwable, - * int)}, which decides what to do next (the default policy retries the query on the next node). - */ -public class HeartbeatException extends DriverException { - - private final SocketAddress address; - - public HeartbeatException( - @NonNull SocketAddress address, @Nullable String message, @Nullable Throwable cause) { - this(address, message, null, cause); - } - - public HeartbeatException( - SocketAddress address, String message, ExecutionInfo executionInfo, Throwable cause) { - super(message, executionInfo, cause, true); - this.address = address; - } - - /** The address of the node that encountered the error. */ - @NonNull - public SocketAddress getAddress() { - return address; - } - - @NonNull - @Override - public DriverException copy() { - return new HeartbeatException(address, getMessage(), getExecutionInfo(), getCause()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java deleted file mode 100644 index 9f81843c9c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.connection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; - -/** - * Decides how often the driver tries to re-establish lost connections. - * - *

When a reconnection starts, the driver invokes this policy to create a {@link - * ReconnectionSchedule ReconnectionSchedule} instance. That schedule's {@link - * ReconnectionSchedule#nextDelay() nextDelay()} method will get called each time the driver needs - * to program the next connection attempt. When the reconnection succeeds, the schedule is - * discarded; if the connection is lost again later, the next reconnection attempt will query the - * policy again to obtain a new schedule. - * - *

There are two types of reconnection: - * - *

    - *
  • {@linkplain #newNodeSchedule(Node) for regular node connections}: when the connection pool - * for a node does not have its configured number of connections (see {@code - * advanced.connection.pool.*.size} in the configuration), a reconnection starts for that - * pool. - *
  • {@linkplain #newControlConnectionSchedule(boolean) for the control connection}: when the - * control node goes down, a reconnection starts to find another node to replace it. This is - * also used if the configuration option {@code advanced.reconnect-on-init} is set and the - * driver has to retry the initial connection. - *
- * - * This interface defines separate methods for those two cases, but implementations are free to - * delegate to the same method internally if the same type of schedule can be used. - */ -public interface ReconnectionPolicy extends AutoCloseable { - - /** Creates a new schedule for the given node. */ - @NonNull - ReconnectionSchedule newNodeSchedule(@NonNull Node node); - - /** - * Creates a new schedule for the control connection. - * - * @param isInitialConnection whether this schedule is generated for the driver's initial attempt - * to connect to the cluster. - *
    - *
  • {@code true} means that the configuration option {@code advanced.reconnect-on-init} - * is set, the driver failed to reach any contact point, and it is now scheduling - * reattempts. - *
  • {@code false} means that the driver was already initialized, lost connection to the - * control node, and is now scheduling attempts to connect to another node. - *
- */ - @NonNull - ReconnectionSchedule newControlConnectionSchedule(boolean isInitialConnection); - - /** Called when the cluster that this policy is associated with closes. */ - @Override - void close(); - - /** - * The reconnection schedule from the time a connection is lost, to the time all connections to - * this node have been restored. - */ - interface ReconnectionSchedule { - /** How long to wait before the next reconnection attempt. */ - @NonNull - Duration nextDelay(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java deleted file mode 100644 index 737f985ad1d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Types related to a connection to a Cassandra node. - * - *

The driver generally connects to multiple nodes, and may keep multiple connections to each - * node. - */ -package com.datastax.oss.driver.api.core.connection; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java b/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java deleted file mode 100644 index 6f0afd3df8a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.context; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** Holds common components that are shared throughout a driver instance. */ -public interface DriverContext extends AttachmentPoint { - - /** - * This is the same as {@link Session#getName()}, it's exposed here for components that only have - * a reference to the context. - */ - @NonNull - String getSessionName(); - - /** @return The driver's configuration; never {@code null}. */ - @NonNull - DriverConfig getConfig(); - - /** @return The driver's configuration loader; never {@code null}. 
*/ - @NonNull - DriverConfigLoader getConfigLoader(); - - /** - * @return The driver's load balancing policies, keyed by profile name; the returned map is - * guaranteed to never be {@code null} and to always contain an entry for the {@value - * DriverExecutionProfile#DEFAULT_NAME} profile. - */ - @NonNull - Map getLoadBalancingPolicies(); - - /** - * @param profileName the profile name; never {@code null}. - * @return The driver's load balancing policy for the given profile; never {@code null}. - */ - @NonNull - default LoadBalancingPolicy getLoadBalancingPolicy(@NonNull String profileName) { - LoadBalancingPolicy policy = getLoadBalancingPolicies().get(profileName); - // Protect against a non-existent name - return (policy != null) - ? policy - : getLoadBalancingPolicies().get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** - * @return The driver's retry policies, keyed by profile name; the returned map is guaranteed to - * never be {@code null} and to always contain an entry for the {@value - * DriverExecutionProfile#DEFAULT_NAME} profile. - */ - @NonNull - Map getRetryPolicies(); - - /** - * @param profileName the profile name; never {@code null}. - * @return The driver's retry policy for the given profile; never {@code null}. - */ - @NonNull - default RetryPolicy getRetryPolicy(@NonNull String profileName) { - RetryPolicy policy = getRetryPolicies().get(profileName); - return (policy != null) ? policy : getRetryPolicies().get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** - * @return The driver's speculative execution policies, keyed by profile name; the returned map is - * guaranteed to never be {@code null} and to always contain an entry for the {@value - * DriverExecutionProfile#DEFAULT_NAME} profile. - */ - @NonNull - Map getSpeculativeExecutionPolicies(); - - /** - * @param profileName the profile name; never {@code null}. - * @return The driver's speculative execution policy for the given profile; never {@code null}. 
- */ - @NonNull - default SpeculativeExecutionPolicy getSpeculativeExecutionPolicy(@NonNull String profileName) { - SpeculativeExecutionPolicy policy = getSpeculativeExecutionPolicies().get(profileName); - return (policy != null) - ? policy - : getSpeculativeExecutionPolicies().get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** @return The driver's timestamp generator; never {@code null}. */ - @NonNull - TimestampGenerator getTimestampGenerator(); - - /** @return The driver's reconnection policy; never {@code null}. */ - @NonNull - ReconnectionPolicy getReconnectionPolicy(); - - /** @return The driver's address translator; never {@code null}. */ - @NonNull - AddressTranslator getAddressTranslator(); - - /** @return The authentication provider, if authentication was configured. */ - @NonNull - Optional getAuthProvider(); - - /** @return The SSL engine factory, if SSL was configured. */ - @NonNull - Optional getSslEngineFactory(); - - /** @return The driver's request tracker; never {@code null}. */ - @NonNull - RequestTracker getRequestTracker(); - - /** @return The driver's request ID generator; never {@code null}. */ - @NonNull - Optional getRequestIdGenerator(); - - /** @return The driver's request throttler; never {@code null}. */ - @NonNull - RequestThrottler getRequestThrottler(); - - /** @return The driver's node state listener; never {@code null}. */ - @NonNull - NodeStateListener getNodeStateListener(); - - /** @return The driver's schema change listener; never {@code null}. 
*/ - @NonNull - SchemaChangeListener getSchemaChangeListener(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java deleted file mode 100644 index 7b56bd61a09..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** - * A session that offers user-friendly methods to execute CQL requests asynchronously. - * - * @since 4.4.0 - */ -public interface AsyncCqlSession extends Session { - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - * @param statement the CQL query to execute (that can be any {@code Statement}). 
- * @return a {@code CompletionStage} that, once complete, will produce the async result set. - */ - @NonNull - default CompletionStage executeAsync(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, Statement.ASYNC), "The CQL processor should never return a null result"); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This is an alias for {@link #executeAsync(Statement)} - * executeAsync(SimpleStatement.newInstance(query))}. - * - * @param query the CQL query to execute. - * @return a {@code CompletionStage} that, once complete, will produce the async result set. - * @see SimpleStatement#newInstance(String) - */ - @NonNull - default CompletionStage executeAsync(@NonNull String query) { - return executeAsync(SimpleStatement.newInstance(query)); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This is an alias for {@link #executeAsync(Statement)} - * executeAsync(SimpleStatement.newInstance(query, values))}. - * - * @param query the CQL query to execute. - * @param values the values for placeholders in the query string. Individual values can be {@code - * null}, but the vararg array itself can't. - * @return a {@code CompletionStage} that, once complete, will produce the async result set. - * @see SimpleStatement#newInstance(String, Object...) - */ - @NonNull - default CompletionStage executeAsync( - @NonNull String query, @NonNull Object... values) { - return executeAsync(SimpleStatement.newInstance(query, values)); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - * - *

This is an alias for {@link #executeAsync(Statement)} - * executeAsync(SimpleStatement.newInstance(query, values))}. - * - * @param query the CQL query to execute. - * @param values the values for named placeholders in the query string. Individual values can be - * {@code null}, but the map itself can't. - * @return a {@code CompletionStage} that, once complete, will produce the async result set. - * @see SimpleStatement#newInstance(String, Map) - */ - @NonNull - default CompletionStage executeAsync( - @NonNull String query, @NonNull Map values) { - return executeAsync(SimpleStatement.newInstance(query, values)); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

Note that the bound statements created from the resulting prepared statement will inherit - * some of the attributes of {@code query}; see {@link SyncCqlSession#prepare(SimpleStatement)} - * for more details. - * - *

The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for - * more explanations). - * - * @param statement the CQL query to prepare (that can be any {@code SimpleStatement}). - * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. - */ - @NonNull - default CompletionStage prepareAsync(@NonNull SimpleStatement statement) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(statement), PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for - * more explanations). - * - * @param query the CQL query string to prepare. - * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. - */ - @NonNull - default CompletionStage prepareAsync(@NonNull String query) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(query), PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to - * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link - * SyncCqlSession#prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely - * have to deal with {@link PrepareRequest} directly. - * - *

The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for - * more explanations). - * - * @param request the {@code PrepareRequest} to prepare. - * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. - */ - @NonNull - default CompletionStage prepareAsync(PrepareRequest request) { - return Objects.requireNonNull( - execute(request, PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java deleted file mode 100644 index 05a292ccbd0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.CqlSession; - -/** - * The result of an asynchronous CQL query. 
- * - * @see CqlSession#executeAsync(Statement) - * @see CqlSession#executeAsync(String) - */ -public interface AsyncResultSet extends AsyncPagingIterable { - - // overridden to amend the javadocs: - /** - * {@inheritDoc} - * - *

This is equivalent to calling: - * - *

-   *   this.iterator().next().getBoolean("[applied]")
-   * 
- * - * Except that this method peeks at the next row without consuming it. - */ - @Override - boolean wasApplied(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java deleted file mode 100644 index 9deb33c6007..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.cql.DefaultBatchStatement; -import com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator; -import com.datastax.oss.driver.internal.core.util.Sizes; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; - -/** - * A statement that groups a number of other statements, so that they can be executed as a batch - * (i.e. sent together as a single protocol frame). - * - *

The default implementation returned by the driver is immutable and thread-safe. - * All mutating methods return a new instance. See also the static factory methods and builders in - * this interface. - */ -public interface BatchStatement extends Statement, Iterable> { - - /** - * Creates an instance of the default implementation for the given batch type. - * - *

Note that the returned object is immutable. - */ - @NonNull - static BatchStatement newInstance(@NonNull BatchType batchType) { - return new DefaultBatchStatement( - batchType, - new ArrayList<>(), - null, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Creates an instance of the default implementation for the given batch type, containing the - * given statements. - * - *

Note that the returned object is immutable. - */ - @NonNull - static BatchStatement newInstance( - @NonNull BatchType batchType, @NonNull Iterable> statements) { - return new DefaultBatchStatement( - batchType, - ImmutableList.copyOf(statements), - null, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Creates an instance of the default implementation for the given batch type, containing the - * given statements. - * - *

Note that the returned object is immutable. - */ - @NonNull - static BatchStatement newInstance( - @NonNull BatchType batchType, @NonNull BatchableStatement... statements) { - return new DefaultBatchStatement( - batchType, - ImmutableList.copyOf(statements), - null, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Returns a builder to create an instance of the default implementation. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchStatementBuilder builder(@NonNull BatchType batchType) { - return new BatchStatementBuilder(batchType); - } - - /** - * Returns a builder to create an instance of the default implementation, copying the fields of - * the given statement. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static BatchStatementBuilder builder(@NonNull BatchStatement template) { - return new BatchStatementBuilder(template); - } - - @NonNull - BatchType getBatchType(); - - /** - * Sets the batch type. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement setBatchType(@NonNull BatchType newBatchType); - - /** - * Sets the CQL keyspace to associate with this batch. - * - *

If the keyspace is not set explicitly with this method, it will be inferred from the first - * simple statement in the batch that has a keyspace set (or will be null if no such statement - * exists). - * - *

This feature is only available with {@link DefaultProtocolVersion#V5 native protocol v5} or - * higher. Specifying a per-request keyspace with lower protocol versions will cause a runtime - * error. - * - * @see Request#getKeyspace() - */ - @NonNull - @CheckReturnValue - BatchStatement setKeyspace(@Nullable CqlIdentifier newKeyspace); - - /** - * Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(newKeyspaceName))}. - */ - @NonNull - @CheckReturnValue - default BatchStatement setKeyspace(@NonNull String newKeyspaceName) { - return setKeyspace(CqlIdentifier.fromCql(newKeyspaceName)); - } - - /** - * Adds a new statement to the batch. - * - *

Note that, due to protocol limitations, simple statements with named values are currently - * not supported. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement add(@NonNull BatchableStatement statement); - - /** - * Adds new statements to the batch. - * - *

Note that, due to protocol limitations, simple statements with named values are currently - * not supported. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement addAll(@NonNull Iterable> statements); - - /** @see #addAll(Iterable) */ - @NonNull - @CheckReturnValue - default BatchStatement addAll(@NonNull BatchableStatement... statements) { - return addAll(Arrays.asList(statements)); - } - - /** @return The number of child statements in this batch. */ - int size(); - - /** - * Clears the batch, removing all the statements added so far. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - BatchStatement clear(); - - @Override - default int computeSizeInBytes(@NonNull DriverContext context) { - int size = Sizes.minimumStatementSize(this, context); - - // BatchStatement's additional elements to take into account are: - // - batch type - // - inner statements (simple or bound) - // - per-query keyspace - // - timestamp - - // batch type - size += PrimitiveSizes.BYTE; - - // inner statements - size += PrimitiveSizes.SHORT; // number of statements - - for (BatchableStatement batchableStatement : this) { - size += - Sizes.sizeOfInnerBatchStatementInBytes( - batchableStatement, context.getProtocolVersion(), context.getCodecRegistry()); - } - - // per-query keyspace - if (getKeyspace() != null) { - size += PrimitiveSizes.sizeOfString(getKeyspace().asInternal()); - } - - // timestamp - if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { - - size += PrimitiveSizes.LONG; - } - - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java deleted file mode 100644 index a8e2b8ab659..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.cql.DefaultBatchStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a batch statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class BatchStatementBuilder extends StatementBuilder { - - @NonNull private BatchType batchType; - @Nullable private CqlIdentifier keyspace; - @NonNull private ImmutableList.Builder> statementsBuilder; - private int statementsCount; - - public BatchStatementBuilder(@NonNull BatchType batchType) { - this.batchType = batchType; - this.statementsBuilder = ImmutableList.builder(); - } - - public BatchStatementBuilder(@NonNull BatchStatement template) { - super(template); - this.batchType = template.getBatchType(); - this.statementsBuilder = ImmutableList.>builder().addAll(template); - this.statementsCount = template.size(); - } - - /** - * Sets the CQL keyspace to execute this batch in. - * - * @return this builder; never {@code null}. - * @see BatchStatement#getKeyspace() - */ - @NonNull - public BatchStatementBuilder setKeyspace(@NonNull CqlIdentifier keyspace) { - this.keyspace = keyspace; - return this; - } - - /** - * Sets the CQL keyspace to execute this batch in. Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - * - * @return this builder; never {@code null}. - */ - @NonNull - public BatchStatementBuilder setKeyspace(@NonNull String keyspaceName) { - return setKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * Adds a new statement to the batch. - * - * @return this builder; never {@code null}. - * @see BatchStatement#add(BatchableStatement) - */ - @NonNull - public BatchStatementBuilder addStatement(@NonNull BatchableStatement statement) { - if (statementsCount >= 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } - statementsCount += 1; - statementsBuilder.add(statement); - return this; - } - - /** - * Adds new statements to the batch. - * - * @return this builder; never {@code null}. 
- * @see BatchStatement#addAll(Iterable) - */ - @NonNull - public BatchStatementBuilder addStatements(@NonNull Iterable> statements) { - int delta = Iterables.size(statements); - if (statementsCount + delta > 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } - statementsCount += delta; - statementsBuilder.addAll(statements); - return this; - } - - /** - * Adds new statements to the batch. - * - * @return this builder; never {@code null}. - * @see BatchStatement#addAll(BatchableStatement[]) - */ - @NonNull - public BatchStatementBuilder addStatements(@NonNull BatchableStatement... statements) { - return addStatements(Arrays.asList(statements)); - } - - /** - * Clears all the statements in this batch. - * - * @return this builder; never {@code null}. - */ - @NonNull - public BatchStatementBuilder clearStatements() { - statementsBuilder = ImmutableList.builder(); - statementsCount = 0; - return this; - } - - /** @return a newly-allocated {@linkplain BatchStatement batch}; never {@code null}.. 
*/ - @Override - @NonNull - public BatchStatement build() { - return new DefaultBatchStatement( - batchType, - statementsBuilder.build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - buildCustomPayload(), - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - public int getStatementsCount() { - return this.statementsCount; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java deleted file mode 100644 index 6b0a7f09688..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -/** - * The type of a batch. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code BatchType}s are {@link DefaultBatchType} instances. - */ -public interface BatchType { - - BatchType LOGGED = DefaultBatchType.LOGGED; - BatchType UNLOGGED = DefaultBatchType.UNLOGGED; - BatchType COUNTER = DefaultBatchType.COUNTER; - - /** The numerical value that the batch type is encoded to. */ - byte getProtocolCode(); - - // Implementation note: we don't have a "BatchTypeRegistry" because we never decode batch types. - // This can be added later if needed (see ConsistencyLevelRegistry for an example). -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java deleted file mode 100644 index a25f625bae9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -/** - * A statement that can be added to a CQL batch. 
- * - * @param the "self type" used for covariant returns in subtypes. - */ -public interface BatchableStatement> - extends Statement {} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java deleted file mode 100644 index 64f0f22a051..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableById; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A data container with the ability to unset values. 
*/ -public interface Bindable> - extends GettableById, GettableByName, SettableById, SettableByName { - /** - * Whether the {@code i}th value has been set. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @SuppressWarnings("ReferenceEquality") - default boolean isSet(int i) { - return getBytesUnsafe(i) != ProtocolConstants.UNSET_VALUE; - } - - /** - * Whether the value for the first occurrence of {@code id} has been set. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IndexOutOfBoundsException if the id is invalid. - */ - @SuppressWarnings("ReferenceEquality") - default boolean isSet(@NonNull CqlIdentifier id) { - return getBytesUnsafe(id) != ProtocolConstants.UNSET_VALUE; - } - - /** - * Whether the value for the first occurrence of {@code name} has been set. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IndexOutOfBoundsException if the name is invalid. - */ - @SuppressWarnings("ReferenceEquality") - default boolean isSet(@NonNull String name) { - return getBytesUnsafe(name) != ProtocolConstants.UNSET_VALUE; - } - - /** - * Unsets the {@code i}th value. This will leave the statement in the same state as if no setter - * was ever called for this value. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT unset(int i) { - return setBytesUnsafe(i, ProtocolConstants.UNSET_VALUE); - } - - /** - * Unsets the value for the first occurrence of {@code id}. This will leave the statement in the - * same state as if no setter was ever called for this value. - * - * @throws IndexOutOfBoundsException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT unset(@NonNull CqlIdentifier id) { - return setBytesUnsafe(id, ProtocolConstants.UNSET_VALUE); - } - - /** - * Unsets the value for the first occurrence of {@code name}. This will leave the statement in the - * same state as if no setter was ever called for this value. - * - * @throws IndexOutOfBoundsException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT unset(@NonNull String name) { - return setBytesUnsafe(name, ProtocolConstants.UNSET_VALUE); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java deleted file mode 100644 index bd7c142907f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator; -import com.datastax.oss.driver.internal.core.util.Sizes; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import com.datastax.oss.protocol.internal.request.query.Values; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * A prepared statement in its executable form, with values bound to the variables. - * - *

The default implementation returned by the driver is immutable and thread-safe. - * All mutating methods return a new instance. - */ -public interface BoundStatement - extends BatchableStatement, Bindable { - - /** The prepared statement that was used to create this statement. */ - @NonNull - PreparedStatement getPreparedStatement(); - - /** The values to bind, in their serialized form. */ - @NonNull - List getValues(); - - /** - * Always returns {@code null} (bound statements can't have a per-request keyspace, they always - * inherit the one of the statement that was initially prepared). - */ - @Override - @Nullable - default CqlIdentifier getKeyspace() { - return null; - } - - @Override - default int computeSizeInBytes(@NonNull DriverContext context) { - int size = Sizes.minimumStatementSize(this, context); - - // BoundStatement's additional elements to take into account are: - // - prepared ID - // - result metadata ID - // - parameters - // - page size - // - paging state - // - timestamp - - // prepared ID - size += PrimitiveSizes.sizeOfShortBytes(getPreparedStatement().getId()); - - // result metadata ID - if (getPreparedStatement().getResultMetadataId() != null) { - size += PrimitiveSizes.sizeOfShortBytes(getPreparedStatement().getResultMetadataId()); - } - - // parameters (always sent as positional values for bound statements) - size += Values.sizeOfPositionalValues(getValues()); - - // page size - size += PrimitiveSizes.INT; - - // paging state - if (getPagingState() != null) { - size += PrimitiveSizes.sizeOfBytes(getPagingState()); - } - - // timestamp - if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { - size += PrimitiveSizes.LONG; - } - - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java deleted file mode 100644 
index 7e8f8723e1b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.DefaultBoundStatement; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a bound statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class BoundStatementBuilder extends StatementBuilder - implements Bindable { - - @NonNull private final PreparedStatement preparedStatement; - @NonNull private final ColumnDefinitions variableDefinitions; - @NonNull private final ByteBuffer[] values; - @NonNull private final CodecRegistry codecRegistry; - @NonNull private final ProtocolVersion protocolVersion; - - public BoundStatementBuilder( - @NonNull PreparedStatement preparedStatement, - @NonNull ColumnDefinitions variableDefinitions, - @NonNull ByteBuffer[] values, - @Nullable String executionProfileName, - @Nullable DriverExecutionProfile executionProfile, - @Nullable CqlIdentifier routingKeyspace, - @Nullable ByteBuffer routingKey, - @Nullable Token routingToken, - @NonNull Map customPayload, - @Nullable Boolean idempotent, - boolean tracing, - long timestamp, - @Nullable ByteBuffer pagingState, - int pageSize, - @Nullable ConsistencyLevel consistencyLevel, - @Nullable ConsistencyLevel serialConsistencyLevel, - @Nullable Duration timeout, - @NonNull CodecRegistry codecRegistry, - @NonNull ProtocolVersion protocolVersion) { - this.preparedStatement = preparedStatement; - this.variableDefinitions = variableDefinitions; - this.values = values; - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - for (Map.Entry entry : customPayload.entrySet()) { - this.addCustomPayload(entry.getKey(), entry.getValue()); - } - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.codecRegistry = codecRegistry; - this.protocolVersion = protocolVersion; - } - - public 
BoundStatementBuilder(@NonNull BoundStatement template) { - super(template); - this.preparedStatement = template.getPreparedStatement(); - this.variableDefinitions = template.getPreparedStatement().getVariableDefinitions(); - this.values = template.getValues().toArray(new ByteBuffer[this.variableDefinitions.size()]); - this.codecRegistry = template.codecRegistry(); - this.protocolVersion = template.protocolVersion(); - this.node = template.getNode(); - } - - /** The prepared statement that was used to create this statement. */ - @NonNull - public PreparedStatement getPreparedStatement() { - return preparedStatement; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = variableDefinitions.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int indexOf = variableDefinitions.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = variableDefinitions.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = variableDefinitions.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public BoundStatementBuilder setBytesUnsafe(int i, ByteBuffer v) { - values[i] = v; - return this; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @Override - public int size() { - return values.length; - } - - @NonNull - 
@Override - public DataType getType(int i) { - return variableDefinitions.get(i).getType(); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return codecRegistry; - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return protocolVersion; - } - - @NonNull - @Override - public BoundStatement build() { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - buildCustomPayload(), - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java deleted file mode 100644 index cb48f058be4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Metadata about a CQL column. - * - *

The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface ColumnDefinition extends Detachable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getTable(); - - @NonNull - CqlIdentifier getName(); - - @NonNull - DataType getType(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java deleted file mode 100644 index 7a775064317..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; - -/** - * Metadata about a set of CQL columns. - * - *

The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface ColumnDefinitions extends Iterable, Detachable { - - /** @return the number of definitions contained in this metadata. */ - int size(); - - /** - * @param i the index to check. - * @throws IndexOutOfBoundsException if the index is invalid. - * @return the {@code i}th {@link ColumnDefinition} in this metadata. - */ - @NonNull - ColumnDefinition get(int i); - - /** - * Get a definition by name. - * - *

This is the equivalent of: - * - *

-   *   get(firstIndexOf(name))
-   * 
- * - * @throws IllegalArgumentException if the name does not exist (in other words, if {@code - * !contains(name))}). - * @see #contains(String) - * @see #firstIndexOf(String) - */ - @NonNull - default ColumnDefinition get(@NonNull String name) { - if (!contains(name)) { - throw new IllegalArgumentException("No definition named " + name); - } else { - return get(firstIndexOf(name)); - } - } - - /** - * Get a definition by name. - * - *

This is the equivalent of: - * - *

-   *   get(firstIndexOf(name))
-   * 
- * - * @throws IllegalArgumentException if the name does not exist (in other words, if {@code - * !contains(name))}). - * @see #contains(CqlIdentifier) - * @see #firstIndexOf(CqlIdentifier) - */ - @NonNull - default ColumnDefinition get(@NonNull CqlIdentifier name) { - if (!contains(name)) { - throw new IllegalArgumentException("No definition named " + name); - } else { - return get(firstIndexOf(name)); - } - } - - /** - * Whether there is a definition using the given name. - * - *

Because raw strings are ambiguous with regard to case-sensitivity, the argument will be - * interpreted according to the rules described in {@link AccessibleByName}. - */ - boolean contains(@NonNull String name); - - /** Whether there is a definition using the given CQL identifier. */ - boolean contains(@NonNull CqlIdentifier id); - - /** - * Returns the indices of all columns that use the given name. - * - *

Because raw strings are ambiguous with regard to case-sensitivity, the argument will be - * interpreted according to the rules described in {@link AccessibleByName}. - * - * @return the indices, or an empty list if no column uses this name. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it - * will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull String name) { - Loggers.COLUMN_DEFINITIONS.warn( - "{} should override allIndicesOf(String), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(name)); - } - - /** - * Returns the index of the first column that uses the given name. - * - *

Because raw strings are ambiguous with regard to case-sensitivity, the argument will be - * interpreted according to the rules described in {@link AccessibleByName}. - * - * @return the index, or -1 if no column uses this name. - */ - int firstIndexOf(@NonNull String name); - - /** - * Returns the indices of all columns that use the given identifier. - * - * @return the indices, or an empty list if no column uses this identifier. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, - * as it will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull CqlIdentifier id) { - Loggers.COLUMN_DEFINITIONS.warn( - "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(id)); - } - - /** - * Returns the index of the first column that uses the given identifier. - * - * @return the index, or -1 if no column uses this identifier. - */ - int firstIndexOf(@NonNull CqlIdentifier id); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java deleted file mode 100644 index f699438df59..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.protocol.internal.ProtocolConstants; - -/** A default batch type supported by the driver out of the box. 
*/ -public enum DefaultBatchType implements BatchType { - /** - * A logged batch: Cassandra will first write the batch to its distributed batch log to ensure the - * atomicity of the batch (atomicity meaning that if any statement in the batch succeeds, all will - * eventually succeed). - */ - LOGGED(ProtocolConstants.BatchType.LOGGED), - - /** - * A batch that doesn't use Cassandra's distributed batch log. Such batch are not guaranteed to be - * atomic. - */ - UNLOGGED(ProtocolConstants.BatchType.UNLOGGED), - - /** - * A counter batch. Note that such batch is the only type that can contain counter operations and - * it can only contain these. - */ - COUNTER(ProtocolConstants.BatchType.COUNTER), - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // BatchType interface. If you add a new enum constant, remember to update the interface as - // well. - - private final byte code; - - DefaultBatchType(byte code) { - this.code = code; - } - - @Override - public byte getProtocolCode() { - return code; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java deleted file mode 100644 index 40cfca827d1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.cql.DefaultPagingState; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; - -/** - * Information about the execution of a query. - * - *

This can be obtained either from a result set for a successful query, or from a driver - * exception for a failed query. - * - * @see ResultSet#getExecutionInfo() - * @see DriverException#getExecutionInfo() - */ -public interface ExecutionInfo { - - /** @return The {@link Request} that was executed. */ - @NonNull - default Request getRequest() { - return getStatement(); - } - - /** - * @return The {@link Request} that was executed, if it can be cast to {@link Statement}. - * @deprecated Use {@link #getRequest()} instead. - * @throws ClassCastException If the request that was executed cannot be cast to {@link - * Statement}. - */ - @NonNull - @Deprecated - Statement getStatement(); - - /** - * The node that acted as a coordinator for the query. - * - *

For successful queries, this is never {@code null}. It is the node that sent the response - * from which the result was decoded. - * - *

For failed queries, this can either be {@code null} if the error occurred before any node - * could be contacted (for example a {@link RequestThrottlingException}), or present if a node was - * successfully contacted, but replied with an error response (any subclass of {@link - * CoordinatorException}). - */ - @Nullable - Node getCoordinator(); - - /** - * The number of speculative executions that were started for this query. - * - *

This does not include the initial, normal execution of the query. Therefore, if speculative - * executions are disabled, this will always be 0. If they are enabled and one speculative - * execution was triggered in addition to the initial execution, this will be 1, etc. - * - * @see SpeculativeExecutionPolicy - */ - int getSpeculativeExecutionCount(); - - /** - * The index of the execution that completed this query. - * - *

0 represents the initial, normal execution of the query, 1 the first speculative execution, - * etc. If this execution info is attached to an error, this might not be applicable, and will - * return -1. - * - * @see SpeculativeExecutionPolicy - */ - int getSuccessfulExecutionIndex(); - - /** - * The errors encountered on previous coordinators, if any. - * - *

The list is in chronological order, based on the time that the driver processed the error - * responses. If speculative executions are enabled, they run concurrently so their errors will be - * interleaved. A node can appear multiple times (if the retry policy decided to retry on the same - * node). - */ - @NonNull - List> getErrors(); - - /** - * The paging state of the query, in its raw form. - * - *

This represents the next page to be fetched if this query has multiple page of results. It - * can be saved and reused later on the same statement. - * - *

Note that this is the equivalent of driver 3's {@code getPagingStateUnsafe()}. If you're - * looking for the method that returns a {@link PagingState}, use {@link #getSafePagingState()}. - * - * @return the paging state, or {@code null} if there is no next page. - */ - @Nullable - ByteBuffer getPagingState(); - - /** - * The paging state of the query, in a safe wrapper that checks if it's reused on the right - * statement. - * - *

This represents the next page to be fetched if this query has multiple page of results. It - * can be saved and reused later on the same statement. - * - * @return the paging state, or {@code null} if there is no next page. - */ - @Nullable - default PagingState getSafePagingState() { - // Default implementation for backward compatibility, but we override it in the concrete class, - // because it knows the attachment point. - ByteBuffer rawPagingState = getPagingState(); - if (rawPagingState == null) { - return null; - } else { - Request request = getRequest(); - if (!(request instanceof Statement)) { - throw new IllegalStateException("Only statements should have a paging state"); - } - Statement statement = (Statement) request; - return new DefaultPagingState(rawPagingState, statement, AttachmentPoint.NONE); - } - } - - /** - * The server-side warnings for this query, if any (otherwise the list will be empty). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this list will always be empty. - */ - @NonNull - List getWarnings(); - - /** - * The custom payload sent back by the server with the response, if any (otherwise the map will be - * empty). - * - *

This method returns a read-only view of the original map, but its values remain inherently - * mutable. If multiple clients will read these values, care should be taken not to corrupt the - * data (in particular, preserve the indices by calling {@link ByteBuffer#duplicate()}). - * - *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower - * versions, this map will always be empty. - */ - @NonNull - Map getIncomingPayload(); - - /** - * Whether the cluster reached schema agreement after the execution of this query. - * - *

After a successful schema-altering query (ex: creating a table), the driver will check if - * the cluster's nodes agree on the new schema version. If not, it will keep retrying a few times - * (the retry delay and timeout are set through the configuration). - * - *

If this method returns {@code false}, clients can call {@link - * Session#checkSchemaAgreement()} later to perform the check manually. - * - *

Schema agreement is only checked for schema-altering queries. For other query types, this - * method will always return {@code true}. - * - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_INTERVAL - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_TIMEOUT - */ - boolean isSchemaInAgreement(); - - /** - * The tracing identifier if tracing was {@link Statement#isTracing() enabled} for this query, - * otherwise {@code null}. - */ - @Nullable - UUID getTracingId(); - - /** - * Fetches the query trace asynchronously, if tracing was enabled for this query. - * - *

Note that each call to this method triggers a new fetch, even if the previous call was - * successful (this allows fetching the trace again if the list of {@link QueryTrace#getEvents() - * events} was incomplete). - * - *

This method will return a failed future if tracing was disabled for the query (that is, if - * {@link #getTracingId()} is null). - */ - @NonNull - CompletionStage getQueryTraceAsync(); - - /** - * Convenience method to call {@link #getQueryTraceAsync()} and block for the result. - * - *

This must not be called on a driver thread. - * - * @throws IllegalStateException if {@link #getTracingId()} is null. - */ - @NonNull - default QueryTrace getQueryTrace() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(getQueryTraceAsync()); - } - - /** - * The size of the binary response in bytes. - * - *

This is the size of the protocol-level frame (including the frame header) before it was - * decoded by the driver, but after decompression (if compression is enabled). - * - *

If the information is not available (for example if this execution info comes from an {@link - * RetryDecision#IGNORE IGNORE} decision of the retry policy), this method returns -1. - * - * @see #getCompressedResponseSizeInBytes() - */ - int getResponseSizeInBytes(); - - /** - * The size of the compressed binary response in bytes. - * - *

This is the size of the protocol-level frame (including the frame header) as it came in the - * TCP response, before decompression and decoding by the driver. - * - *

If compression is disabled, or if the information is not available (for example if this - * execution info comes from an {@link RetryDecision#IGNORE IGNORE} decision of the retry policy), - * this method returns -1. - * - * @see #getResponseSizeInBytes() - */ - int getCompressedResponseSizeInBytes(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java deleted file mode 100644 index b9042f99841..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPagingState; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A safe wrapper around the paging state of a query. - * - *

This class performs additional checks to fail fast if the paging state is not reused on the - * same query, and it provides utility methods for conversion to/from strings and byte arrays. - * - *

The serialized form returned by {@link #toBytes()} and {@link Object#toString()} is an opaque - * sequence of bytes. Note however that it is not cryptographically secure: the contents are - * not encrypted and the checks are performed with a simple MD5 checksum. If you need stronger - * guarantees, you should build your own wrapper around {@link ExecutionInfo#getPagingState()}. - */ -public interface PagingState { - - /** Parses an instance from a string previously generated with {@code toString()}. */ - @NonNull - static PagingState fromString(@NonNull String string) { - return DefaultPagingState.fromString(string); - } - - /** Parses an instance from a byte array previously generated with {@link #toBytes()}. */ - @NonNull - static PagingState fromBytes(byte[] bytes) { - return DefaultPagingState.fromBytes(bytes); - } - - /** Returns a representation of this object as a byte array. */ - byte[] toBytes(); - - /** - * Checks if this paging state can be safely reused for the given statement. Specifically, the - * query string and any bound values must match. - * - *

Note that, if {@code statement} is a {@link SimpleStatement} with bound values, those values - * must be encoded in order to perform the check. This method uses the default codec registry and - * default protocol version. This might fail if you use custom codecs; in that case, use {@link - * #matches(Statement, Session)} instead. - * - *

If {@code statement} is a {@link BoundStatement}, it is always safe to call this method. - */ - default boolean matches(@NonNull Statement statement) { - return matches(statement, null); - } - - /** - * Alternative to {@link #matches(Statement)} that specifies the session the statement will be - * executed with. You only need this for simple statements, and if you use custom codecs. - * Bound statements already know which session they are attached to. - */ - boolean matches(@NonNull Statement statement, @Nullable Session session); - - /** - * Returns the underlying "unsafe" paging state (the equivalent of {@link - * ExecutionInfo#getPagingState()}). - */ - @NonNull - ByteBuffer getRawPagingState(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java deleted file mode 100644 index eb04f26c046..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; - -/** - * A request to prepare a CQL query. - * - *

Driver clients should rarely have to deal directly with this type, it's used internally by - * {@link Session}'s prepare methods. However a {@link RetryPolicy} implementation might use it if - * it needs a custom behavior for prepare requests. - * - *

A client may also provide their own implementation of this interface to customize which - * attributes are propagated when preparing a simple statement; see {@link - * CqlSession#prepare(SimpleStatement)} for more explanations. - */ -public interface PrepareRequest extends Request { - - /** - * The type returned when a CQL statement is prepared synchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be prepared with one - * of the driver's built-in helper methods (such as {@link CqlSession#prepare(SimpleStatement)}). - */ - GenericType SYNC = GenericType.of(PreparedStatement.class); - - /** - * The type returned when a CQL statement is prepared asynchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be prepared with one - * of the driver's built-in helper methods (such as {@link - * CqlSession#prepareAsync(SimpleStatement)}. - */ - GenericType> ASYNC = - new GenericType>() {}; - - /** The CQL query to prepare. */ - @NonNull - String getQuery(); - - /** - * {@inheritDoc} - * - *

Note that this refers to the prepare query itself, not to the bound statements that will be - * created from the prepared statement (see {@link #areBoundStatementsIdempotent()}). - */ - @NonNull - @Override - default Boolean isIdempotent() { - // Retrying to prepare is always safe - return true; - } - - /** - * The name of the execution profile to use for the bound statements that will be created from the - * prepared statement. - * - *

Note that this will be ignored if {@link #getExecutionProfileForBoundStatements()} returns a - * non-null value. - */ - @Nullable - String getExecutionProfileNameForBoundStatements(); - - /** - * The execution profile to use for the bound statements that will be created from the prepared - * statement. - */ - @Nullable - DriverExecutionProfile getExecutionProfileForBoundStatements(); - - /** - * The routing keyspace to use for the bound statements that will be created from the prepared - * statement. - */ - CqlIdentifier getRoutingKeyspaceForBoundStatements(); - - /** - * The routing key to use for the bound statements that will be created from the prepared - * statement. - */ - ByteBuffer getRoutingKeyForBoundStatements(); - - /** - * The routing key to use for the bound statements that will be created from the prepared - * statement. - * - *

If it's not null, it takes precedence over {@link #getRoutingKeyForBoundStatements()}. - */ - Token getRoutingTokenForBoundStatements(); - - /** - * Returns the custom payload to send alongside the bound statements that will be created from the - * prepared statement. - */ - @NonNull - Map getCustomPayloadForBoundStatements(); - - /** - * Whether bound statements that will be created from the prepared statement are idempotent. - * - *

This follows the same semantics as {@link #isIdempotent()}. - */ - @Nullable - Boolean areBoundStatementsIdempotent(); - - /** - * The timeout to use for the bound statements that will be created from the prepared statement. - * If the value is null, the default value will be used from the configuration. - */ - @Nullable - Duration getTimeoutForBoundStatements(); - - /** - * The paging state to use for the bound statements that will be created from the prepared - * statement. - */ - ByteBuffer getPagingStateForBoundStatements(); - - /** - * The page size to use for the bound statements that will be created from the prepared statement. - * If the value is 0 or negative, the default value will be used from the configuration. - */ - int getPageSizeForBoundStatements(); - - /** - * The consistency level to use for the bound statements that will be created from the prepared - * statement or {@code null} to use the default value from the configuration. - */ - @Nullable - ConsistencyLevel getConsistencyLevelForBoundStatements(); - - /** - * The serial consistency level to use for the bound statements that will be created from the - * prepared statement or {@code null} to use the default value from the configuration. - */ - @Nullable - ConsistencyLevel getSerialConsistencyLevelForBoundStatements(); - - /** Whether bound statements that will be created from the prepared statement are tracing. */ - boolean areBoundStatementsTracing(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java deleted file mode 100644 index 7828f9f809c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * A query with bind variables that has been pre-parsed by the database. - * - *

Client applications create instances with {@link CqlSession#prepare(SimpleStatement)}. Then - * they use {@link #bind(Object...)} to obtain an executable {@link BoundStatement}. - * - *

The default prepared statement implementation returned by the driver is thread-safe. - * Client applications can -- and are expected to -- prepare each query once and store the result in - * a place where it can be accessed concurrently by application threads (for example a final field). - * Preparing the same query string twice is suboptimal and a bad practice, and will cause the driver - * to log a warning. - */ -public interface PreparedStatement { - - /** - * A unique identifier for this prepared statement. - * - *

Note: the returned buffer is read-only. - */ - @NonNull - ByteBuffer getId(); - - @NonNull - String getQuery(); - - /** A description of the bind variables of this prepared statement. */ - @NonNull - ColumnDefinitions getVariableDefinitions(); - - /** - * The indices of the variables in {@link #getVariableDefinitions()} that correspond to the target - * table's partition key. - * - *

This is only present if all the partition key columns are expressed as bind variables. - * Otherwise, the list will be empty. For example, given the following schema: - * - *

-   *   CREATE TABLE foo (pk1 int, pk2 int, cc int, v int, PRIMARY KEY ((pk1, pk2), cc));
-   * 
- * - * And the following definitions: - * - *
-   * PreparedStatement ps1 = session.prepare("UPDATE foo SET v = ? WHERE pk1 = ? AND pk2 = ? AND v = ?");
-   * PreparedStatement ps2 = session.prepare("UPDATE foo SET v = ? WHERE pk1 = 1 AND pk2 = ? AND v = ?");
-   * 
- * - * Then {@code ps1.getPartitionKeyIndices()} contains 1 and 2, and {@code - * ps2.getPartitionKeyIndices()} is empty (because one of the partition key components is - * hard-coded in the query string). - */ - @NonNull - List getPartitionKeyIndices(); - - /** - * A unique identifier for result metadata (essentially a hash of {@link - * #getResultSetDefinitions()}). - * - *

This information is mostly for internal use: with protocol {@link DefaultProtocolVersion#V5} - * or higher, the driver sends it with every execution of the prepared statement, to validate that - * its result metadata is still up-to-date. - * - *

Note: this method returns {@code null} for protocol {@link DefaultProtocolVersion#V4} or - * lower; otherwise, the returned buffer is read-only. - * - * @see CASSANDRA-10786 - */ - @Nullable - ByteBuffer getResultMetadataId(); - - /** - * A description of the result set that will be returned when this prepared statement is bound and - * executed. - * - *

This information is only present for {@code SELECT} queries, otherwise it is always empty. - * Note that this is slightly incorrect for conditional updates (e.g. {@code INSERT ... IF NOT - * EXISTS}), which do return columns; for those cases, use {@link - * ResultSet#getColumnDefinitions()} on the result, not this method. - */ - @NonNull - ColumnDefinitions getResultSetDefinitions(); - - /** - * Updates {@link #getResultMetadataId()} and {@link #getResultSetDefinitions()} atomically. - * - *

This is for internal use by the driver. Calling this manually with incorrect information can - * cause existing queries to fail. - */ - void setResultMetadata( - @NonNull ByteBuffer newResultMetadataId, @NonNull ColumnDefinitions newResultSetDefinitions); - - /** - * Builds an executable statement that associates a set of values with the bind variables. - * - *

Note that the built-in bound statement implementation is immutable. If you need to set - * multiple execution parameters on the bound statement (such as {@link - * BoundStatement#setExecutionProfileName(String)}, {@link - * BoundStatement#setPagingState(ByteBuffer)}, etc.), consider using {@link - * #boundStatementBuilder(Object...)} instead to avoid unnecessary allocations. - * - * @param values the values of the bound variables in the statement. You can provide less values - * than the actual number of variables (or even none at all), in which case the remaining - * variables will be left unset. However, this method will throw an {@link - * IllegalArgumentException} if there are more values than variables. Individual values can be - * {@code null}, but the vararg array itself can't. - */ - @NonNull - BoundStatement bind(@NonNull Object... values); - - /** - * Returns a builder to construct an executable statement. - * - *

Note that this builder is mutable and not thread-safe. - * - * @see #bind(Object...) - */ - @NonNull - BoundStatementBuilder boundStatementBuilder(@NonNull Object... values); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java deleted file mode 100644 index 37ebb85c0db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -/** - * Tracing information for a query. - * - *

When {@link Statement#isTracing() tracing} is enabled for a query, Cassandra generates rows in - * the {@code sessions} and {@code events} table of the {@code system_traces} keyspace. This class - * is a client-side representation of that information. - */ -public interface QueryTrace { - - @NonNull - UUID getTracingId(); - - @NonNull - String getRequestType(); - - /** The server-side duration of the query in microseconds. */ - int getDurationMicros(); - - /** - * @deprecated returns the coordinator IP, but {@link #getCoordinatorAddress()} should be - * preferred, since C* 4.0 and above now returns the port was well. - */ - @NonNull - @Deprecated - InetAddress getCoordinator(); - - /** - * The IP and port of the node that coordinated the query. Prior to C* 4.0 the port is not set and - * will default to 0. - * - *

This method's default implementation returns {@link #getCoordinator()} with the port set to - * 0. The only reason it exists is to preserve binary compatibility. Internally, the driver - * overrides it to set the correct port. - * - * @since 4.6.0 - */ - @NonNull - default InetSocketAddress getCoordinatorAddress() { - return new InetSocketAddress(getCoordinator(), 0); - } - - /** The parameters attached to this trace. */ - @NonNull - Map getParameters(); - - /** The server-side timestamp of the start of this query. */ - long getStartedAt(); - - /** - * The events contained in this trace. - * - *

Query tracing is asynchronous in Cassandra. Hence, it is possible for the list returned to - * be missing some events for some of the replicas involved in the query if the query trace is - * requested just after the return of the query (the only guarantee being that the list will - * contain the events pertaining to the coordinator). - */ - @NonNull - List getEvents(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java deleted file mode 100644 index 54f786b2068..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.PagingIterable; - -/** - * The result of a synchronous CQL query. - * - *

See {@link PagingIterable} for a few generic explanations about the behavior of this object; - * in particular, implementations are not thread-safe. They can only be iterated by the - * thread that invoked {@code session.execute}. - * - * @see CqlSession#execute(Statement) - * @see CqlSession#execute(String) - */ -public interface ResultSet extends PagingIterable { - - // overridden to amend the javadocs: - /** - * {@inheritDoc} - * - *

This is equivalent to calling: - * - *

-   *   this.iterator().next().getBoolean("[applied]")
-   * 
- * - * Except that this method peeks at the next row without consuming it. - */ - @Override - boolean wasApplied(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java deleted file mode 100644 index 5eab449b057..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A row from a CQL table. - * - *

The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. - */ -public interface Row extends GettableByIndex, GettableByName, GettableById, Detachable { - - /** @return the column definitions contained in this result set. */ - @NonNull - ColumnDefinitions getColumnDefinitions(); - - /** - * Returns a string representation of the contents of this row. - * - *

This produces a comma-separated list enclosed in square brackets. Each column is represented - * by its name, followed by a column and the value as a CQL literal. For example: - * - *

-   * [id:1, name:'test']
-   * 
- * - * Notes: - * - *
    - *
  • This method does not sanitize its output in any way. In particular, no effort is made to - * limit output size: all columns are included, and large strings or blobs will be appended - * as-is. - *
  • Be mindful of how you expose the result. For example, in high-security environments, it - * might be undesirable to leak data in application logs. - *
- */ - @NonNull - default String getFormattedContents() { - StringBuilder result = new StringBuilder("["); - ColumnDefinitions definitions = getColumnDefinitions(); - for (int i = 0; i < definitions.size(); i++) { - if (i > 0) { - result.append(", "); - } - ColumnDefinition definition = definitions.get(i); - String name = definition.getName().asCql(true); - TypeCodec codec = codecRegistry().codecFor(definition.getType()); - Object value = codec.decode(getBytesUnsafe(i), protocolVersion()); - result.append(name).append(':').append(codec.format(value)); - } - return result.append("]").toString(); - } - - /** - * Returns an abstract representation of this object, that may not include the row's - * contents. - * - *

The driver's built-in {@link Row} implementation returns the default format of {@link - * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. - * - *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to - * accidentally leak data (e.g. in application logs). If you want the contents, use {@link - * #getFormattedContents()}. - */ - @Override - String toString(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java deleted file mode 100644 index ef04cd14a5b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.cql.DefaultSimpleStatement; -import com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator; -import com.datastax.oss.driver.internal.core.util.Sizes; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; -import java.util.Map; - -/** - * A one-off CQL statement consisting of a query string with optional placeholders, and a set of - * values for these placeholders. - * - *

To create instances, client applications can use the {@code newInstance} factory methods on - * this interface for common cases, or {@link #builder(String)} for more control over the - * parameters. They can then be passed to {@link CqlSession#execute(Statement)}. - * - *

Simple statements should be reserved for queries that will only be executed a few times by an - * application. For more frequent queries, {@link PreparedStatement} provides many advantages: it is - * more efficient because the server parses the query only once and caches the result; it allows the - * server to return metadata about the bind variables, which allows the driver to validate the - * values earlier, and apply certain optimizations like token-aware routing. - * - *

The default implementation returned by the driver is immutable and thread-safe. - * All mutating methods return a new instance. See also the static factory methods and builders in - * this interface. - * - *

If an application reuses the same statement more than once, it is recommended to cache it (for - * example in a final field). - */ -public interface SimpleStatement extends BatchableStatement { - - /** - * Shortcut to create an instance of the default implementation with only a CQL query (see {@link - * SimpleStatementBuilder} for the defaults for the other fields). - * - *

Note that the returned object is immutable. - */ - static SimpleStatement newInstance(@NonNull String cqlQuery) { - return new DefaultSimpleStatement( - cqlQuery, - NullAllowingImmutableList.of(), - NullAllowingImmutableMap.of(), - null, - null, - null, - null, - null, - null, - NullAllowingImmutableMap.of(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Shortcut to create an instance of the default implementation with only a CQL query and - * positional values (see {@link SimpleStatementBuilder} for the defaults for the other fields). - * - *

Note that the returned object is immutable. - * - * @param positionalValues the values for placeholders in the query string. Individual values can - * be {@code null}, but the vararg array itself can't. - */ - static SimpleStatement newInstance( - @NonNull String cqlQuery, @NonNull Object... positionalValues) { - return new DefaultSimpleStatement( - cqlQuery, - NullAllowingImmutableList.of(positionalValues), - NullAllowingImmutableMap.of(), - null, - null, - null, - null, - null, - null, - NullAllowingImmutableMap.of(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Shortcut to create an instance of the default implementation with only a CQL query and named - * values (see {@link SimpleStatementBuilder} for the defaults for other fields). - * - *

Note that the returned object is immutable. - */ - static SimpleStatement newInstance( - @NonNull String cqlQuery, @NonNull Map namedValues) { - return new DefaultSimpleStatement( - cqlQuery, - NullAllowingImmutableList.of(), - DefaultSimpleStatement.wrapKeys(namedValues), - null, - null, - null, - null, - null, - null, - NullAllowingImmutableMap.of(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - Integer.MIN_VALUE, - null, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } - - /** - * Returns a builder to create an instance of the default implementation. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static SimpleStatementBuilder builder(@NonNull String query) { - return new SimpleStatementBuilder(query); - } - - /** - * Returns a builder to create an instance of the default implementation, copying the fields of - * the given statement. - * - *

Note that this builder is mutable and not thread-safe. - */ - @NonNull - static SimpleStatementBuilder builder(@NonNull SimpleStatement template) { - return new SimpleStatementBuilder(template); - } - - @NonNull - String getQuery(); - - /** - * Sets the CQL query to execute. - * - *

It may contain anonymous placeholders identified by a question mark, as in: - * - *

-   *   SELECT username FROM user WHERE id = ?
-   * 
- * - * Or named placeholders prefixed by a column, as in: - * - *
-   *   SELECT username FROM user WHERE id = :i
-   * 
- * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #setPositionalValues(List) - * @see #setNamedValuesWithIds(Map) - */ - @NonNull - @CheckReturnValue - SimpleStatement setQuery(@NonNull String newQuery); - - /** - * Sets the CQL keyspace to associate with the query. - * - *

This feature is only available with {@link DefaultProtocolVersion#V5 native protocol v5} or - * higher. Specifying a per-request keyspace with lower protocol versions will cause a runtime - * error. - * - * @see Request#getKeyspace() - */ - @NonNull - @CheckReturnValue - SimpleStatement setKeyspace(@Nullable CqlIdentifier newKeyspace); - - /** - * Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(newKeyspaceName))}. - */ - @NonNull - @CheckReturnValue - default SimpleStatement setKeyspace(@NonNull String newKeyspaceName) { - return setKeyspace(CqlIdentifier.fromCql(newKeyspaceName)); - } - - @NonNull - List getPositionalValues(); - - /** - * Sets the positional values to bind to anonymous placeholders. - * - *

You can use either positional or named values, but not both. Therefore if you call this - * method but {@link #getNamedValues()} returns a non-empty map, an {@link - * IllegalArgumentException} will be thrown. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #setQuery(String) - */ - @NonNull - @CheckReturnValue - SimpleStatement setPositionalValues(@NonNull List newPositionalValues); - - @NonNull - Map getNamedValues(); - - /** - * Sets the named values to bind to named placeholders. - * - *

Names must be stripped of the leading column. - * - *

You can use either positional or named values, but not both. Therefore if you call this - * method but {@link #getPositionalValues()} returns a non-empty list, an {@link - * IllegalArgumentException} will be thrown. - * - *

The driver's built-in implementation is immutable, and returns a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #setQuery(String) - */ - @NonNull - @CheckReturnValue - SimpleStatement setNamedValuesWithIds(@NonNull Map newNamedValues); - - /** - * Shortcut for {@link #setNamedValuesWithIds(Map)} with raw strings as value names. The keys are - * converted on the fly with {@link CqlIdentifier#fromCql(String)}. - */ - @NonNull - @CheckReturnValue - default SimpleStatement setNamedValues(@NonNull Map newNamedValues) { - return setNamedValuesWithIds(DefaultSimpleStatement.wrapKeys(newNamedValues)); - } - - @Override - default int computeSizeInBytes(@NonNull DriverContext context) { - int size = Sizes.minimumStatementSize(this, context); - - // SimpleStatement's additional elements to take into account are: - // - query string - // - parameters (named or not) - // - per-query keyspace - // - page size - // - paging state - // - timestamp - - // query - size += PrimitiveSizes.sizeOfLongString(getQuery()); - - // parameters - size += - Sizes.sizeOfSimpleStatementValues( - this, context.getProtocolVersion(), context.getCodecRegistry()); - - // per-query keyspace - if (getKeyspace() != null) { - size += PrimitiveSizes.sizeOfString(getKeyspace().asInternal()); - } - - // page size - size += PrimitiveSizes.INT; - - // paging state - if (getPagingState() != null) { - size += PrimitiveSizes.sizeOfBytes(getPagingState()); - } - - // timestamp - if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { - size += PrimitiveSizes.LONG; - } - - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java deleted file mode 100644 index 1ac910ff6a7..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.cql.DefaultSimpleStatement; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A builder to create a simple statement. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public class SimpleStatementBuilder - extends StatementBuilder { - - @NonNull private String query; - @Nullable private CqlIdentifier keyspace; - @Nullable private NullAllowingImmutableList.Builder positionalValuesBuilder; - @Nullable private NullAllowingImmutableMap.Builder namedValuesBuilder; - - public SimpleStatementBuilder(@NonNull String query) { - this.query = query; - } - - public SimpleStatementBuilder(@NonNull SimpleStatement template) { - super(template); - if (!template.getPositionalValues().isEmpty() && !template.getNamedValues().isEmpty()) { - throw new IllegalArgumentException( - "Illegal statement to copy, can't have both named and positional values"); - } - - this.query = template.getQuery(); - if (!template.getPositionalValues().isEmpty()) { - this.positionalValuesBuilder = - NullAllowingImmutableList.builder(template.getPositionalValues().size()) - .addAll(template.getPositionalValues()); - } - if (!template.getNamedValues().isEmpty()) { - this.namedValuesBuilder = - NullAllowingImmutableMap.builder(template.getNamedValues().size()) - .putAll(template.getNamedValues()); - } - } - - /** @see SimpleStatement#getQuery() */ - @NonNull - public SimpleStatementBuilder setQuery(@NonNull String query) { - this.query = query; - return this; - } - - /** @see SimpleStatement#getKeyspace() */ - @NonNull - public SimpleStatementBuilder setKeyspace(@Nullable CqlIdentifier keyspace) { - this.keyspace = keyspace; - return this; - } - - /** - * Shortcut for {@link #setKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - */ - @NonNull - public SimpleStatementBuilder setKeyspace(@Nullable String keyspaceName) { - return setKeyspace(keyspaceName == null ? 
null : CqlIdentifier.fromCql(keyspaceName)); - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder addPositionalValue(@Nullable Object value) { - if (namedValuesBuilder != null) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (positionalValuesBuilder == null) { - positionalValuesBuilder = NullAllowingImmutableList.builder(); - } - positionalValuesBuilder.add(value); - return this; - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder addPositionalValues(@NonNull Iterable values) { - if (namedValuesBuilder != null) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (positionalValuesBuilder == null) { - positionalValuesBuilder = NullAllowingImmutableList.builder(); - } - positionalValuesBuilder.addAll(values); - return this; - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder addPositionalValues(@NonNull Object... values) { - return addPositionalValues(Arrays.asList(values)); - } - - /** @see SimpleStatement#setPositionalValues(List) */ - @NonNull - public SimpleStatementBuilder clearPositionalValues() { - positionalValuesBuilder = NullAllowingImmutableList.builder(); - return this; - } - - /** @see SimpleStatement#setNamedValuesWithIds(Map) */ - @NonNull - public SimpleStatementBuilder addNamedValue(@NonNull CqlIdentifier name, @Nullable Object value) { - if (positionalValuesBuilder != null) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (namedValuesBuilder == null) { - namedValuesBuilder = NullAllowingImmutableMap.builder(); - } - namedValuesBuilder.put(name, value); - return this; - } - - /** - * Shortcut for {@link #addNamedValue(CqlIdentifier, Object) - * addNamedValue(CqlIdentifier.fromCql(name), value)}. 
- */ - @NonNull - public SimpleStatementBuilder addNamedValue(@NonNull String name, @Nullable Object value) { - return addNamedValue(CqlIdentifier.fromCql(name), value); - } - - /** @see SimpleStatement#setNamedValuesWithIds(Map) */ - @NonNull - public SimpleStatementBuilder clearNamedValues() { - namedValuesBuilder = NullAllowingImmutableMap.builder(); - return this; - } - - @NonNull - @Override - public SimpleStatement build() { - return new DefaultSimpleStatement( - query, - (positionalValuesBuilder == null) - ? NullAllowingImmutableList.of() - : positionalValuesBuilder.build(), - (namedValuesBuilder == null) ? NullAllowingImmutableMap.of() : namedValuesBuilder.build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - buildCustomPayload(), - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java deleted file mode 100644 index d70c56686c5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java +++ /dev/null @@ -1,548 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; - -/** - * A request to execute a CQL query. - * - * @param the "self type" used for covariant returns in subtypes. - */ -public interface Statement> extends Request { - // Implementation note: "CqlRequest" would be a better name, but we keep "Statement" to match - // previous driver versions. - - /** - * The type returned when a CQL statement is executed synchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be run with one of - * the driver's built-in helper methods (such as {@link CqlSession#execute(Statement)}). - */ - GenericType SYNC = GenericType.of(ResultSet.class); - - /** - * The type returned when a CQL statement is executed asynchronously. - * - *

Most users won't use this explicitly. It is needed for the generic execute method ({@link - * Session#execute(Request, GenericType)}), but CQL statements will generally be run with one of - * the driver's built-in helper methods (such as {@link CqlSession#executeAsync(Statement)}). - */ - GenericType> ASYNC = - new GenericType>() {}; - - /** - * A special value for {@link #getQueryTimestamp()} that means "no value". - * - *

It is equal to {@link Long#MIN_VALUE}. - */ - long NO_DEFAULT_TIMESTAMP = QueryOptions.NO_DEFAULT_TIMESTAMP; - - /** - * A special value for {@link #getNowInSeconds()} that means "no value". - * - *

It is equal to {@link Integer#MIN_VALUE}. - */ - int NO_NOW_IN_SECONDS = QueryOptions.NO_NOW_IN_SECONDS; - - /** - * Sets the name of the execution profile that will be used for this statement. - * - *

For all the driver's built-in implementations, calling this method with a non-null argument - * automatically resets {@link #getExecutionProfile()} to null. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfileName(@Nullable String newConfigProfileName); - - /** - * Sets the execution profile to use for this statement. - * - *

For all the driver's built-in implementations, calling this method with a non-null argument - * automatically resets {@link #getExecutionProfileName()} to null. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setExecutionProfile(@Nullable DriverExecutionProfile newProfile); - - /** - * Sets the keyspace to use for token-aware routing. - * - *

See {@link Request#getRoutingKey()} for a description of the token-aware routing algorithm. - * - * @param newRoutingKeyspace The keyspace to use, or {@code null} to disable token-aware routing. - */ - @NonNull - @CheckReturnValue - SelfT setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeyspace); - - /** - * Sets the {@link Node} that should handle this query. - * - *

In the general case, use of this method is heavily discouraged and should only be - * used in the following cases: - * - *

    - *
  1. Querying node-local tables, such as tables in the {@code system} and {@code system_views} - * keyspaces. - *
  2. Applying a series of schema changes, where it may be advantageous to execute schema - * changes in sequence on the same node. - *
- * - *

Configuring a specific node causes the configured {@link LoadBalancingPolicy} to be - * completely bypassed. However, if the load balancing policy dictates that the node is at - * distance {@link NodeDistance#IGNORED} or there is no active connectivity to the node, the - * request will fail with a {@link NoNodeAvailableException}. - * - * @param node The node that should be used to handle executions of this statement or null to - * delegate to the configured load balancing policy. - */ - @NonNull - @CheckReturnValue - SelfT setNode(@Nullable Node node); - - /** - * Shortcut for {@link #setRoutingKeyspace(CqlIdentifier) - * setRoutingKeyspace(CqlIdentifier.fromCql(newRoutingKeyspaceName))}. - * - * @param newRoutingKeyspaceName The keyspace to use, or {@code null} to disable token-aware - * routing. - */ - @NonNull - @CheckReturnValue - default SelfT setRoutingKeyspace(@Nullable String newRoutingKeyspaceName) { - return setRoutingKeyspace( - newRoutingKeyspaceName == null ? null : CqlIdentifier.fromCql(newRoutingKeyspaceName)); - } - - /** - * Sets the key to use for token-aware routing. - * - *

See {@link Request#getRoutingKey()} for a description of the token-aware routing algorithm. - * - * @param newRoutingKey The routing key to use, or {@code null} to disable token-aware routing. - */ - @NonNull - @CheckReturnValue - SelfT setRoutingKey(@Nullable ByteBuffer newRoutingKey); - - /** - * Sets the key to use for token-aware routing, when the partition key has multiple components. - * - *

This method assembles the components into a single byte buffer and passes it to {@link - * #setRoutingKey(ByteBuffer)}. Neither the individual components, nor the vararg array itself, - * can be {@code null}. - */ - @NonNull - @CheckReturnValue - default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { - return setRoutingKey(RoutingKey.compose(newRoutingKeyComponents)); - } - - /** - * Sets the token to use for token-aware routing. - * - *

See {@link Request#getRoutingKey()} for a description of the token-aware routing algorithm. - * - * @param newRoutingToken The routing token to use, or {@code null} to disable token-aware - * routing. - */ - @NonNull - @CheckReturnValue - SelfT setRoutingToken(@Nullable Token newRoutingToken); - - /** - * Sets the custom payload to use for execution. - * - *

All the driver's built-in statement implementations are immutable, and return a new instance - * from this method. However custom implementations may choose to be mutable and return the same - * instance. - * - *

Note that it's your responsibility to provide a thread-safe map. This can be achieved with a - * concurrent or immutable implementation, or by making it effectively immutable (meaning that - * it's never modified after being set on the statement). - */ - @NonNull - @CheckReturnValue - SelfT setCustomPayload(@NonNull Map newCustomPayload); - - /** - * Sets the idempotence to use for execution. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @param newIdempotence a boolean instance to set a statement-specific value, or {@code null} to - * use the default idempotence defined in the configuration. - */ - @NonNull - @CheckReturnValue - SelfT setIdempotent(@Nullable Boolean newIdempotence); - - /** - * Sets tracing for execution. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - */ - @NonNull - @CheckReturnValue - SelfT setTracing(boolean newTracing); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(true)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT enableTracing() { - return setTracing(true); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(false)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT disableTracing() { - return setTracing(false); - } - - /** - * Returns the query timestamp, in microseconds, to send with the statement. See {@link - * #setQueryTimestamp(long)} for details. - * - *

If this is equal to {@link #NO_DEFAULT_TIMESTAMP}, the {@link TimestampGenerator} configured - * for this driver instance will be used to generate a timestamp. - * - * @see #NO_DEFAULT_TIMESTAMP - * @see TimestampGenerator - */ - long getQueryTimestamp(); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getQueryTimestamp()}. - */ - @Deprecated - default long getDefaultTimestamp() { - return getQueryTimestamp(); - } - - /** - * Sets the query timestamp, in microseconds, to send with the statement. - * - *

This is an alternative to appending a {@code USING TIMESTAMP} clause in the statement's - * query string, and has the advantage of sending the timestamp separately from the query string - * itself, which doesn't have to be modified when executing the same statement with different - * timestamps. Note that, if both a {@code USING TIMESTAMP} clause and a query timestamp are set - * for a given statement, the timestamp from the {@code USING TIMESTAMP} clause wins. - * - *

This method can be used on any instance of {@link SimpleStatement}, {@link BoundStatement} - * or {@link BatchStatement}. For a {@link BatchStatement}, the timestamp will apply to all its - * child statements; it is not possible to define per-child timestamps using this method, and - * consequently, if this method is called on a batch child statement, the provided timestamp will - * be silently ignored. If different timestamps are required for individual child statements, this - * can only be achieved with a custom {@code USING TIMESTAMP} clause in each child query. - * - *

If this is equal to {@link #NO_DEFAULT_TIMESTAMP}, the {@link TimestampGenerator} configured - * for this driver instance will be used to generate a timestamp. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance. - * - * @see #NO_DEFAULT_TIMESTAMP - * @see TimestampGenerator - */ - @NonNull - @CheckReturnValue - SelfT setQueryTimestamp(long newTimestamp); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setQueryTimestamp(long)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setDefaultTimestamp(long newTimestamp) { - return setQueryTimestamp(newTimestamp); - } - - /** - * Sets how long to wait for this request to complete. This is a global limit on the duration of a - * session.execute() call, including any retries the driver might do. - * - * @param newTimeout the timeout to use, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_TIMEOUT - */ - @NonNull - @CheckReturnValue - SelfT setTimeout(@Nullable Duration newTimeout); - - /** - * Returns the paging state to send with the statement, or {@code null} if this statement has no - * paging state. - * - *

Paging states are used in scenarios where a paged result is interrupted then resumed later. - * The paging state can only be reused with the exact same statement (same query string, same - * parameters). It is an opaque value that is only meant to be collected, stored and re-used. If - * you try to modify its contents or reuse it with a different statement, the results are - * unpredictable. - */ - @Nullable - ByteBuffer getPagingState(); - - /** - * Sets the paging state to send with the statement, or {@code null} if this statement has no - * paging state. - * - *

Paging states are used in scenarios where a paged result is interrupted then resumed later. - * The paging state can only be reused with the exact same statement (same query string, same - * parameters). It is an opaque value that is only meant to be collected, stored and re-used. If - * you try to modify its contents or reuse it with a different statement, the results are - * unpredictable. - * - *

All the driver's built-in implementations are immutable, and return a new instance from this - * method. However custom implementations may choose to be mutable and return the same instance; - * if you do so, you must override {@link #copy(ByteBuffer)}. - */ - @NonNull - @CheckReturnValue - SelfT setPagingState(@Nullable ByteBuffer newPagingState); - - /** - * Sets the paging state to send with the statement, or {@code null} if this statement has no - * paging state. - * - *

This variant uses the "safe" paging state wrapper, it will throw immediately if the - * statement doesn't match the one that the state was initially extracted from (same query string, - * same parameters). The advantage is that it fails fast, instead of waiting for an error response - * from the server. - * - *

Note that, if this statement is a {@link SimpleStatement} with bound values, those values - * must be encoded in order to perform the check. This method uses the default codec registry and - * default protocol version. This might fail if you use custom codecs; in that case, use {@link - * #setPagingState(PagingState, Session)} instead. - * - * @throws IllegalArgumentException if the given state does not match this statement. - * @see #setPagingState(ByteBuffer) - * @see ExecutionInfo#getSafePagingState() - */ - @NonNull - @CheckReturnValue - default SelfT setPagingState(@Nullable PagingState newPagingState) { - return setPagingState(newPagingState, null); - } - - /** - * Alternative to {@link #setPagingState(PagingState)} that specifies the session the statement - * will be executed with. You only need this for simple statements, and if you use custom - * codecs. Bound statements already know which session they are attached to. - */ - @NonNull - @CheckReturnValue - default SelfT setPagingState(@Nullable PagingState newPagingState, @Nullable Session session) { - if (newPagingState == null) { - return setPagingState((ByteBuffer) null); - } else if (newPagingState.matches(this, session)) { - return setPagingState(newPagingState.getRawPagingState()); - } else { - throw new IllegalArgumentException( - "Paging state mismatch, " - + "this means that either the paging state contents were altered, " - + "or you're trying to apply it to a different statement"); - } - } - - /** - * Returns the page size to use for the statement. - * - * @return the set page size, otherwise 0 or a negative value to use the default value defined in - * the configuration. - * @see DefaultDriverOption#REQUEST_PAGE_SIZE - */ - int getPageSize(); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getPageSize()}. 
- */ - @Deprecated - default int getFetchSize() { - return getPageSize(); - } - - /** - * Configures how many rows will be retrieved simultaneously in a single network roundtrip (the - * goal being to avoid loading too many results in memory at the same time). - * - * @param newPageSize the page size to use, set to 0 or a negative value to use the default value - * defined in the configuration. - * @see DefaultDriverOption#REQUEST_PAGE_SIZE - */ - @NonNull - @CheckReturnValue - SelfT setPageSize(int newPageSize); - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setPageSize(int)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setFetchSize(int newPageSize) { - return setPageSize(newPageSize); - } - - /** - * Returns the {@link ConsistencyLevel} to use for the statement. - * - * @return the set consistency, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_CONSISTENCY - */ - @Nullable - ConsistencyLevel getConsistencyLevel(); - - /** - * Sets the {@link ConsistencyLevel} to use for this statement. - * - * @param newConsistencyLevel the consistency level to use, or null to use the default value - * defined in the configuration. - * @see DefaultDriverOption#REQUEST_CONSISTENCY - */ - @NonNull - @CheckReturnValue - SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel); - - /** - * Returns the serial {@link ConsistencyLevel} to use for the statement. - * - * @return the set serial consistency, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_SERIAL_CONSISTENCY - */ - @Nullable - ConsistencyLevel getSerialConsistencyLevel(); - - /** - * Sets the serial {@link ConsistencyLevel} to use for this statement. 
- * - * @param newSerialConsistencyLevel the serial consistency level to use, or null to use the - * default value defined in the configuration. - * @see DefaultDriverOption#REQUEST_SERIAL_CONSISTENCY - */ - @NonNull - @CheckReturnValue - SelfT setSerialConsistencyLevel(@Nullable ConsistencyLevel newSerialConsistencyLevel); - - /** Whether tracing information should be recorded for this statement. */ - boolean isTracing(); - - /** - * A custom "now in seconds" to use when applying the request (for testing purposes). - * - *

This method's default implementation returns {@link #NO_NOW_IN_SECONDS}. The only reason it - * exists is to preserve binary compatibility. Internally, the driver overrides it to return the - * value that was set programmatically (if any). - * - * @see #NO_NOW_IN_SECONDS - */ - default int getNowInSeconds() { - return NO_NOW_IN_SECONDS; - } - - /** - * Sets the "now in seconds" to use when applying the request (for testing purposes). - * - *

This method's default implementation returns the statement unchanged. The only reason it - * exists is to preserve binary compatibility. Internally, the driver overrides it to record the - * new value. - * - * @see #NO_NOW_IN_SECONDS - */ - @NonNull - @CheckReturnValue - @SuppressWarnings("unchecked") - default SelfT setNowInSeconds(int nowInSeconds) { - return (SelfT) this; - } - - /** - * Calculates the approximate size in bytes that the statement will have when encoded. - * - *

The size might be over-estimated by a few bytes due to global options that may be defined on - * a {@link Session} but not explicitly set on the statement itself. - * - *

The result of this method is not cached, calling it will cause some encoding to be done in - * order to determine some of the statement's attributes sizes. Therefore, use this method - * sparingly in order to avoid unnecessary computation. - * - * @return the approximate number of bytes this statement will take when encoded. - */ - int computeSizeInBytes(@NonNull DriverContext context); - - /** - * Creates a new instance with a different paging state. - * - *

Since all the built-in statement implementations in the driver are immutable, this method's - * default implementation delegates to {@link #setPagingState(ByteBuffer)}. However, if you write - * your own mutable implementation, make sure it returns a different instance. - */ - @NonNull - @CheckReturnValue - default SelfT copy(@Nullable ByteBuffer newPagingState) { - return setPagingState(newPagingState); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java deleted file mode 100644 index 531070b854c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * Handle options common to all statement builders. - * - * @see SimpleStatement#builder(String) - * @see BatchStatement#builder(BatchType) - * @see PreparedStatement#boundStatementBuilder(Object...) - */ -@NotThreadSafe -public abstract class StatementBuilder< - SelfT extends StatementBuilder, StatementT extends Statement> { - - @SuppressWarnings("unchecked") - private final SelfT self = (SelfT) this; - - @Nullable protected String executionProfileName; - @Nullable protected DriverExecutionProfile executionProfile; - @Nullable protected CqlIdentifier routingKeyspace; - @Nullable protected ByteBuffer routingKey; - @Nullable protected Token routingToken; - @Nullable private NullAllowingImmutableMap.Builder customPayloadBuilder; - @Nullable protected Boolean idempotent; - protected boolean tracing; - protected long timestamp = Statement.NO_DEFAULT_TIMESTAMP; - @Nullable protected ByteBuffer pagingState; - protected int pageSize = Integer.MIN_VALUE; - @Nullable protected ConsistencyLevel consistencyLevel; - @Nullable protected ConsistencyLevel serialConsistencyLevel; - @Nullable protected Duration timeout; - @Nullable protected Node node; - protected int nowInSeconds = Statement.NO_NOW_IN_SECONDS; - - protected StatementBuilder() { - // 
nothing to do - } - - protected StatementBuilder(StatementT template) { - this.executionProfileName = template.getExecutionProfileName(); - this.executionProfile = template.getExecutionProfile(); - this.routingKeyspace = template.getRoutingKeyspace(); - this.routingKey = template.getRoutingKey(); - this.routingToken = template.getRoutingToken(); - if (!template.getCustomPayload().isEmpty()) { - this.customPayloadBuilder = - NullAllowingImmutableMap.builder() - .putAll(template.getCustomPayload()); - } - this.idempotent = template.isIdempotent(); - this.tracing = template.isTracing(); - this.timestamp = template.getQueryTimestamp(); - this.pagingState = template.getPagingState(); - this.pageSize = template.getPageSize(); - this.consistencyLevel = template.getConsistencyLevel(); - this.serialConsistencyLevel = template.getSerialConsistencyLevel(); - this.timeout = template.getTimeout(); - this.node = template.getNode(); - this.nowInSeconds = template.getNowInSeconds(); - } - - /** @see Statement#setExecutionProfileName(String) */ - @NonNull - public SelfT setExecutionProfileName(@Nullable String executionProfileName) { - this.executionProfileName = executionProfileName; - if (executionProfileName != null) { - this.executionProfile = null; - } - return self; - } - - /** @see Statement#setExecutionProfile(DriverExecutionProfile) */ - @NonNull - public SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile) { - this.executionProfile = executionProfile; - if (executionProfile != null) { - this.executionProfileName = null; - } - return self; - } - - /** @see Statement#setRoutingKeyspace(CqlIdentifier) */ - @NonNull - public SelfT setRoutingKeyspace(@Nullable CqlIdentifier routingKeyspace) { - this.routingKeyspace = routingKeyspace; - return self; - } - - /** - * Shortcut for {@link #setRoutingKeyspace(CqlIdentifier) - * setRoutingKeyspace(CqlIdentifier.fromCql(routingKeyspaceName))}. 
- */ - @NonNull - public SelfT setRoutingKeyspace(@Nullable String routingKeyspaceName) { - return setRoutingKeyspace( - routingKeyspaceName == null ? null : CqlIdentifier.fromCql(routingKeyspaceName)); - } - - /** @see Statement#setRoutingKey(ByteBuffer) */ - @NonNull - public SelfT setRoutingKey(@Nullable ByteBuffer routingKey) { - this.routingKey = routingKey; - return self; - } - - /** @see Statement#setRoutingKey(ByteBuffer...) */ - @NonNull - public SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { - return setRoutingKey(RoutingKey.compose(newRoutingKeyComponents)); - } - - /** @see Statement#setRoutingToken(Token) */ - @NonNull - public SelfT setRoutingToken(@Nullable Token routingToken) { - this.routingToken = routingToken; - return self; - } - - /** @see Statement#setCustomPayload(Map) */ - @NonNull - public SelfT addCustomPayload(@NonNull String key, @Nullable ByteBuffer value) { - if (customPayloadBuilder == null) { - customPayloadBuilder = NullAllowingImmutableMap.builder(); - } - customPayloadBuilder.put(key, value); - return self; - } - - /** @see Statement#setCustomPayload(Map) */ - @NonNull - public SelfT clearCustomPayload() { - customPayloadBuilder = null; - return self; - } - - /** @see Statement#setIdempotent(Boolean) */ - @NonNull - public SelfT setIdempotence(@Nullable Boolean idempotent) { - this.idempotent = idempotent; - return self; - } - - /** - * This method is a shortcut to {@link #setTracing(boolean)} with an argument of true. It is - * preserved to maintain API compatibility. - * - * @see Statement#setTracing(boolean) - */ - @NonNull - public SelfT setTracing() { - return setTracing(true); - } - - /** @see Statement#setTracing(boolean) */ - @NonNull - public SelfT setTracing(boolean tracing) { - this.tracing = tracing; - return self; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(true)}. 
- */ - @Deprecated - @NonNull - public SelfT enableTracing() { - return setTracing(true); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setTracing(boolean) setTracing(false)}. - */ - @Deprecated - @NonNull - public SelfT disableTracing() { - return setTracing(false); - } - - /** @see Statement#setQueryTimestamp(long) */ - @NonNull - public SelfT setQueryTimestamp(long timestamp) { - this.timestamp = timestamp; - return self; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setQueryTimestamp(long)}. - */ - @Deprecated - @NonNull - public SelfT setDefaultTimestamp(long timestamp) { - return setQueryTimestamp(timestamp); - } - - /** @see Statement#setPagingState(ByteBuffer) */ - @NonNull - public SelfT setPagingState(@Nullable ByteBuffer pagingState) { - this.pagingState = pagingState; - return self; - } - - /** @see Statement#setPageSize(int) */ - @NonNull - public SelfT setPageSize(int pageSize) { - this.pageSize = pageSize; - return self; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setPageSize(int)}. 
- */ - @Deprecated - @NonNull - public SelfT setFetchSize(int pageSize) { - return this.setPageSize(pageSize); - } - - /** @see Statement#setConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setConsistencyLevel(@Nullable ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - return self; - } - - /** @see Statement#setSerialConsistencyLevel(ConsistencyLevel) */ - @NonNull - public SelfT setSerialConsistencyLevel(@Nullable ConsistencyLevel serialConsistencyLevel) { - this.serialConsistencyLevel = serialConsistencyLevel; - return self; - } - - /** @see Statement#setTimeout(Duration) */ - @NonNull - public SelfT setTimeout(@Nullable Duration timeout) { - this.timeout = timeout; - return self; - } - - /** @see Statement#setNode(Node) */ - public SelfT setNode(@Nullable Node node) { - this.node = node; - return self; - } - - /** @see Statement#setNowInSeconds(int) */ - public SelfT setNowInSeconds(int nowInSeconds) { - this.nowInSeconds = nowInSeconds; - return self; - } - - @NonNull - protected Map buildCustomPayload() { - return (customPayloadBuilder == null) - ? NullAllowingImmutableMap.of() - : customPayloadBuilder.build(); - } - - @NonNull - public abstract StatementT build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java deleted file mode 100644 index a0f752db407..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.servererrors.QueryExecutionException; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.SyntaxError; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; - -/** - * A session that offers user-friendly methods to execute CQL requests synchronously. - * - * @since 4.4.0 - */ -public interface SyncCqlSession extends Session { - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - * @param statement the CQL query to execute (that can be any {@link Statement}). - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. 
- * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any - * other validation problem). - */ - @NonNull - default ResultSet execute(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, Statement.SYNC), "The CQL processor should never return a null result"); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

This is an alias for {@link #execute(Statement) - * execute(SimpleStatement.newInstance(query))}. - * - * @param query the CQL query to execute. - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any - * other validation problem). - * @see SimpleStatement#newInstance(String) - */ - @NonNull - default ResultSet execute(@NonNull String query) { - return execute(SimpleStatement.newInstance(query)); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

This is an alias for {@link #execute(Statement) execute(SimpleStatement.newInstance(query, - * values))}. - * - * @param query the CQL query to execute. - * @param values the values for placeholders in the query string. Individual values can be {@code - * null}, but the vararg array itself can't. - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any - * other validation problem). - * @see SimpleStatement#newInstance(String, Object...) - */ - @NonNull - default ResultSet execute(@NonNull String query, @NonNull Object... values) { - return execute(SimpleStatement.newInstance(query, values)); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - * - *

This is an alias for {@link #execute(Statement) execute(SimpleStatement.newInstance(query, - * values))}. - * - * @param query the CQL query to execute. - * @param values the values for named placeholders in the query string. Individual values can be - * {@code null}, but the map itself can't. - * @return the result of the query. That result will never be null but can be empty (and will be - * for any non SELECT query). - * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to - * execute this query. - * @throws QueryExecutionException if the query triggered an execution exception, i.e. an - * exception thrown by Cassandra when it cannot execute the query with the requested - * consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, unauthorized or any - * other validation problem). - * @see SimpleStatement#newInstance(String, Map) - */ - @NonNull - default ResultSet execute(@NonNull String query, @NonNull Map values) { - return execute(SimpleStatement.newInstance(query, values)); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

Note that the bound statements created from the resulting prepared statement will inherit - * some of the attributes of the provided simple statement. That is, given: - * - *

{@code
-   * SimpleStatement simpleStatement = SimpleStatement.newInstance("...");
-   * PreparedStatement preparedStatement = session.prepare(simpleStatement);
-   * BoundStatement boundStatement = preparedStatement.bind();
-   * }
- * - * Then: - * - *
    - *
  • the following methods return the same value as their counterpart on {@code - * simpleStatement}: - *
      - *
    • {@link Request#getExecutionProfileName() boundStatement.getExecutionProfileName()} - *
    • {@link Request#getExecutionProfile() boundStatement.getExecutionProfile()} - *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} - *
    • {@link Request#getRoutingKey() boundStatement.getRoutingKey()} - *
    • {@link Request#getRoutingToken() boundStatement.getRoutingToken()} - *
    • {@link Request#getCustomPayload() boundStatement.getCustomPayload()} - *
    • {@link Request#isIdempotent() boundStatement.isIdempotent()} - *
    • {@link Request#getTimeout() boundStatement.getTimeout()} - *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} - *
    • {@link Statement#getPageSize() boundStatement.getPageSize()} - *
    • {@link Statement#getConsistencyLevel() boundStatement.getConsistencyLevel()} - *
    • {@link Statement#getSerialConsistencyLevel() - * boundStatement.getSerialConsistencyLevel()} - *
    • {@link Statement#isTracing() boundStatement.isTracing()} - *
    - *
  • {@link Request#getRoutingKeyspace() boundStatement.getRoutingKeyspace()} is set from - * either {@link Request#getKeyspace() simpleStatement.getKeyspace()} (if it's not {@code - * null}), or {@code simpleStatement.getRoutingKeyspace()}; - *
  • on the other hand, the following attributes are not propagated: - *
      - *
    • {@link Statement#getQueryTimestamp() boundStatement.getQueryTimestamp()} will be - * set to {@link Statement#NO_DEFAULT_TIMESTAMP}, meaning that the value will be - * assigned by the session's timestamp generator. - *
    • {@link Statement#getNode() boundStatement.getNode()} will always be {@code null}. - *
    • {@link Statement#getNowInSeconds()} boundStatement.getNowInSeconds()} will always - * be equal to {@link Statement#NO_NOW_IN_SECONDS}. - *
    - *
- * - * If you want to customize this behavior, you can write your own implementation of {@link - * PrepareRequest} and pass it to {@link #prepare(PrepareRequest)}. - * - *

The result of this method is cached: if you call it twice with the same {@link - * SimpleStatement}, you will get the same {@link PreparedStatement} instance. We still recommend - * keeping a reference to it (for example by caching it as a field in a DAO); if that's not - * possible (e.g. if query strings are generated dynamically), it's OK to call this method every - * time: there will just be a small performance overhead to check the internal cache. Note that - * caching is based on: - * - *

    - *
  • the query string exactly as you provided it: the driver does not perform any kind of - * trimming or sanitizing. - *
  • all other execution parameters: for example, preparing two statements with identical - * query strings but different {@linkplain SimpleStatement#getConsistencyLevel() consistency - * levels} will yield distinct prepared statements. - *
- * - * @param statement the CQL query to execute (that can be any {@link SimpleStatement}). - * @return the prepared statement corresponding to {@code statement}. - * @throws SyntaxError if the syntax of the query to prepare is not correct. - */ - @NonNull - default PreparedStatement prepare(@NonNull SimpleStatement statement) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(statement), PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - * - * @param query the CQL string query to execute. - * @return the prepared statement corresponding to {@code query}. - * @throws SyntaxError if the syntax of the query to prepare is not correct. - */ - @NonNull - default PreparedStatement prepare(@NonNull String query) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(query), PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to - * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link - * #prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely have to deal - * with {@link PrepareRequest} directly. - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - * - * @param request the {@code PrepareRequest} to execute. - * @return the prepared statement corresponding to {@code request}. - * @throws SyntaxError if the syntax of the query to prepare is not correct. - */ - @NonNull - default PreparedStatement prepare(@NonNull PrepareRequest request) { - return Objects.requireNonNull( - execute(request, PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java deleted file mode 100644 index 3043d94057f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.net.InetSocketAddress; - -/** An event in a {@link QueryTrace}. */ -public interface TraceEvent { - - /** Which activity this event corresponds to. 
*/ - @Nullable - String getActivity(); - - /** The server-side timestamp of the event. */ - long getTimestamp(); - - /** - * @deprecated returns the source IP, but {@link #getSourceAddress()} should be preferred, since - * C* 4.0 and above now returns the port was well. - */ - @Nullable - @Deprecated - InetAddress getSource(); - - /** - * The IP and Port of the host having generated this event. Prior to C* 4.0 the port will be set - * to zero. - * - *

This method's default implementation returns {@link #getSource()} with the port set to 0. - * The only reason it exists is to preserve binary compatibility. Internally, the driver overrides - * it to set the correct port. - * - * @since 4.6.0 - */ - @Nullable - default InetSocketAddress getSourceAddress() { - return new InetSocketAddress(getSource(), 0); - } - /** - * The number of microseconds elapsed on the source when this event occurred since the moment when - * the source started handling the query. - */ - int getSourceElapsedMicros(); - - /** The name of the thread on which this event occurred. */ - @Nullable - String getThreadName(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java deleted file mode 100644 index 2ca2222424c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; - -/** - * A data structure where the values are accessible via a CQL identifier. - * - *

In the driver, these data structures are always accessible by index as well. - */ -public interface AccessibleById extends AccessibleByIndex { - - /** - * Returns all the indices where a given identifier appears. - * - * @throws IllegalArgumentException if the id is invalid. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, - * as it will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull CqlIdentifier id) { - Loggers.ACCESSIBLE_BY_ID.warn( - "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(id)); - } - - /** - * Returns the first index where a given identifier appears (depending on the implementation, - * identifiers may appear multiple times). - * - * @throws IllegalArgumentException if the id is invalid. - */ - int firstIndexOf(@NonNull CqlIdentifier id); - - /** - * Returns the CQL type of the value for the first occurrence of {@code id}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - DataType getType(@NonNull CqlIdentifier id); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java deleted file mode 100644 index 3007ed1fb68..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A data structure where the values are accessible via an integer index. */ -public interface AccessibleByIndex extends Data { - - /** Returns the number of values. */ - int size(); - - /** - * Returns the CQL type of the {@code i}th value. - * - * @throws IndexOutOfBoundsException if the index is invalid. 
- */ - @NonNull - DataType getType(int i); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java deleted file mode 100644 index 74574a82f38..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; - -/** - * A data structure where the values are accessible via a name string. - * - *

This is an optimized version of {@link AccessibleById}, in case the overhead of having to - * create a {@link CqlIdentifier} for each value is too much. - * - *

By default, case is ignored when matching names. If multiple names only differ by their case, - * then the first one is chosen. You can force an exact match by double-quoting the name. - * - *

For example, if the data structure contains three values named {@code Foo}, {@code foo} and - * {@code fOO}, then: - * - *

    - *
  • {@code getString("foo")} retrieves the first value (ignore case, first occurrence). - *
  • {@code getString("\"foo\"")} retrieves the second value (exact case). - *
  • {@code getString("\"fOO\"")} retrieves the third value (exact case). - *
  • {@code getString("\"FOO\"")} fails (exact case, no match). - *
- * - *

In the driver, these data structures are always accessible by index as well. - */ -public interface AccessibleByName extends AccessibleByIndex { - - /** - * Returns all the indices where a given identifier appears. - * - * @throws IllegalArgumentException if the name is invalid. - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it - * will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull String name) { - Loggers.ACCESSIBLE_BY_NAME.warn( - "{} should override allIndicesOf(String), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(name)); - } - - /** - * Returns the first index where a given identifier appears (depending on the implementation, - * identifiers may appear multiple times). - * - * @throws IllegalArgumentException if the name is invalid. - */ - int firstIndexOf(@NonNull String name); - - /** - * Returns the CQL type of the value for the first occurrence of {@code name}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * GettableByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - DataType getType(@NonNull String name); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java deleted file mode 100644 index d3dc68733e4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; - -/** - * A set of static utility methods to work with byte buffers (associated with CQL type {@code - * blob}). - */ -public class ByteUtils { - - // Implementation note: this is just a gateway to the internal `Bytes` class in native-protocol. - // The difference is that this one is part of the public API. - - /** - * Converts a blob to its CQL hex string representation. - * - *

A CQL blob string representation consists of the hexadecimal representation of the blob - * bytes prefixed by "0x". - * - * @param bytes the blob/bytes to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this - * method returns {@code null}. - */ - public static String toHexString(ByteBuffer bytes) { - return Bytes.toHexString(bytes); - } - - /** - * Converts a blob to its CQL hex string representation. - * - *

A CQL blob string representation consists of the hexadecimal representation of the blob - * bytes prefixed by "0x". - * - * @param byteArray the blob/bytes array to convert to a string. - * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this - * method returns {@code null}. - */ - public static String toHexString(byte[] byteArray) { - return Bytes.toHexString(byteArray); - } - - /** - * Parses a hex string representing a CQL blob. - * - *

The input should be a valid representation of a CQL blob, i.e. it must start by "0x" - * followed by the hexadecimal representation of the blob bytes. - * - * @param str the CQL blob string representation to parse. - * @return the bytes corresponding to {@code str}. If {@code str} is {@code null}, this method - * returns {@code null}. - * @throws IllegalArgumentException if {@code str} is not a valid CQL blob string. - */ - public static ByteBuffer fromHexString(String str) { - return Bytes.fromHexString(str); - } - - /** - * Extracts the content of the provided {@code ByteBuffer} as a byte array. - * - *

This method works with any type of {@code ByteBuffer} (direct and non-direct ones), but when - * the buffer is backed by an array, it will try to avoid copy when possible. As a consequence, - * changes to the returned byte array may or may not reflect into the initial buffer. - * - * @param bytes the buffer whose contents to extract. - * @return a byte array with the contents of {@code bytes}. That array may be the array backing - * {@code bytes} if this can avoid a copy. - */ - public static byte[] getArray(ByteBuffer bytes) { - return Bytes.getArray(bytes); - } - - private ByteUtils() {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java deleted file mode 100644 index bfa9df20bbb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java +++ /dev/null @@ -1,666 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Objects; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.time.Duration; -import java.time.Period; -import java.time.temporal.ChronoUnit; -import java.time.temporal.Temporal; -import java.time.temporal.TemporalAmount; -import java.time.temporal.TemporalUnit; -import java.time.temporal.UnsupportedTemporalTypeException; -import java.util.List; -import java.util.Locale; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import net.jcip.annotations.Immutable; - -/** - * A duration, as defined in CQL. - * - *

It stores months, days, and seconds separately due to the fact that the number of days in a - * month varies, and a day can have 23 or 25 hours if a daylight saving is involved. As such, this - * type differs from {@link java.time.Duration} (which only represents an amount between two points - * in time, regardless of the calendar). - */ -@Immutable -public final class CqlDuration implements TemporalAmount, Serializable { - - private static final long serialVersionUID = 1L; - - @VisibleForTesting static final long NANOS_PER_MICRO = 1000L; - @VisibleForTesting static final long NANOS_PER_MILLI = 1000 * NANOS_PER_MICRO; - @VisibleForTesting static final long NANOS_PER_SECOND = 1000 * NANOS_PER_MILLI; - @VisibleForTesting static final long NANOS_PER_MINUTE = 60 * NANOS_PER_SECOND; - @VisibleForTesting static final long NANOS_PER_HOUR = 60 * NANOS_PER_MINUTE; - @VisibleForTesting static final int DAYS_PER_WEEK = 7; - @VisibleForTesting static final int MONTHS_PER_YEAR = 12; - - /** The Regexp used to parse the duration provided as String. */ - private static final Pattern STANDARD_PATTERN = - Pattern.compile( - "\\G(\\d+)(y|Y|mo|MO|mO|Mo|w|W|d|D|h|H|s|S|ms|MS|mS|Ms|us|US|uS|Us|µs|µS|ns|NS|nS|Ns|m|M)"); - - /** - * The Regexp used to parse the duration when provided in the ISO 8601 format with designators. - */ - private static final Pattern ISO8601_PATTERN = - Pattern.compile("P((\\d+)Y)?((\\d+)M)?((\\d+)D)?(T((\\d+)H)?((\\d+)M)?((\\d+)S)?)?"); - - /** - * The Regexp used to parse the duration when provided in the ISO 8601 format with designators. - */ - private static final Pattern ISO8601_WEEK_PATTERN = Pattern.compile("P(\\d+)W"); - - /** The Regexp used to parse the duration when provided in the ISO 8601 alternative format. 
*/ - private static final Pattern ISO8601_ALTERNATIVE_PATTERN = - Pattern.compile("P(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2})"); - - private static final ImmutableList TEMPORAL_UNITS = - ImmutableList.of(ChronoUnit.MONTHS, ChronoUnit.DAYS, ChronoUnit.NANOS); - - /** @serial */ - private final int months; - /** @serial */ - private final int days; - /** @serial */ - private final long nanoseconds; - - private CqlDuration(int months, int days, long nanoseconds) { - // Makes sure that all the values are negative if one of them is - if ((months < 0 || days < 0 || nanoseconds < 0) - && (months > 0 || days > 0 || nanoseconds > 0)) { - throw new IllegalArgumentException( - String.format( - "All values must be either negative or positive, got %d months, %d days, %d nanoseconds", - months, days, nanoseconds)); - } - this.months = months; - this.days = days; - this.nanoseconds = nanoseconds; - } - - /** - * Creates a duration with the given number of months, days and nanoseconds. - * - *

A duration can be negative. In this case, all the non zero values must be negative. - * - * @param months the number of months - * @param days the number of days - * @param nanoseconds the number of nanoseconds - * @throws IllegalArgumentException if the values are not all negative or all positive - */ - public static CqlDuration newInstance(int months, int days, long nanoseconds) { - return new CqlDuration(months, days, nanoseconds); - } - - /** - * Converts a String into a duration. - * - *

The accepted formats are: - * - *

    - *
  • multiple digits followed by a time unit like: 12h30m where the time unit can be: - *
      - *
    • {@code y}: years - *
    • {@code mo}: months - *
    • {@code w}: weeks - *
    • {@code d}: days - *
    • {@code h}: hours - *
    • {@code m}: minutes - *
    • {@code s}: seconds - *
    • {@code ms}: milliseconds - *
    • {@code us} or {@code µs}: microseconds - *
    • {@code ns}: nanoseconds - *
    - *
  • ISO 8601 format: P[n]Y[n]M[n]DT[n]H[n]M[n]S or P[n]W - *
  • ISO 8601 alternative format: P[YYYY]-[MM]-[DD]T[hh]:[mm]:[ss] - *
- * - * @param input the String to convert - */ - public static CqlDuration from(@NonNull String input) { - boolean isNegative = input.startsWith("-"); - String source = isNegative ? input.substring(1) : input; - - if (source.startsWith("P")) { - if (source.endsWith("W")) { - return parseIso8601WeekFormat(isNegative, source); - } - if (source.contains("-")) { - return parseIso8601AlternativeFormat(isNegative, source); - } - return parseIso8601Format(isNegative, source); - } - return parseStandardFormat(isNegative, source); - } - - private static CqlDuration parseIso8601Format(boolean isNegative, @NonNull String source) { - Matcher matcher = ISO8601_PATTERN.matcher(source); - if (!matcher.matches()) - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - - Builder builder = new Builder(isNegative); - if (matcher.group(1) != null) { - builder.addYears(groupAsLong(matcher, 2)); - } - if (matcher.group(3) != null) { - builder.addMonths(groupAsLong(matcher, 4)); - } - if (matcher.group(5) != null) { - builder.addDays(groupAsLong(matcher, 6)); - } - // Checks if the String contains time information - if (matcher.group(7) != null) { - if (matcher.group(8) != null) { - builder.addHours(groupAsLong(matcher, 9)); - } - if (matcher.group(10) != null) { - builder.addMinutes(groupAsLong(matcher, 11)); - } - if (matcher.group(12) != null) { - builder.addSeconds(groupAsLong(matcher, 13)); - } - } - return builder.build(); - } - - private static CqlDuration parseIso8601AlternativeFormat( - boolean isNegative, @NonNull String source) { - Matcher matcher = ISO8601_ALTERNATIVE_PATTERN.matcher(source); - if (!matcher.matches()) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - return new Builder(isNegative) - .addYears(groupAsLong(matcher, 1)) - .addMonths(groupAsLong(matcher, 2)) - .addDays(groupAsLong(matcher, 3)) - .addHours(groupAsLong(matcher, 4)) - 
.addMinutes(groupAsLong(matcher, 5)) - .addSeconds(groupAsLong(matcher, 6)) - .build(); - } - - private static CqlDuration parseIso8601WeekFormat(boolean isNegative, @NonNull String source) { - Matcher matcher = ISO8601_WEEK_PATTERN.matcher(source); - if (!matcher.matches()) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - return new Builder(isNegative).addWeeks(groupAsLong(matcher, 1)).build(); - } - - private static CqlDuration parseStandardFormat(boolean isNegative, @NonNull String source) { - Matcher matcher = STANDARD_PATTERN.matcher(source); - if (!matcher.find()) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - Builder builder = new Builder(isNegative); - boolean done; - - do { - long number = groupAsLong(matcher, 1); - String symbol = matcher.group(2); - add(builder, number, symbol); - done = matcher.end() == source.length(); - } while (matcher.find()); - - if (!done) { - throw new IllegalArgumentException( - String.format("Unable to convert '%s' to a duration", source)); - } - return builder.build(); - } - - private static long groupAsLong(@NonNull Matcher matcher, int group) { - return Long.parseLong(matcher.group(group)); - } - - private static Builder add(@NonNull Builder builder, long number, @NonNull String symbol) { - String s = symbol.toLowerCase(Locale.ROOT); - if (s.equals("y")) { - return builder.addYears(number); - } else if (s.equals("mo")) { - return builder.addMonths(number); - } else if (s.equals("w")) { - return builder.addWeeks(number); - } else if (s.equals("d")) { - return builder.addDays(number); - } else if (s.equals("h")) { - return builder.addHours(number); - } else if (s.equals("m")) { - return builder.addMinutes(number); - } else if (s.equals("s")) { - return builder.addSeconds(number); - } else if (s.equals("ms")) { - return builder.addMillis(number); - } else if (s.equals("us") || s.equals("µs")) { - 
return builder.addMicros(number); - } else if (s.equals("ns")) { - return builder.addNanos(number); - } - throw new IllegalArgumentException(String.format("Unknown duration symbol '%s'", symbol)); - } - - /** - * Appends the result of the division to the specified builder if the dividend is not zero. - * - * @param builder the builder to append to - * @param dividend the dividend - * @param divisor the divisor - * @param unit the time unit to append after the result of the division - * @return the remainder of the division - */ - private static long append( - @NonNull StringBuilder builder, long dividend, long divisor, @NonNull String unit) { - if (dividend == 0 || dividend < divisor) { - return dividend; - } - builder.append(dividend / divisor).append(unit); - return dividend % divisor; - } - - /** - * Returns the number of months in this duration. - * - * @return the number of months in this duration. - */ - public int getMonths() { - return months; - } - - /** - * Returns the number of days in this duration. - * - * @return the number of days in this duration. - */ - public int getDays() { - return days; - } - - /** - * Returns the number of nanoseconds in this duration. - * - * @return the number of months in this duration. - */ - public long getNanoseconds() { - return nanoseconds; - } - - /** - * {@inheritDoc} - * - *

This implementation converts the months and days components to a {@link Period}, and the - * nanosecond component to a {@link Duration}, and adds those two amounts to the temporal object. - * Therefore the chronology of the temporal must be either the ISO chronology or null. - * - * @see Period#addTo(Temporal) - * @see Duration#addTo(Temporal) - */ - @Override - public Temporal addTo(Temporal temporal) { - return temporal.plus(Period.of(0, months, days)).plus(Duration.ofNanos(nanoseconds)); - } - - /** - * {@inheritDoc} - * - *

This implementation converts the months and days components to a {@link Period}, and the - * nanosecond component to a {@link Duration}, and subtracts those two amounts to the temporal - * object. Therefore the chronology of the temporal must be either the ISO chronology or null. - * - * @see Period#subtractFrom(Temporal) - * @see Duration#subtractFrom(Temporal) - */ - @Override - public Temporal subtractFrom(Temporal temporal) { - return temporal.minus(Period.of(0, months, days)).minus(Duration.ofNanos(nanoseconds)); - } - - @Override - public long get(TemporalUnit unit) { - if (unit == ChronoUnit.MONTHS) { - return months; - } else if (unit == ChronoUnit.DAYS) { - return days; - } else if (unit == ChronoUnit.NANOS) { - return nanoseconds; - } else { - throw new UnsupportedTemporalTypeException("Unsupported unit: " + unit); - } - } - - @Override - public List getUnits() { - return TEMPORAL_UNITS; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CqlDuration) { - CqlDuration that = (CqlDuration) other; - return this.days == that.days - && this.months == that.months - && this.nanoseconds == that.nanoseconds; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hashCode(days, months, nanoseconds); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - - if (months < 0 || days < 0 || nanoseconds < 0) { - builder.append('-'); - } - long remainder = append(builder, Math.abs(months), MONTHS_PER_YEAR, "y"); - append(builder, remainder, 1, "mo"); - - append(builder, Math.abs(days), 1, "d"); - - if (nanoseconds != 0) { - remainder = append(builder, Math.abs(nanoseconds), NANOS_PER_HOUR, "h"); - remainder = append(builder, remainder, NANOS_PER_MINUTE, "m"); - remainder = append(builder, remainder, NANOS_PER_SECOND, "s"); - remainder = append(builder, remainder, NANOS_PER_MILLI, "ms"); - remainder = append(builder, 
remainder, NANOS_PER_MICRO, "us"); - append(builder, remainder, 1, "ns"); - } - return builder.toString(); - } - - private static class Builder { - private final boolean isNegative; - private int months; - private int days; - private long nanoseconds; - - /** We need to make sure that the values for each units are provided in order. */ - private int currentUnitIndex; - - public Builder(boolean isNegative) { - this.isNegative = isNegative; - } - - /** - * Adds the specified amount of years. - * - * @param numberOfYears the number of years to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addYears(long numberOfYears) { - validateOrder(1); - validateMonths(numberOfYears, MONTHS_PER_YEAR); - // Cast to avoid http://errorprone.info/bugpattern/NarrowingCompoundAssignment - // We could also change the method to accept an int, but keeping long allows us to keep the - // calling code generic. - months += (int) numberOfYears * MONTHS_PER_YEAR; - return this; - } - - /** - * Adds the specified amount of months. - * - * @param numberOfMonths the number of months to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMonths(long numberOfMonths) { - validateOrder(2); - validateMonths(numberOfMonths, 1); - months += (int) numberOfMonths; - return this; - } - - /** - * Adds the specified amount of weeks. - * - * @param numberOfWeeks the number of weeks to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addWeeks(long numberOfWeeks) { - validateOrder(3); - validateDays(numberOfWeeks, DAYS_PER_WEEK); - days += (int) numberOfWeeks * DAYS_PER_WEEK; - return this; - } - - /** - * Adds the specified amount of days. - * - * @param numberOfDays the number of days to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addDays(long numberOfDays) { - validateOrder(4); - validateDays(numberOfDays, 1); - days += (int) numberOfDays; - return this; - } - - /** - * Adds the specified amount of hours. 
- * - * @param numberOfHours the number of hours to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addHours(long numberOfHours) { - validateOrder(5); - validateNanos(numberOfHours, NANOS_PER_HOUR); - nanoseconds += numberOfHours * NANOS_PER_HOUR; - return this; - } - - /** - * Adds the specified amount of minutes. - * - * @param numberOfMinutes the number of minutes to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMinutes(long numberOfMinutes) { - validateOrder(6); - validateNanos(numberOfMinutes, NANOS_PER_MINUTE); - nanoseconds += numberOfMinutes * NANOS_PER_MINUTE; - return this; - } - - /** - * Adds the specified amount of seconds. - * - * @param numberOfSeconds the number of seconds to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addSeconds(long numberOfSeconds) { - validateOrder(7); - validateNanos(numberOfSeconds, NANOS_PER_SECOND); - nanoseconds += numberOfSeconds * NANOS_PER_SECOND; - return this; - } - - /** - * Adds the specified amount of milliseconds. - * - * @param numberOfMillis the number of milliseconds to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMillis(long numberOfMillis) { - validateOrder(8); - validateNanos(numberOfMillis, NANOS_PER_MILLI); - nanoseconds += numberOfMillis * NANOS_PER_MILLI; - return this; - } - - /** - * Adds the specified amount of microseconds. - * - * @param numberOfMicros the number of microseconds to add. - * @return this {@code Builder} - */ - @NonNull - public Builder addMicros(long numberOfMicros) { - validateOrder(9); - validateNanos(numberOfMicros, NANOS_PER_MICRO); - nanoseconds += numberOfMicros * NANOS_PER_MICRO; - return this; - } - - /** - * Adds the specified amount of nanoseconds. - * - * @param numberOfNanos the number of nanoseconds to add. 
- * @return this {@code Builder} - */ - @NonNull - public Builder addNanos(long numberOfNanos) { - validateOrder(10); - validateNanos(numberOfNanos, 1); - nanoseconds += numberOfNanos; - return this; - } - - /** - * Validates that the total number of months can be stored. - * - * @param units the number of units that need to be added - * @param monthsPerUnit the number of days per unit - */ - private void validateMonths(long units, int monthsPerUnit) { - validate(units, (Integer.MAX_VALUE - months) / monthsPerUnit, "months"); - } - - /** - * Validates that the total number of days can be stored. - * - * @param units the number of units that need to be added - * @param daysPerUnit the number of days per unit - */ - private void validateDays(long units, int daysPerUnit) { - validate(units, (Integer.MAX_VALUE - days) / daysPerUnit, "days"); - } - - /** - * Validates that the total number of nanoseconds can be stored. - * - * @param units the number of units that need to be added - * @param nanosPerUnit the number of nanoseconds per unit - */ - private void validateNanos(long units, long nanosPerUnit) { - validate(units, (Long.MAX_VALUE - nanoseconds) / nanosPerUnit, "nanoseconds"); - } - - /** - * Validates that the specified amount is less than the limit. - * - * @param units the number of units to check - * @param limit the limit on the number of units - * @param unitName the unit name - */ - private void validate(long units, long limit, @NonNull String unitName) { - Preconditions.checkArgument( - units <= limit, - "Invalid duration. The total number of %s must be less or equal to %s", - unitName, - Integer.MAX_VALUE); - } - - /** - * Validates that the duration values are added in the proper order. - * - * @param unitIndex the unit index (e.g. years=1, months=2, ...) - */ - private void validateOrder(int unitIndex) { - if (unitIndex == currentUnitIndex) { - throw new IllegalArgumentException( - String.format( - "Invalid duration. 
The %s are specified multiple times", getUnitName(unitIndex))); - } - if (unitIndex <= currentUnitIndex) { - throw new IllegalArgumentException( - String.format( - "Invalid duration. The %s should be after %s", - getUnitName(currentUnitIndex), getUnitName(unitIndex))); - } - currentUnitIndex = unitIndex; - } - - /** - * Returns the name of the unit corresponding to the specified index. - * - * @param unitIndex the unit index - * @return the name of the unit corresponding to the specified index. - */ - @NonNull - private String getUnitName(int unitIndex) { - switch (unitIndex) { - case 1: - return "years"; - case 2: - return "months"; - case 3: - return "weeks"; - case 4: - return "days"; - case 5: - return "hours"; - case 6: - return "minutes"; - case 7: - return "seconds"; - case 8: - return "milliseconds"; - case 9: - return "microseconds"; - case 10: - return "nanoseconds"; - default: - throw new AssertionError("unknown unit index: " + unitIndex); - } - } - - @NonNull - public CqlDuration build() { - return isNegative - ? new CqlDuration(-months, -days, -nanoseconds) - : new CqlDuration(months, days, nanoseconds); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java deleted file mode 100644 index 8089d551750..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Predicates; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.io.ObjectStreamException; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import java.util.stream.Stream; - -/** - * Representation of a vector as defined in CQL. - * - *

A CQL vector is a fixed-length array of non-null numeric values. These properties don't map - * cleanly to an existing class in the standard JDK Collections hierarchy so we provide this value - * object instead. Like other value object collections returned by the driver instances of this - * class are not immutable; think of these value objects as a representation of a vector stored in - * the database as an initial step in some additional computation. - * - *

While we don't implement any Collection APIs we do implement Iterable. We also attempt to play - * nice with the Streams API in order to better facilitate integration with data pipelines. Finally, - * where possible we've tried to make the API of this class similar to the equivalent methods on - * {@link List}. - */ -public class CqlVector implements Iterable, Serializable { - - /** - * Create a new CqlVector containing the specified values. - * - * @param vals the collection of values to wrap. - * @return a CqlVector wrapping those values - */ - public static CqlVector newInstance(V... vals) { - - // Note that Array.asList() guarantees the return of an array which implements RandomAccess - return new CqlVector(Arrays.asList(vals)); - } - - /** - * Create a new CqlVector that "wraps" an existing ArrayList. Modifications to the passed - * ArrayList will also be reflected in the returned CqlVector. - * - * @param list the collection of values to wrap. - * @return a CqlVector wrapping those values - */ - public static CqlVector newInstance(List list) { - Preconditions.checkArgument(list != null, "Input list should not be null"); - return new CqlVector(list); - } - - /** - * Create a new CqlVector instance from the specified string representation. 
- * - * @param str a String representation of a CqlVector - * @param subtypeCodec - * @return a new CqlVector built from the String representation - */ - public static CqlVector from(@NonNull String str, @NonNull TypeCodec subtypeCodec) { - Preconditions.checkArgument(str != null, "Cannot create CqlVector from null string"); - Preconditions.checkArgument(!str.isEmpty(), "Cannot create CqlVector from empty string"); - if (str.equalsIgnoreCase("NULL")) return null; - - int idx = ParseUtils.skipSpaces(str, 0); - if (str.charAt(idx++) != '[') - throw new IllegalArgumentException( - String.format( - "Cannot parse vector value from \"%s\", at character %d expecting '[' but got '%c'", - str, idx, str.charAt(idx))); - - idx = ParseUtils.skipSpaces(str, idx); - - if (str.charAt(idx) == ']') { - return new CqlVector<>(new ArrayList<>()); - } - - List list = new ArrayList<>(); - while (idx < str.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(str, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse vector value from \"%s\", invalid CQL value at character %d", - str, idx), - e); - } - - list.add(subtypeCodec.parse(str.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(str, idx); - if (str.charAt(idx) == ']') return new CqlVector<>(list); - if (str.charAt(idx++) != ',') - throw new IllegalArgumentException( - String.format( - "Cannot parse vector value from \"%s\", at character %d expecting ',' but got '%c'", - str, idx, str.charAt(idx))); - - idx = ParseUtils.skipSpaces(str, idx); - } - throw new IllegalArgumentException( - String.format("Malformed vector value \"%s\", missing closing ']'", str)); - } - - private final List list; - - private CqlVector(@NonNull List list) { - - Preconditions.checkArgument( - Iterables.all(list, Predicates.notNull()), "CqlVectors cannot contain null values"); - this.list = list; - } - - /** - * Retrieve the value at the specified index. 
Modelled after {@link List#get(int)} - * - * @param idx the index to retrieve - * @return the value at the specified index - */ - public T get(int idx) { - return list.get(idx); - } - - /** - * Update the value at the specified index. Modelled after {@link List#set(int, Object)} - * - * @param idx the index to set - * @param val the new value for the specified index - * @return the old value for the specified index - */ - public T set(int idx, T val) { - return list.set(idx, val); - } - - /** - * Return the size of this vector. Modelled after {@link List#size()} - * - * @return the vector size - */ - public int size() { - return this.list.size(); - } - - /** - * Return a CqlVector consisting of the contents of a portion of this vector. Modelled after - * {@link List#subList(int, int)} - * - * @param from the index to start from (inclusive) - * @param to the index to end on (exclusive) - * @return a new CqlVector wrapping the sublist - */ - public CqlVector subVector(int from, int to) { - return new CqlVector(this.list.subList(from, to)); - } - - /** - * Return a boolean indicating whether the vector is empty. Modelled after {@link List#isEmpty()} - * - * @return true if the list is empty, false otherwise - */ - public boolean isEmpty() { - return this.list.isEmpty(); - } - - /** - * Create an {@link Iterator} for this vector - * - * @return the generated iterator - */ - @Override - public Iterator iterator() { - return this.list.iterator(); - } - - /** - * Create a {@link Stream} of the values in this vector - * - * @return the Stream instance - */ - public Stream stream() { - return this.list.stream(); - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } else if (o instanceof CqlVector) { - CqlVector that = (CqlVector) o; - return this.list.equals(that.list); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(list); - } - - /** - * The string representation of the vector. 
Elements, like strings, may not be properly quoted. - * - * @return the string representation - */ - @Override - public String toString() { - return Iterables.toString(this.list); - } - - /** - * Serialization proxy for CqlVector. Allows serialization regardless of implementation of list - * field. - * - * @param inner type of CqlVector, assume Number is always Serializable. - */ - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private transient List list; - - SerializationProxy(CqlVector vector) { - this.list = vector.list; - } - - // Reconstruct CqlVector's list of elements. - private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { - stream.defaultReadObject(); - - int size = stream.readInt(); - list = new ArrayList<>(); - for (int i = 0; i < size; i++) { - list.add((T) stream.readObject()); - } - } - - // Return deserialized proxy object as CqlVector. - private Object readResolve() throws ObjectStreamException { - return new CqlVector(list); - } - - // Write size of CqlVector followed by items in order. - private void writeObject(ObjectOutputStream stream) throws IOException { - stream.defaultWriteObject(); - - stream.writeInt(list.size()); - for (T item : list) { - stream.writeObject(item); - } - } - } - - /** @serialData The number of elements in the vector, followed by each element in-order. 
*/ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java deleted file mode 100644 index 495b96e97c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A data structure containing CQL values. */ -public interface Data { - - /** - * Returns the registry of all the codecs currently available to convert values for this instance. - * - *

If you obtained this object from the driver, this will be set automatically. If you created - * it manually, or just deserialized it, it is set to {@link CodecRegistry#DEFAULT}. You can - * reattach this object to an existing driver instance to use its codec registry. - * - * @see Detachable - */ - @NonNull - CodecRegistry codecRegistry(); - - /** - * Returns the protocol version that is currently used to convert values for this instance. - * - *

If you obtained this object from the driver, this will be set automatically. If you created - * it manually, or just deserialized it, it is set to {@link DefaultProtocolVersion#DEFAULT}. You - * can reattach this object to an existing driver instance to use its protocol version. - * - * @see Detachable - */ - @NonNull - ProtocolVersion protocolVersion(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java deleted file mode 100644 index 8393bc9f758..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java +++ /dev/null @@ -1,676 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to retrieve its values via a CQL identifier. */ -public interface GettableById extends GettableByIndex, AccessibleById { - - /** - * Returns the raw binary representation of the value for the first occurrence of {@code id}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed getters - * instead, to get a higher-level Java representation. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @return the raw value, or {@code null} if the CQL value is {@code NULL}. For performance - * reasons, this is the actual instance used internally. If you read data from the buffer, - * make sure to {@link ByteBuffer#duplicate() duplicate} it beforehand, or only use relative - * methods. If you change the buffer's index or its contents in any way, any other getter - * invocation for this value will have unpredictable results. - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default ByteBuffer getBytesUnsafe(@NonNull CqlIdentifier id) { - return getBytesUnsafe(firstIndexOf(id)); - } - - /** - * Indicates whether the value for the first occurrence of {@code id} is a CQL {@code NULL}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default boolean isNull(@NonNull CqlIdentifier id) { - return isNull(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id}, using the given codec for the - * conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default ValueT get(@NonNull CqlIdentifier id, @NonNull TypeCodec codec) { - return get(firstIndexOf(id), codec); - } - - /** - * Returns the value for the first occurrence of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #get(int, Class)} instead, which may perform slightly better. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull CqlIdentifier id, @NonNull GenericType targetType) { - return get(firstIndexOf(id), targetType); - } - - /** - * Returns the value for the first occurrence of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #get(int, GenericType)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull CqlIdentifier id, @NonNull Class targetClass) { - return get(firstIndexOf(id), targetClass); - } - - /** - * Returns the value for the first occurrence of {@code id}, converting it to the most appropriate - * Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

Use this method to dynamically inspect elements when types aren't known in advance, for - * instance if you're writing a generic row logger. If you know the target Java type, it is - * generally preferable to use typed variants, such as the ones for built-in types ({@link - * #getBoolean(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link - * #get(int, GenericType)} for custom types. - * - *

The definition of "most appropriate" is unspecified, and left to the appreciation of the - * {@link #codecRegistry()} implementation. By default, the driver uses the mapping described in - * the other {@code getXxx()} methods (for example {@link #getString(int) String for text, varchar - * and ascii}, etc). - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default Object getObject(@NonNull CqlIdentifier id) { - return getObject(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code false}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Boolean.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default boolean getBoolean(@NonNull CqlIdentifier id) { - return getBoolean(firstIndexOf(id)); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getBoolean(CqlIdentifier)}. - */ - @Deprecated - default boolean getBool(@NonNull CqlIdentifier id) { - return getBoolean(id); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Byte.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default byte getByte(@NonNull CqlIdentifier id) { - return getByte(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Double.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default double getDouble(@NonNull CqlIdentifier id) { - return getDouble(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Float.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default float getFloat(@NonNull CqlIdentifier id) { - return getFloat(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Integer.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default int getInt(@NonNull CqlIdentifier id) { - return getInt(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Long.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default long getLong(@NonNull CqlIdentifier id) { - return getLong(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(CqlIdentifier)} before calling this method, or use {@code get(id, Short.class)} - * instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - default short getShort(@NonNull CqlIdentifier id) { - return getShort(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default Instant getInstant(@NonNull CqlIdentifier id) { - return getInstant(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default LocalDate getLocalDate(@NonNull CqlIdentifier id) { - return getLocalDate(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default LocalTime getLocalTime(@NonNull CqlIdentifier id) { - return getLocalTime(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default ByteBuffer getByteBuffer(@NonNull CqlIdentifier id) { - return getByteBuffer(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default String getString(@NonNull CqlIdentifier id) { - return getString(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default BigInteger getBigInteger(@NonNull CqlIdentifier id) { - return getBigInteger(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default BigDecimal getBigDecimal(@NonNull CqlIdentifier id) { - return getBigDecimal(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default UUID getUuid(@NonNull CqlIdentifier id) { - return getUuid(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default InetAddress getInetAddress(@NonNull CqlIdentifier id) { - return getInetAddress(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default CqlDuration getCqlDuration(@NonNull CqlIdentifier id) { - return getCqlDuration(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a vector. - * - *

By default, this works with CQL type {@code vector}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default CqlVector getVector( - @NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return getVector(firstIndexOf(id), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a token. - * - *

Note that, for simplicity, this method relies on the CQL type of the column to pick the - * correct token implementation. Therefore it must only be called on columns of the type that - * matches the partitioner in use for this cluster: {@code bigint} for {@code Murmur3Partitioner}, - * {@code blob} for {@code ByteOrderedPartitioner}, and {@code varint} for {@code - * RandomPartitioner}. Calling it for the wrong type will produce corrupt tokens that are unusable - * with this driver instance. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the column type can not be converted to a known token type - * or if the name is invalid. - */ - @Nullable - default Token getToken(@NonNull CqlIdentifier id) { - return getToken(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default List getList( - @NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return getList(firstIndexOf(id), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default Set getSet( - @NonNull CqlIdentifier id, @NonNull Class elementsClass) { - return getSet(firstIndexOf(id), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default Map getMap( - @NonNull CqlIdentifier id, @NonNull Class keyClass, @NonNull Class valueClass) { - return getMap(firstIndexOf(id), keyClass, valueClass); - } - - /** - * Returns the value for the first occurrence of {@code id} as a user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default UdtValue getUdtValue(@NonNull CqlIdentifier id) { - return getUdtValue(firstIndexOf(id)); - } - - /** - * Returns the value for the first occurrence of {@code id} as a tuple value. - * - *

By default, this works with CQL tuples. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @Nullable - default TupleValue getTupleValue(@NonNull CqlIdentifier id) { - return getTupleValue(firstIndexOf(id)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java deleted file mode 100644 index bb75bd9a2b4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java +++ /dev/null @@ -1,565 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveBooleanCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveByteCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveDoubleCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveFloatCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveShortCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to retrieve its values via an integer index. */ -public interface GettableByIndex extends AccessibleByIndex { - - /** - * Returns the raw binary representation of the {@code i}th value. - * - *

This is primarily for internal use; you'll likely want to use one of the typed getters - * instead, to get a higher-level Java representation. - * - * @return the raw value, or {@code null} if the CQL value is {@code NULL}. For performance - * reasons, this is the actual instance used internally. If you read data from the buffer, - * make sure to {@link ByteBuffer#duplicate() duplicate} it beforehand, or only use relative - * methods. If you change the buffer's index or its contents in any way, any other getter - * invocation for this value will have unpredictable results. - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - ByteBuffer getBytesUnsafe(int i); - - /** - * Indicates whether the {@code i}th value is a CQL {@code NULL}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default boolean isNull(int i) { - return getBytesUnsafe(i) == null; - } - - /** - * Returns the {@code i}th value, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default ValueT get(int i, TypeCodec codec) { - return codec.decode(getBytesUnsafe(i), protocolVersion()); - } - - /** - * Returns the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #get(int, Class)} instead, which may perform slightly better. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(int i, GenericType targetType) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetType); - return get(i, codec); - } - - /** - * Returns the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #get(int, GenericType)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(int i, Class targetClass) { - // This is duplicated from the GenericType variant, because we want to give the codec registry - // a chance to process the unwrapped class directly, if it can do so in a more efficient way. - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetClass); - return get(i, codec); - } - - /** - * Returns the {@code i}th value, converting it to the most appropriate Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

Use this method to dynamically inspect elements when types aren't known in advance, for - * instance if you're writing a generic row logger. If you know the target Java type, it is - * generally preferable to use typed variants, such as the ones for built-in types ({@link - * #getBoolean(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link - * #get(int, GenericType)} for custom types. - * - *

The definition of "most appropriate" is unspecified, and left to the appreciation of the - * {@link #codecRegistry()} implementation. By default, the driver uses the mapping described in - * the other {@code getXxx()} methods (for example {@link #getString(int) String for text, varchar - * and ascii}, etc). - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default Object getObject(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType); - return codec.decode(getBytesUnsafe(i), protocolVersion()); - } - - /** - * Returns the {@code i}th value as a Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code false}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Boolean.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default boolean getBoolean(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Boolean.class); - if (codec instanceof PrimitiveBooleanCodec) { - return ((PrimitiveBooleanCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Boolean value = get(i, codec); - return value == null ? false : value; - } - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getBoolean(int)}. - */ - @Deprecated - default boolean getBool(int i) { - return getBoolean(i); - } - - /** - * Returns the {@code i}th value as a Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Byte.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default byte getByte(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Byte.class); - if (codec instanceof PrimitiveByteCodec) { - return ((PrimitiveByteCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Byte value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Double.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default double getDouble(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Double.class); - if (codec instanceof PrimitiveDoubleCodec) { - return ((PrimitiveDoubleCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Double value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Float.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default float getFloat(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Float.class); - if (codec instanceof PrimitiveFloatCodec) { - return ((PrimitiveFloatCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Float value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Integer.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default int getInt(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Integer.class); - if (codec instanceof PrimitiveIntCodec) { - return ((PrimitiveIntCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Integer value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Long.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default long getLong(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Long.class); - if (codec instanceof PrimitiveLongCodec) { - return ((PrimitiveLongCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Long value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(int)} before calling this method, or use {@code get(i, Short.class)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - default short getShort(int i) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Short.class); - if (codec instanceof PrimitiveShortCodec) { - return ((PrimitiveShortCodec) codec).decodePrimitive(getBytesUnsafe(i), protocolVersion()); - } else { - Short value = get(i, codec); - return value == null ? 0 : value; - } - } - - /** - * Returns the {@code i}th value as a Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default Instant getInstant(int i) { - return get(i, Instant.class); - } - - /** - * Returns the {@code i}th value as a Java local date. - * - *

By default, this works with CQL type {@code date}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default LocalDate getLocalDate(int i) { - return get(i, LocalDate.class); - } - - /** - * Returns the {@code i}th value as a Java local time. - * - *

By default, this works with CQL type {@code time}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default LocalTime getLocalTime(int i) { - return get(i, LocalTime.class); - } - - /** - * Returns the {@code i}th value as a Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default ByteBuffer getByteBuffer(int i) { - return get(i, ByteBuffer.class); - } - - /** - * Returns the {@code i}th value as a Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default String getString(int i) { - return get(i, String.class); - } - - /** - * Returns the {@code i}th value as a Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default BigInteger getBigInteger(int i) { - return get(i, BigInteger.class); - } - - /** - * Returns the {@code i}th value as a Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default BigDecimal getBigDecimal(int i) { - return get(i, BigDecimal.class); - } - - /** - * Returns the {@code i}th value as a Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default UUID getUuid(int i) { - return get(i, UUID.class); - } - - /** - * Returns the {@code i}th value as a Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default InetAddress getInetAddress(int i) { - return get(i, InetAddress.class); - } - - /** - * Returns the {@code i}th value as a duration. - * - *

By default, this works with CQL type {@code duration}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default CqlDuration getCqlDuration(int i) { - return get(i, CqlDuration.class); - } - - /** - * Returns the {@code i}th value as a vector. - * - *

By default, this works with CQL type {@code vector}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default CqlVector getVector(int i, @NonNull Class elementsClass) { - return get(i, GenericType.vectorOf(elementsClass)); - } - - /** - * Returns the {@code i}th value as a token. - * - *

Note that, for simplicity, this method relies on the CQL type of the column to pick the - * correct token implementation. Therefore it must only be called on columns of the type that - * matches the partitioner in use for this cluster: {@code bigint} for {@code Murmur3Partitioner}, - * {@code blob} for {@code ByteOrderedPartitioner}, and {@code varint} for {@code - * RandomPartitioner}. Calling it for the wrong type will produce corrupt tokens that are unusable - * with this driver instance. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws IllegalArgumentException if the column type can not be converted to a known token type. - */ - @Nullable - default Token getToken(int i) { - DataType type = getType(i); - // Simply enumerate all known implementations. This goes against the concept of TokenFactory, - // but injecting the factory here is too much of a hassle. - // The only issue is if someone uses a custom partitioner, but this is highly unlikely, and even - // then they can get the value manually as a workaround. - if (type.equals(DataTypes.BIGINT)) { - return isNull(i) ? null : new Murmur3Token(getLong(i)); - } else if (type.equals(DataTypes.BLOB)) { - return isNull(i) ? null : new ByteOrderedToken(getByteBuffer(i)); - } else if (type.equals(DataTypes.VARINT)) { - return isNull(i) ? null : new RandomToken(getBigInteger(i)); - } else { - throw new IllegalArgumentException("Can't convert CQL type " + type + " into a token"); - } - } - - /** - * Returns the {@code i}th value as a Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #get(int, GenericType)}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default List getList(int i, @NonNull Class elementsClass) { - return get(i, GenericType.listOf(elementsClass)); - } - - /** - * Returns the {@code i}th value as a Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #get(int, GenericType)}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default Set getSet(int i, @NonNull Class elementsClass) { - return get(i, GenericType.setOf(elementsClass)); - } - - /** - * Returns the {@code i}th value as a Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #get(int, GenericType)}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default Map getMap( - int i, @NonNull Class keyClass, @NonNull Class valueClass) { - return get(i, GenericType.mapOf(keyClass, valueClass)); - } - - /** - * Returns the {@code i}th value as a user defined type value. - * - *

By default, this works with CQL user-defined types. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default UdtValue getUdtValue(int i) { - return get(i, UdtValue.class); - } - - /** - * Returns the {@code i}th value as a tuple value. - * - *

By default, this works with CQL tuples. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @Nullable - default TupleValue getTupleValue(int i) { - return get(i, TupleValue.class); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java deleted file mode 100644 index b0a4660033b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java +++ /dev/null @@ -1,672 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to retrieve its values via a name. */ -public interface GettableByName extends GettableByIndex, AccessibleByName { - - /** - * Returns the raw binary representation of the value for the first occurrence of {@code name}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed getters - * instead, to get a higher-level Java representation. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @return the raw value, or {@code null} if the CQL value is {@code NULL}. For performance - * reasons, this is the actual instance used internally. If you read data from the buffer, - * make sure to {@link ByteBuffer#duplicate() duplicate} it beforehand, or only use relative - * methods. If you change the buffer's index or its contents in any way, any other getter - * invocation for this value will have unpredictable results. - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default ByteBuffer getBytesUnsafe(@NonNull String name) { - return getBytesUnsafe(firstIndexOf(name)); - } - - /** - * Indicates whether the value for the first occurrence of {@code name} is a CQL {@code NULL}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default boolean isNull(@NonNull String name) { - return isNull(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name}, using the given codec for the - * conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default ValueT get(@NonNull String name, @NonNull TypeCodec codec) { - return get(firstIndexOf(name), codec); - } - - /** - * Returns the value for the first occurrence of {@code name}, converting it to the given Java - * type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #get(int, Class)} instead, which may perform slightly better. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull String name, @NonNull GenericType targetType) { - return get(firstIndexOf(name), targetType); - } - - /** - * Returns the value for the first occurrence of {@code name}, converting it to the given Java - * type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #get(int, GenericType)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default ValueT get(@NonNull String name, @NonNull Class targetClass) { - return get(firstIndexOf(name), targetClass); - } - - /** - * Returns the value for the first occurrence of {@code name}, converting it to the most - * appropriate Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

Use this method to dynamically inspect elements when types aren't known in advance, for - * instance if you're writing a generic row logger. If you know the target Java type, it is - * generally preferable to use typed variants, such as the ones for built-in types ({@link - * #getBoolean(int)}, {@link #getInt(int)}, etc.), or {@link #get(int, Class)} and {@link - * #get(int, GenericType)} for custom types. - * - *

The definition of "most appropriate" is unspecified, and left to the appreciation of the - * {@link #codecRegistry()} implementation. By default, the driver uses the mapping described in - * the other {@code getXxx()} methods (for example {@link #getString(int) String for text, varchar - * and ascii}, etc). - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @Nullable - default Object getObject(@NonNull String name) { - return getObject(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code false}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Boolean.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default boolean getBoolean(@NonNull String name) { - return getBoolean(firstIndexOf(name)); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #getBoolean(String)}. - */ - @Deprecated - default boolean getBool(@NonNull String name) { - return getBoolean(name); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Byte.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default byte getByte(@NonNull String name) { - return getByte(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Double.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default double getDouble(@NonNull String name) { - return getDouble(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0.0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Float.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default float getFloat(@NonNull String name) { - return getFloat(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Integer.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default int getInt(@NonNull String name) { - return getInt(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Long.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default long getLong(@NonNull String name) { - return getLong(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

Note that, due to its signature, this method cannot return {@code null}. If the CQL value is - * {@code NULL}, it will return {@code 0}. If this doesn't work for you, either call {@link - * #isNull(String)} before calling this method, or use {@code get(name, Short.class)} instead. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - default short getShort(@NonNull String name) { - return getShort(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default Instant getInstant(@NonNull String name) { - return getInstant(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default LocalDate getLocalDate(@NonNull String name) { - return getLocalDate(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default LocalTime getLocalTime(@NonNull String name) { - return getLocalTime(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default ByteBuffer getByteBuffer(@NonNull String name) { - return getByteBuffer(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default String getString(@NonNull String name) { - return getString(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default BigInteger getBigInteger(@NonNull String name) { - return getBigInteger(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default BigDecimal getBigDecimal(@NonNull String name) { - return getBigDecimal(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default UUID getUuid(@NonNull String name) { - return getUuid(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default InetAddress getInetAddress(@NonNull String name) { - return getInetAddress(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default CqlDuration getCqlDuration(@NonNull String name) { - return getCqlDuration(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a vector. - * - *

By default, this works with CQL type {@code vector}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default CqlVector getVector( - @NonNull String name, @NonNull Class elementsClass) { - return getVector(firstIndexOf(name), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a token. - * - *

Note that, for simplicity, this method relies on the CQL type of the column to pick the - * correct token implementation. Therefore it must only be called on columns of the type that - * matches the partitioner in use for this cluster: {@code bigint} for {@code Murmur3Partitioner}, - * {@code blob} for {@code ByteOrderedPartitioner}, and {@code varint} for {@code - * RandomPartitioner}. Calling it for the wrong type will produce corrupt tokens that are unusable - * with this driver instance. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the column type can not be converted to a known token type - * or if the name is invalid. - */ - @Nullable - default Token getToken(@NonNull String name) { - return getToken(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default List getList( - @NonNull String name, @NonNull Class elementsClass) { - return getList(firstIndexOf(name), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default Set getSet( - @NonNull String name, @NonNull Class elementsClass) { - return getSet(firstIndexOf(name), elementsClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #get(int, GenericType)}. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - *

Apache Cassandra does not make any distinction between an empty collection and {@code null}. - * Whether this method will return an empty collection or {@code null} will depend on the codec - * used; by default, the driver's built-in codecs all return empty collections. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default Map getMap( - @NonNull String name, @NonNull Class keyClass, @NonNull Class valueClass) { - return getMap(firstIndexOf(name), keyClass, valueClass); - } - - /** - * Returns the value for the first occurrence of {@code name} as a user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default UdtValue getUdtValue(@NonNull String name) { - return getUdtValue(firstIndexOf(name)); - } - - /** - * Returns the value for the first occurrence of {@code name} as a tuple value. - * - *

By default, this works with CQL tuples. - * - *

If an identifier appears multiple times, this can only be used to access the first value. - * For the other ones, use positional getters. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @Nullable - default TupleValue getTupleValue(@NonNull String name) { - return getTupleValue(firstIndexOf(name)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java deleted file mode 100644 index 0f5e3cd9daa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java +++ /dev/null @@ -1,734 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to set its values via a CQL identifier. */ -public interface SettableById> - extends SettableByIndex, AccessibleById { - - /** - * Sets the raw binary representation of the value for all occurrences of {@code id}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed setters - * instead, to pass a higher-level Java representation. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @param v the raw value, or {@code null} to set the CQL value {@code NULL}. For performance - * reasons, this is the actual instance used internally. If pass in a buffer that you're going - * to modify elsewhere in your application, make sure to {@link ByteBuffer#duplicate() - * duplicate} it beforehand. If you change the buffer's index or its contents in any way, - * further usage of this data will have unpredictable results. - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBytesUnsafe(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBytesUnsafe(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - @NonNull - @Override - default DataType getType(@NonNull CqlIdentifier id) { - return getType(firstIndexOf(id)); - } - - /** - * Sets the value for all occurrences of {@code id} to CQL {@code NULL}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToNull(@NonNull CqlIdentifier id) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setToNull(i); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id}, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull TypeCodec codec) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).set(i, v, codec); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #set(int, Object, Class)} instead, which may perform slightly better. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull GenericType targetType) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).set(i, v, targetType); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Returns the value for all occurrences of {@code id}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #set(int, Object, GenericType)} instead. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull Class targetClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).set(i, v, targetClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBoolean(@NonNull CqlIdentifier id, boolean v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBoolean(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setBoolean(CqlIdentifier, boolean)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setBool(@NonNull CqlIdentifier id, boolean v) { - return setBoolean(id, v); - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByte(@NonNull CqlIdentifier id, byte v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setByte(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Double.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setDouble(@NonNull CqlIdentifier id, double v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setDouble(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Float.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setFloat(@NonNull CqlIdentifier id, float v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setFloat(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Integer.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInt(@NonNull CqlIdentifier id, int v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setInt(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Long.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLong(@NonNull CqlIdentifier id, long v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setLong(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Short.class)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setShort(@NonNull CqlIdentifier id, short v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setShort(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInstant(@NonNull CqlIdentifier id, @Nullable Instant v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setInstant(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalDate(@NonNull CqlIdentifier id, @Nullable LocalDate v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setLocalDate(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalTime(@NonNull CqlIdentifier id, @Nullable LocalTime v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setLocalTime(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByteBuffer(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setByteBuffer(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setString(@NonNull CqlIdentifier id, @Nullable String v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setString(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigInteger(@NonNull CqlIdentifier id, @Nullable BigInteger v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBigInteger(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigDecimal(@NonNull CqlIdentifier id, @Nullable BigDecimal v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setBigDecimal(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUuid(@NonNull CqlIdentifier id, @Nullable UUID v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setUuid(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInetAddress(@NonNull CqlIdentifier id, @Nullable InetAddress v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setInetAddress(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setCqlDuration(@NonNull CqlIdentifier id, @Nullable CqlDuration v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setCqlDuration(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided {@code vector}. - * - *

By default, this works with CQL type {@code vector}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setVector( - @NonNull CqlIdentifier id, - @Nullable CqlVector v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setVector(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided token. - * - *

This works with the CQL type matching the partitioner in use for this cluster: {@code - * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and - * {@code varint} for {@code RandomPartitioner}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToken(@NonNull CqlIdentifier id, @NonNull Token v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setToken(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #set(int, Object, GenericType)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setList( - @NonNull CqlIdentifier id, - @Nullable List v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setList(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #set(int, Object, GenericType)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setSet( - @NonNull CqlIdentifier id, - @Nullable Set v, - @NonNull Class elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setSet(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #set(int, Object, GenericType)}. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setMap( - @NonNull CqlIdentifier id, - @Nullable Map v, - @NonNull Class keyClass, - @NonNull Class valueClass) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setMap(i, v, keyClass, valueClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUdtValue(@NonNull CqlIdentifier id, @Nullable UdtValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setUdtValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code id} to the provided tuple value. - * - *

By default, this works with CQL tuples. - * - *

If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of - * this method that takes a string argument. - * - * @throws IllegalArgumentException if the id is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setTupleValue(@NonNull CqlIdentifier id, @Nullable TupleValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(id)) { - result = (result == null ? this : result).setTupleValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java deleted file mode 100644 index 4ecdf647590..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java +++ /dev/null @@ -1,539 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveBooleanCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveByteCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveDoubleCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveFloatCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveShortCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to set its values via an integer index. */ -public interface SettableByIndex> extends AccessibleByIndex { - - /** - * Sets the raw binary representation of the {@code i}th value. - * - *

This is primarily for internal use; you'll likely want to use one of the typed setters - * instead, to pass a higher-level Java representation. - * - * @param v the raw value, or {@code null} to set the CQL value {@code NULL}. For performance - * reasons, this is the actual instance used internally. If pass in a buffer that you're going - * to modify elsewhere in your application, make sure to {@link ByteBuffer#duplicate() - * duplicate} it beforehand. If you change the buffer's index or its contents in any way, - * further usage of this data will have unpredictable results. - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - SelfT setBytesUnsafe(int i, @Nullable ByteBuffer v); - - /** - * Sets the {@code i}th value to CQL {@code NULL}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToNull(int i) { - return setBytesUnsafe(i, null); - } - - /** - * Sets the {@code i}th value, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT set(int i, @Nullable ValueT v, @NonNull TypeCodec codec) { - return setBytesUnsafe(i, codec.encode(v, protocolVersion())); - } - - /** - * Sets the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #set(int, Object, Class)} instead, which may perform slightly better. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set(int i, @Nullable ValueT v, @NonNull GenericType targetType) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetType); - return set(i, v, codec); - } - - /** - * Returns the {@code i}th value, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #set(int, Object, GenericType)} instead. - * - * @throws IndexOutOfBoundsException if the index is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set(int i, @Nullable ValueT v, @NonNull Class targetClass) { - // This is duplicated from the GenericType variant, because we want to give the codec registry - // a chance to process the unwrapped class directly, if it can do so in a more efficient way. - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, targetClass); - return set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBoolean(int i, boolean v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Boolean.class); - return (codec instanceof PrimitiveBooleanCodec) - ? setBytesUnsafe(i, ((PrimitiveBooleanCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #setBoolean(int, boolean)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setBool(int i, boolean v) { - return setBoolean(i, v); - } - - /** - * Sets the {@code i}th value to the provided Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByte(int i, byte v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Byte.class); - return (codec instanceof PrimitiveByteCodec) - ? setBytesUnsafe(i, ((PrimitiveByteCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Double.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setDouble(int i, double v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Double.class); - return (codec instanceof PrimitiveDoubleCodec) - ? setBytesUnsafe(i, ((PrimitiveDoubleCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Float.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setFloat(int i, float v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Float.class); - return (codec instanceof PrimitiveFloatCodec) - ? setBytesUnsafe(i, ((PrimitiveFloatCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Integer.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInt(int i, int v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Integer.class); - return (codec instanceof PrimitiveIntCodec) - ? setBytesUnsafe(i, ((PrimitiveIntCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Long.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLong(int i, long v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Long.class); - return (codec instanceof PrimitiveLongCodec) - ? setBytesUnsafe(i, ((PrimitiveLongCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Short.class)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setShort(int i, short v) { - DataType cqlType = getType(i); - TypeCodec codec = codecRegistry().codecFor(cqlType, Short.class); - return (codec instanceof PrimitiveShortCodec) - ? setBytesUnsafe(i, ((PrimitiveShortCodec) codec).encodePrimitive(v, protocolVersion())) - : set(i, v, codec); - } - - /** - * Sets the {@code i}th value to the provided Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInstant(int i, @Nullable Instant v) { - return set(i, v, Instant.class); - } - - /** - * Sets the {@code i}th value to the provided Java local date. - * - *

By default, this works with CQL type {@code date}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalDate(int i, @Nullable LocalDate v) { - return set(i, v, LocalDate.class); - } - - /** - * Sets the {@code i}th value to the provided Java local time. - * - *

By default, this works with CQL type {@code time}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalTime(int i, @Nullable LocalTime v) { - return set(i, v, LocalTime.class); - } - - /** - * Sets the {@code i}th value to the provided Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByteBuffer(int i, @Nullable ByteBuffer v) { - return set(i, v, ByteBuffer.class); - } - - /** - * Sets the {@code i}th value to the provided Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setString(int i, @Nullable String v) { - return set(i, v, String.class); - } - - /** - * Sets the {@code i}th value to the provided Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigInteger(int i, @Nullable BigInteger v) { - return set(i, v, BigInteger.class); - } - - /** - * Sets the {@code i}th value to the provided Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigDecimal(int i, @Nullable BigDecimal v) { - return set(i, v, BigDecimal.class); - } - - /** - * Sets the {@code i}th value to the provided Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUuid(int i, @Nullable UUID v) { - return set(i, v, UUID.class); - } - - /** - * Sets the {@code i}th value to the provided Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInetAddress(int i, @Nullable InetAddress v) { - return set(i, v, InetAddress.class); - } - - /** - * Sets the {@code i}th value to the provided duration. - * - *

By default, this works with CQL type {@code duration}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setCqlDuration(int i, @Nullable CqlDuration v) { - return set(i, v, CqlDuration.class); - } - - /** - * Sets the {@code i}th value to the provided vector. - * - *

By default, this works with CQL type {@code vector}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setVector( - int i, @Nullable CqlVector v, @NonNull Class elementsClass) { - return set(i, v, GenericType.vectorOf(elementsClass)); - } - - /** - * Sets the {@code i}th value to the provided token. - * - *

This works with the CQL type matching the partitioner in use for this cluster: {@code - * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and - * {@code varint} for {@code RandomPartitioner}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToken(int i, @NonNull Token v) { - // Simply enumerate all known implementations. This goes against the concept of TokenFactory, - // but injecting the factory here is too much of a hassle. - // The only issue is if someone uses a custom partitioner, but this is highly unlikely, and even - // then they can set the value manually as a workaround. - if (v instanceof Murmur3Token) { - return setLong(i, ((Murmur3Token) v).getValue()); - } else if (v instanceof ByteOrderedToken) { - return setByteBuffer(i, ((ByteOrderedToken) v).getValue()); - } else if (v instanceof RandomToken) { - return setBigInteger(i, ((RandomToken) v).getValue()); - } else { - throw new IllegalArgumentException("Unsupported token type " + v.getClass()); - } - } - - /** - * Sets the {@code i}th value to the provided Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #set(int, Object, GenericType)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setList( - int i, @Nullable List v, @NonNull Class elementsClass) { - return set(i, v, GenericType.listOf(elementsClass)); - } - - /** - * Sets the {@code i}th value to the provided Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #set(int, Object, GenericType)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setSet( - int i, @Nullable Set v, @NonNull Class elementsClass) { - return set(i, v, GenericType.setOf(elementsClass)); - } - - /** - * Sets the {@code i}th value to the provided Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #set(int, Object, GenericType)}. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setMap( - int i, - @Nullable Map v, - @NonNull Class keyClass, - @NonNull Class valueClass) { - return set(i, v, GenericType.mapOf(keyClass, valueClass)); - } - - /** - * Sets the {@code i}th value to the provided user defined type value. - * - *

By default, this works with CQL user-defined types. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUdtValue(int i, @Nullable UdtValue v) { - return set(i, v, UdtValue.class); - } - - /** - * Sets the {@code i}th value to the provided tuple value. - * - *

By default, this works with CQL tuples. - * - * @throws IndexOutOfBoundsException if the index is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setTupleValue(int i, @Nullable TupleValue v) { - return set(i, v, TupleValue.class); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java deleted file mode 100644 index afe9ba59f64..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java +++ /dev/null @@ -1,729 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** A data structure that provides methods to set its values via a name. */ -public interface SettableByName> - extends SettableByIndex, AccessibleByName { - - /** - * Sets the raw binary representation of the value for all occurrences of {@code name}. - * - *

This is primarily for internal use; you'll likely want to use one of the typed setters - * instead, to pass a higher-level Java representation. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @param v the raw value, or {@code null} to set the CQL value {@code NULL}. For performance - * reasons, this is the actual instance used internally. If pass in a buffer that you're going - * to modify elsewhere in your application, make sure to {@link ByteBuffer#duplicate() - * duplicate} it beforehand. If you change the buffer's index or its contents in any way, - * further usage of this data will have unpredictable results. - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBytesUnsafe(@NonNull String name, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBytesUnsafe(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - @NonNull - @Override - default DataType getType(@NonNull String name) { - return getType(firstIndexOf(name)); - } - - /** - * Sets the value for all occurrences of {@code name} to CQL {@code NULL}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToNull(@NonNull String name) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setToNull(i); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name}, using the given codec for the conversion. - * - *

This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use - * the given codec instead. This can be useful if the codec would collide with a previously - * registered one, or if you want to use the codec just once without registering it. - * - *

It is the caller's responsibility to ensure that the given codec is appropriate for the - * conversion. Failing to do so will result in errors at runtime. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull String name, @Nullable ValueT v, @NonNull TypeCodec codec) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).set(i, v, codec); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

This variant is for generic Java types. If the target type is not generic, use {@link - * #set(int, Object, Class)} instead, which may perform slightly better. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull String name, @Nullable ValueT v, @NonNull GenericType targetType) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).set(i, v, targetType); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Returns the value for all occurrences of {@code name}, converting it to the given Java type. - * - *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. - * - *

If the target type is generic, use {@link #set(int, Object, GenericType)} instead. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - * @throws CodecNotFoundException if no codec can perform the conversion. - */ - @NonNull - @CheckReturnValue - default SelfT set( - @NonNull String name, @Nullable ValueT v, @NonNull Class targetClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).set(i, v, targetClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive boolean. - * - *

By default, this works with CQL type {@code boolean}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBoolean(@NonNull String name, boolean v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBoolean(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias - * for{@link #setBoolean(String, boolean)}. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT setBool(@NonNull String name, boolean v) { - return setBoolean(name, v); - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive byte. - * - *

By default, this works with CQL type {@code tinyint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Boolean.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByte(@NonNull String name, byte v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setByte(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive double. - * - *

By default, this works with CQL type {@code double}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Double.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setDouble(@NonNull String name, double v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setDouble(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive float. - * - *

By default, this works with CQL type {@code float}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Float.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setFloat(@NonNull String name, float v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setFloat(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive integer. - * - *

By default, this works with CQL type {@code int}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Integer.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInt(@NonNull String name, int v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setInt(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive long. - * - *

By default, this works with CQL types {@code bigint} and {@code counter}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Long.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLong(@NonNull String name, long v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setLong(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java primitive short. - * - *

By default, this works with CQL type {@code smallint}. - * - *

To set the value to CQL {@code NULL}, use {@link #setToNull(int)}, or {@code set(i, v, - * Short.class)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setShort(@NonNull String name, short v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setShort(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java instant. - * - *

By default, this works with CQL type {@code timestamp}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInstant(@NonNull String name, @Nullable Instant v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setInstant(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java local date. - * - *

By default, this works with CQL type {@code date}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalDate(@NonNull String name, @Nullable LocalDate v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setLocalDate(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java local time. - * - *

By default, this works with CQL type {@code time}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setLocalTime(@NonNull String name, @Nullable LocalTime v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setLocalTime(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java byte buffer. - * - *

By default, this works with CQL type {@code blob}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setByteBuffer(@NonNull String name, @Nullable ByteBuffer v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setByteBuffer(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java string. - * - *

By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setString(@NonNull String name, @Nullable String v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setString(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java big integer. - * - *

By default, this works with CQL type {@code varint}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigInteger(@NonNull String name, @Nullable BigInteger v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBigInteger(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java big decimal. - * - *

By default, this works with CQL type {@code decimal}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setBigDecimal(@NonNull String name, @Nullable BigDecimal v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setBigDecimal(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java UUID. - * - *

By default, this works with CQL types {@code uuid} and {@code timeuuid}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUuid(@NonNull String name, @Nullable UUID v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setUuid(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java IP address. - * - *

By default, this works with CQL type {@code inet}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setInetAddress(@NonNull String name, @Nullable InetAddress v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setInetAddress(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided duration. - * - *

By default, this works with CQL type {@code duration}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setCqlDuration(@NonNull String name, @Nullable CqlDuration v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setCqlDuration(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided vector. - * - *

By default, this works with CQL type {@code vector}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default <ElementT> SelfT setVector( - @NonNull String name, - @Nullable CqlVector<ElementT> v, - @NonNull Class<ElementT> elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setVector(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided token. - * - *

This works with the CQL type matching the partitioner in use for this cluster: {@code - * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and - * {@code varint} for {@code RandomPartitioner}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setToken(@NonNull String name, @NonNull Token v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setToken(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java list. - * - *

By default, this works with CQL type {@code list}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex list types, use {@link #set(int, Object, GenericType)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default <ElementT> SelfT setList( - @NonNull String name, @Nullable List<ElementT> v, @NonNull Class<ElementT> elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setList(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java set. - * - *

By default, this works with CQL type {@code set}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex set types, use {@link #set(int, Object, GenericType)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default <ElementT> SelfT setSet( - @NonNull String name, @Nullable Set<ElementT> v, @NonNull Class<ElementT> elementsClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setSet(i, v, elementsClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided Java map. - * - *

By default, this works with CQL type {@code map}. - * - *

This method is provided for convenience when the element type is a non-generic type. For - * more complex map types, use {@link #set(int, Object, GenericType)}. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default <KeyT, ValueT> SelfT setMap( - @NonNull String name, - @Nullable Map<KeyT, ValueT> v, - @NonNull Class<KeyT> keyClass, - @NonNull Class<ValueT> valueClass) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setMap(i, v, keyClass, valueClass); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided user defined type value. - * - *

By default, this works with CQL user-defined types. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setUdtValue(@NonNull String name, @Nullable UdtValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setUdtValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } - - /** - * Sets the value for all occurrences of {@code name} to the provided tuple value. - * - *

By default, this works with CQL tuples. - * - *

This method deals with case sensitivity in the way explained in the documentation of {@link - * AccessibleByName}. - * - * @throws IllegalArgumentException if the name is invalid. - */ - @NonNull - @CheckReturnValue - default SelfT setTupleValue(@NonNull String name, @Nullable TupleValue v) { - SelfT result = null; - for (Integer i : allIndicesOf(name)) { - result = (result == null ? this : result).setTupleValue(i, v); - } - assert result != null; // allIndices throws if there are no results - return result; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java deleted file mode 100644 index 0fde2d87e71..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.TupleType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Driver-side representation of a CQL {@code tuple} value. - * - *

It is an ordered set of anonymous, typed fields. - * - *

A tuple value is attached if and only if its type is attached (see {@link Detachable}). - * - *

The default implementation returned by the driver is mutable and serializable. If you write - * your own implementation, serializability is not mandatory, but recommended for use with some - * 3rd-party tools like Apache Spark ™. - */ -public interface TupleValue extends GettableByIndex, SettableByIndex<TupleValue> { - - @NonNull - TupleType getType(); - - /** - * Returns a string representation of the contents of this tuple. - * - *

This produces a CQL literal, for example: - * - *

-   * (1,'test')
-   * 
- * - * Notes: - * - *
    - *
  • This method does not sanitize its output in any way. In particular, no effort is made to - * limit output size: all fields are included, and large strings or blobs will be appended - * as-is. - *
  • Be mindful of how you expose the result. For example, in high-security environments, it - * might be undesirable to leak data in application logs. - *
- */ - @NonNull - default String getFormattedContents() { - return codecRegistry().codecFor(getType(), TupleValue.class).format(this); - } - - /** - * Returns an abstract representation of this object, that may not include the tuple's - * contents. - * - *

The driver's built-in {@link TupleValue} implementation returns the default format of {@link - * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. - * - *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to - * accidentally leak data (e.g. in application logs). If you want the contents, use {@link - * #getFormattedContents()}. - */ - @Override - String toString(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java deleted file mode 100644 index 7e8bc80793b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Driver-side representation of an instance of a CQL user defined type. - * - *

It is an ordered set of named, typed fields. - * - *

A UDT value is attached if and only if its type is attached (see {@link Detachable}). - * - *

The default implementation returned by the driver is mutable and serializable. If you write - * your own implementation, serializability is not mandatory, but recommended for use with some - * 3rd-party tools like Apache Spark ™. - */ -public interface UdtValue - extends GettableById, GettableByName, SettableById<UdtValue>, SettableByName<UdtValue> { - - @NonNull - UserDefinedType getType(); - - /** - * Returns a string representation of the contents of this UDT. - * - *

This produces a CQL literal, for example: - * - *

-   * {street:'42 Main Street',zip:12345}
-   * 
- * - * Notes: - * - *
    - *
  • This method does not sanitize its output in any way. In particular, no effort is made to - * limit output size: all fields are included, and large strings or blobs will be appended - * as-is. - *
  • Be mindful of how you expose the result. For example, in high-security environments, it - * might be undesirable to leak data in application logs. - *
- */ - @NonNull - default String getFormattedContents() { - return codecRegistry().codecFor(getType(), UdtValue.class).format(this); - } - - /** - * Returns an abstract representation of this object, that may not include the UDT's - * contents. - * - *

The driver's built-in {@link UdtValue} implementation returns the default format of {@link - * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. - * - *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to - * accidentally leak data (e.g. in application logs). If you want the contents, use {@link - * #getFormattedContents()}. - */ - @Override - String toString(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java b/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java deleted file mode 100644 index d1897f66e16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.detach; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** @see Detachable */ -public interface AttachmentPoint { - AttachmentPoint NONE = - new AttachmentPoint() { - @NonNull - @Override - public ProtocolVersion getProtocolVersion() { - return ProtocolVersion.DEFAULT; - } - - @NonNull - @Override - public CodecRegistry getCodecRegistry() { - return CodecRegistry.DEFAULT; - } - }; - - @NonNull - ProtocolVersion getProtocolVersion(); - - /** - * Note that the default registry implementation returned by the driver also implements {@link - * MutableCodecRegistry}, which allows you to register new codecs at runtime. You can safely cast - * the result of this method (as long as you didn't extend the driver context to plug a custom - * registry implementation). - */ - @NonNull - CodecRegistry getCodecRegistry(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java b/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java deleted file mode 100644 index 0c92bb727ea..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.detach; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.Data; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Defines the contract of an object that can be detached and reattached to a driver instance. - * - *

The driver's {@link Data data structure} types (such as rows, tuples and UDT values) store - * their data as byte buffers, and only decode it on demand, when the end user accesses a particular - * column or field. - * - *

Decoding requires a {@link ProtocolVersion} (because the encoded format might change across - * versions), and a {@link CodecRegistry} (because the user might ask us to decode to a custom - * type). - * - *

    - *
  • When a data container was obtained from a driver instance (for example, reading a row from - * a result set, or reading a value from a UDT column), it is attached: its protocol - * version and registry are those of the driver. - *
  • When it is created manually by the user (for example, creating an instance from a manually - * created {@link TupleType}), it is detached: it uses {@link - * ProtocolVersion#DEFAULT} and {@link CodecRegistry#DEFAULT}. - *
- * - * The only way an attached object can become detached is if it is serialized and deserialized - * (referring to Java serialization). - * - *

A detached object can be reattached to a driver instance. This is done automatically if you - * pass the object to one of the driver methods, for example if you use a manually created tuple as - * a query parameter. - */ -public interface Detachable { - boolean isDetached(); - - void attach(@NonNull AttachmentPoint attachmentPoint); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java deleted file mode 100644 index de0d9db4ebd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.loadbalancing; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.UUID; - -/** Decides which Cassandra nodes to contact for each query. */ -public interface LoadBalancingPolicy extends AutoCloseable { - - /** - * Returns an optional {@link RequestTracker} to be registered with the session. Registering a - * request tracker allows load-balancing policies to track node latencies in order to pick the - * fastest ones. - * - *

This method is invoked only once during session configuration, and before any other methods - * in this interface. Note that at this point, the driver hasn't connected to any node yet. - * - * @since 4.13.0 - */ - @NonNull - default Optional<RequestTracker> getRequestTracker() { - return Optional.empty(); - } - - /** - * Initializes this policy with the nodes discovered during driver initialization. - * - *

This method is guaranteed to be called exactly once per instance, and before any other - * method in this interface except {@link #getRequestTracker()}. At this point, the driver has - * successfully connected to one of the contact points, and performed a first refresh of topology - * information (by default, the contents of {@code system.peers}), to discover other nodes in the - * cluster. - * - *

This method must call {@link DistanceReporter#setDistance(Node, NodeDistance) - * distanceReporter.setDistance} for each provided node (otherwise that node will stay at distance - * {@link NodeDistance#IGNORED IGNORED}, and the driver won't open connections to it). Note that - * the node's {@link Node#getState() state} can be either {@link NodeState#UP UP} (for the - * successful contact point), {@link NodeState#DOWN DOWN} (for contact points that were tried - * unsuccessfully), or {@link NodeState#UNKNOWN UNKNOWN} (for contact points that weren't tried, - * or any other node discovered from the topology refresh). Node states may be updated - * concurrently while this method executes, but if so this policy will get notified after this - * method has returned, through other methods such as {@link #onUp(Node)} or {@link - * #onDown(Node)}. - * - * @param nodes all the nodes that are known to exist in the cluster (regardless of their state) - * at the time of invocation. - * @param distanceReporter an object that will be used by the policy to signal distance changes. - * Implementations will typically store this in a field, since new nodes may get {@link - * #onAdd(Node) added} later and will need to have their distance set (or the policy might - * change distances dynamically over time). - */ - void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter); - - /** Returns map containing details that impact C* node connectivity. */ - @NonNull - default Map getStartupConfiguration() { - return Collections.emptyMap(); - } - - /** - * Returns the coordinators to use for a new query. - * - *

Each new query will call this method, and try the returned nodes sequentially. - * - * @param request the request that is being routed. Note that this can be null for some internal - * uses. - * @param session the session that is executing the request. Note that this can be null for some - * internal uses. - * @return the list of coordinators to try. This must be a concurrent queue; {@link - * java.util.concurrent.ConcurrentLinkedQueue} is a good choice. - */ - @NonNull - Queue<Node> newQueryPlan(@Nullable Request request, @Nullable Session session); - - /** - * Called when a node is added to the cluster. - * - *

The new node will be at distance {@link NodeDistance#IGNORED IGNORED}, and have the state - * {@link NodeState#UNKNOWN UNKNOWN}. - * - *

If this method assigns an active distance to the node, the driver will try to create a - * connection pool to it (resulting in a state change to {@link #onUp(Node) UP} or {@link - * #onDown(Node) DOWN} depending on the outcome). - * - *

If it leaves it at distance {@link NodeDistance#IGNORED IGNORED}, the driver won't attempt - * any connection. The node state will remain unknown, but might be updated later if a topology - * event is received from the cluster. - * - * @see #init(Map, DistanceReporter) - */ - void onAdd(@NonNull Node node); - - /** Called when a node is determined to be up. */ - void onUp(@NonNull Node node); - - /** Called when a node is determined to be down. */ - void onDown(@NonNull Node node); - - /** Called when a node is removed from the cluster. */ - void onRemove(@NonNull Node node); - - /** Called when the cluster that this policy is associated with closes. */ - @Override - void close(); - - /** An object that the policy uses to signal decisions it makes about node distances. */ - interface DistanceReporter { - void setDistance(@NonNull Node node, @NonNull NodeDistance distance); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java deleted file mode 100644 index aaae7957d00..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.loadbalancing; -/** - * Determines how the driver will manage connections to a Cassandra node. - * - *

The distance is assigned by a {@link LoadBalancingPolicy}. - */ -public enum NodeDistance { - /** - * An "active" distance that, indicates that the driver should maintain connections to the node; - * it also marks it as "preferred", meaning that the number or capacity of the connections may be - * higher, and that the node may also have priority for some tasks (for example, being chosen as - * the control host). - */ - LOCAL, - /** - * An "active" distance that, indicates that the driver should maintain connections to the node; - * it also marks it as "less preferred", meaning that the number or capacity of the connections - * may be lower, and that other nodes may have a higher priority for some tasks (for example, - * being chosen as the control host). - */ - REMOTE, - /** - * An "inactive" distance, that indicates that the driver will not open any connection to the - * node. - */ - IGNORED, -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java deleted file mode 100644 index 9a5a7f5a894..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.loadbalancing; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A pluggable {@link NodeDistance} evaluator. - * - *

Node distance evaluators are recognized by all the driver built-in load balancing policies. - * They can be specified {@linkplain - * com.datastax.oss.driver.api.core.session.SessionBuilder#withNodeDistanceEvaluator(String, - * NodeDistanceEvaluator) programmatically} or through the configuration (with the {@code - * load-balancing-policy.evaluator.class} option). - * - * @see com.datastax.oss.driver.api.core.session.SessionBuilder#withNodeDistanceEvaluator(String, - * NodeDistanceEvaluator) - */ -@FunctionalInterface -public interface NodeDistanceEvaluator { - - /** - * Evaluates the distance to apply to the given node. - * - *

This method will be invoked each time the {@link LoadBalancingPolicy} processes a topology - * or state change, and will be passed the node being inspected, and the local datacenter name (or - * null if none is defined). If it returns a non-null {@link NodeDistance}, the policy will - * suggest that distance for the node; if it returns null, the policy will assign a default - * distance instead, based on its internal algorithm for computing node distances. - * - * @param node The node to assign a new distance to. - * @param localDc The local datacenter name, if defined, or null otherwise. - * @return The {@link NodeDistance} to assign to the node, or null to let the policy decide. - */ - @Nullable - NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java deleted file mode 100644 index 530f2ad38ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.net.SocketAddress; - -/** - * Encapsulates the information needed to open connections to a node. - * - *

By default, the driver assumes plain TCP connections, and this is just a wrapper around an - * {@link InetSocketAddress}. However, more complex deployment scenarios might use a custom - * implementation that contains additional information; for example, if the nodes are accessed - * through a proxy with SNI routing, an SNI server name is needed in addition to the proxy address. - */ -public interface EndPoint { - - /** - * Resolves this instance to a socket address. - * - *

This will be called each time the driver opens a new connection to the node. The returned - * address cannot be null. - */ - @NonNull - SocketAddress resolve(); - - /** - * Returns an alternate string representation for use in node-level metric names. - * - *

Because metrics names are path-like, dot-separated strings, raw IP addresses don't make very - * good identifiers. So this method will typically replace the dots by another character, for - * example {@code 127_0_0_1_9042}. - */ - @NonNull - String asMetricPrefix(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java deleted file mode 100644 index 21ad200abed..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** - * The metadata of the Cassandra cluster that this driver instance is connected to. - * - *

Updates to this object are guaranteed to be atomic: the node list, schema, and token metadata - * are immutable, and will always be consistent for a given metadata instance. The node instances - * are the only mutable objects in the hierarchy, and some of their fields will be modified - * dynamically (in particular the node state). - * - * @see Session#getMetadata() - */ -public interface Metadata { - /** - * The nodes known to the driver, indexed by their unique identifier ({@code host_id} in {@code - * system.local}/{@code system.peers}). This might include nodes that are currently viewed as - * down, or ignored by the load balancing policy. - */ - @NonNull - Map getNodes(); - - /** - * Finds the node with the given {@linkplain Node#getEndPoint() connection information}, if it - * exists. - * - *

Note that this method performs a linear search of {@link #getNodes()}. - */ - @NonNull - default Optional findNode(@NonNull EndPoint endPoint) { - for (Node node : getNodes().values()) { - if (node.getEndPoint().equals(endPoint)) { - return Optional.of(node); - } - } - return Optional.empty(); - } - - /** - * Finds the node with the given untranslated {@linkplain Node#getBroadcastRpcAddress() - * broadcast RPC address}, if it exists. - * - *

Note that this method performs a linear search of {@link #getNodes()}. - */ - @NonNull - default Optional findNode(@NonNull InetSocketAddress broadcastRpcAddress) { - for (Node node : getNodes().values()) { - Optional o = node.getBroadcastRpcAddress(); - if (o.isPresent() && o.get().equals(broadcastRpcAddress)) { - return Optional.of(node); - } - } - return Optional.empty(); - } - - /** - * The keyspaces defined in this cluster. - * - *

Note that schema metadata can be disabled or restricted to a subset of keyspaces, therefore - * this map might be empty or incomplete. - * - * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED - * @see Session#setSchemaMetadataEnabled(Boolean) - * @see DefaultDriverOption#METADATA_SCHEMA_REFRESHED_KEYSPACES - */ - @NonNull - Map getKeyspaces(); - - @NonNull - default Optional getKeyspace(@NonNull CqlIdentifier keyspaceId) { - return Optional.ofNullable(getKeyspaces().get(keyspaceId)); - } - - /** - * Shortcut for {@link #getKeyspace(CqlIdentifier) - * getKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - */ - @NonNull - default Optional getKeyspace(@NonNull String keyspaceName) { - return getKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * The token map for this cluster. - * - *

Note that this property might be absent if token metadata was disabled, or if there was a - * runtime error while computing the map (this would generate a warning log). - * - * @see DefaultDriverOption#METADATA_TOKEN_MAP_ENABLED - */ - @NonNull - Optional getTokenMap(); - - /** - * The cluster name to which this session is connected. The Optional returned should contain the - * value from the server for system.local.cluster_name. - * - *

Note that this method has a default implementation for backwards compatibility. It is - * expected that any implementing classes override this method. - */ - @NonNull - default Optional getClusterName() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java deleted file mode 100644 index fbfc748dd52..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** - * Metadata about a Cassandra node in the cluster. - * - *

This object is mutable, all of its properties may be updated at runtime to reflect the latest - * state of the node. - * - *

Note that the default implementation returned by the driver uses reference equality. A - * {@link Session} will always return the same instance for a given {@link #getHostId() host id}. - * However, instances coming from different sessions will not be equal, even if they refer to the - * same host id. - */ -public interface Node { - - /** - * The information that the driver uses to connect to the node. - * - *

In default deployments, the endpoint address is usually derived from the node's {@linkplain - * #getBroadcastAddress() broadcast RPC address} for peers hosts. For the control host however, - * the driver doesn't rely on that value because it may be wrong (see CASSANDRA-11181); instead, - * it simply uses the control connection's own endpoint. - * - *

When behind a proxy, the endpoint reported here usually refers to the proxy itself, and is - * unrelated to the node's broadcast RPC address. - */ - @NonNull - EndPoint getEndPoint(); - - /** - * The node's broadcast RPC address. That is, the address that the node expects clients to connect - * to. - * - *

This is computed from values reported in {@code system.local.rpc_address} and {@code - * system.peers.rpc_address} (Cassandra 3), or {@code system.local.rpc_address}, {@code - * system.local.rpc_port}, {@code system.peers_v2.native_address} and {@code - * system.peers_v2.native_port} (Cassandra 4+). - * - *

However, the address reported here might not be what the driver uses directly; to know which - * address the driver is really using to connect to this node, check {@link #getEndPoint()}. - * - *

This may not be known at all times. In particular, some Cassandra versions (less than - * 2.0.16, 2.1.6 or 2.2.0-rc1) don't store it in the {@code system.local} table, so this will be - * unknown for the control node, until the control connection reconnects to another node. - * - * @see CASSANDRA-9436 (where the - * information was added to system.local) - */ - @NonNull - Optional getBroadcastRpcAddress(); - - /** - * The node's broadcast address. That is, the address that other nodes use to communicate with - * that node. - * - *

This is computed from values reported in {@code system.local.broadcast_address} and {@code - * system.peers.peer} (Cassandra 3), or {@code system.local.broadcast_address}, {@code - * system.local.broadcast_port}, {@code system.peers_v2.peer} and {@code - * system.peers_v2.peer_port} (Cassandra 4+). If the port is set to 0 it is unknown. - * - *

This may not be known at all times. In particular, some Cassandra versions (less than - * 2.0.16, 2.1.6 or 2.2.0-rc1) don't store it in the {@code system.local} table, so this will be - * unknown for the control node, until the control connection reconnects to another node. - * - * @see CASSANDRA-9436 (where the - * information was added to system.local) - */ - @NonNull - Optional getBroadcastAddress(); - - /** - * The node's listen address. That is, the address that the Cassandra process binds to. - * - *

This is computed from values reported in {@code system.local.listen_address} (Cassandra 3), - * or {@code system.local.listen_address} and {@code system.local.listen_port} (Cassandra 4+). If - * the port is set to 0 it is unknown. - * - *

This may not be known at all times. In particular, current Cassandra versions (up to 3.11) - * only store it in {@code system.local}, so this will be known only for the control node. - */ - @NonNull - Optional getListenAddress(); - - /** - * The datacenter that this node belongs to (according to the server-side snitch). - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted. - */ - @Nullable - String getDatacenter(); - - /** - * The rack that this node belongs to (according to the server-side snitch). - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted. - */ - @Nullable - String getRack(); - - /** - * The Cassandra version of the server. - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted or the reported version could - * not be parsed. - */ - @Nullable - Version getCassandraVersion(); - - /** - * An additional map of free-form properties. - * - *

This is intended for future evolution or custom driver extensions. The contents of this map - * are unspecified and may change at any point in time, always check for the existence of a key - * before using it. - * - *

Note that the returned map is immutable: if the properties change, this is reflected by - * publishing a new map instance, therefore you must call this method again to see the changes. - */ - @NonNull - Map getExtras(); - - @NonNull - NodeState getState(); - - /** - * The last time that this node transitioned to the UP state, in milliseconds since the epoch, or - * -1 if it's not up at the moment. - */ - long getUpSinceMillis(); - - /** - * The total number of active connections currently open by this driver instance to the node. This - * can be either pooled connections, or the control connection. - */ - int getOpenConnections(); - - /** - * Whether the driver is currently trying to reconnect to this node. That is, whether the active - * connection count is below the value mandated by the configuration. This does not mean that the - * node is down, there could be some active connections but not enough. - */ - boolean isReconnecting(); - - /** - * The distance assigned to this node by the {@link LoadBalancingPolicy}, that controls certain - * aspects of connection management. - * - *

This is exposed here for information only. Distance events are handled internally by the - * driver. - */ - @NonNull - NodeDistance getDistance(); - - /** - * The host ID that is assigned to this node by Cassandra. This value can be used to uniquely - * identify a node even when the underling IP address changes. - * - *

This information is always present once the session has initialized. However, there is a - * narrow corner case where a driver client can observe a null value: if a {@link - * NodeStateListener} is registered, the very first {@code onUp} call will reference a node - * that has a null id (that node is the initial contact point, and the driver hasn't read host ids - * from {@code system.local} and {@code system.peers} yet). Beyond that point — including - * any other {@code onUp} call — the host id will always be present. - * - *

-   * CqlSession session = CqlSession.builder()
-   *     .withNodeStateListener(
-   *         new NodeStateListenerBase() {
-   *           @Override
-   *           public void onUp(@NonNull Node node) {
-   *             // node.getHostId() == null for the first invocation only
-   *           }
-   *         })
-   *     .build();
-   * 
- */ - @Nullable - UUID getHostId(); - - /** - * The current version that is associated with the node's schema. - * - *

This should be non-null in a healthy deployment, but the driver will still function, and - * report {@code null} here, if the server metadata was corrupted. - */ - @Nullable - UUID getSchemaVersion(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java deleted file mode 100644 index 2f2460886ef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import java.net.InetSocketAddress; - -/** The state of a node, as viewed from the driver. */ -public enum NodeState { - /** - * The driver has never tried to connect to the node, nor received any topology events about it. - * - *

This happens when nodes are first added to the cluster, and will persist if your {@link - * LoadBalancingPolicy} decides to ignore them. Since the driver does not connect to them, the - * only way it can assess their states is from topology events. - */ - UNKNOWN, - /** - * A node is considered up in either of the following situations: 1) the driver has at least one - * active connection to the node, or 2) the driver is not actively trying to connect to the node - * (because it's ignored by the {@link LoadBalancingPolicy}), but it has received a topology event - * indicating that the node is up. - */ - UP, - /** - * A node is considered down in either of the following situations: 1) the driver has lost all - * connections to the node (and is currently trying to reconnect), or 2) the driver is not - * actively trying to connect to the node (because it's ignored by the {@link - * LoadBalancingPolicy}), but it has received a topology event indicating that the node is down. - */ - DOWN, - /** - * The node was forced down externally, the driver will never try to reconnect to it, whatever the - * {@link LoadBalancingPolicy} says. - * - *

This is used for edge error cases, for example when the driver detects that it's trying to - * connect to a node that does not belong to the Cassandra cluster (e.g. a wrong address was - * provided in the contact points). It can also be {@link - * TopologyEvent#forceDown(InetSocketAddress) triggered explicitly} by components (for example a - * custom load balancing policy) that want to limit the number of nodes that the driver connects - * to. - */ - FORCED_DOWN, -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java deleted file mode 100644 index bb52e9d1496..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A listener that gets notified when nodes states change. - * - *

Implementations of this interface can be registered either via the configuration (see {@code - * reference.conf} in the manual or core driver JAR), or programmatically via {@link - * SessionBuilder#addNodeStateListener(NodeStateListener)}. - * - *

Note that the methods defined by this interface will be executed by internal driver threads, - * and are therefore expected to have short execution times. If you need to perform long - * computations or blocking calls in response to schema change events, it is strongly recommended to - * schedule them asynchronously on a separate thread provided by your application code. - * - *

If you implement this interface but don't need to implement all the methods, extend {@link - * NodeStateListenerBase}. - * - *

If your implementation of this interface requires access to a fully-initialized session, - * consider wrapping it in a {@link SafeInitNodeStateListener}. - */ -public interface NodeStateListener extends AutoCloseable { - - /** - * Invoked when a node is first added to the cluster. - * - *

The node is not up yet at this point. {@link #onUp(Node)} will be notified later if the - * driver successfully connects to the node (provided that a session is opened and the node is not - * {@link NodeDistance#IGNORED ignored}), or receives a topology event for it. - * - *

This method is not invoked for the contact points provided at initialization. It is - * however for new nodes discovered during the full node list refresh after the first connection. - */ - void onAdd(@NonNull Node node); - - /** Invoked when a node's state switches to {@link NodeState#UP}. */ - void onUp(@NonNull Node node); - - /** - * Invoked when a node's state switches to {@link NodeState#DOWN} or {@link - * NodeState#FORCED_DOWN}. - */ - void onDown(@NonNull Node node); - - /** - * Invoked when a node leaves the cluster. - * - *

This can be triggered by a topology event, or during a full node list refresh if the node is - * absent from the new list. - */ - void onRemove(@NonNull Node node); - - /** - * Invoked when the session is ready to process user requests. - * - *

This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future - * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, - * this method will not get called. - * - *

Listener methods are invoked from different threads; if you store the session in a field, - * make it at least volatile to guarantee proper publication. - * - *

Note that this method will not be the first one invoked on the listener; the driver emits - * node events before that, during the initialization of the session: - * - *

    - *
  • First the driver shuffles the contact points, and tries each one sequentially. For any - * contact point that can't be reached, {@link #onDown(Node)} is invoked; for the one that - * eventually succeeds, {@link #onUp(Node)} is invoked and that node becomes the control - * node (if none succeeds, the session initialization fails and the process stops here). - *
  • The control node's {@code system.peers} table is inspected to discover the remaining - * nodes in the cluster. For any node that wasn't already a contact point, {@link - * #onAdd(Node)} is invoked; for any contact point that doesn't have a corresponding entry - * in the table, {@link #onRemove(Node)} is invoked; - *
  • The load balancing policy computes the nodes' {@linkplain NodeDistance distances}, and, - * for each LOCAL or REMOTE node, the driver creates a connection pool. If at least one - * pooled connection can be established, {@link #onUp(Node)} is invoked; otherwise, {@link - * #onDown(Node)} is invoked (no additional event is emitted for the control node, it is - * considered up since we already have a connection to it). - *
  • Once all the pools are created, the session is fully initialized and this method is - * invoked. - *
- * - * If you're not interested in those init events, or want to delay them until after the session is - * ready, take a look at {@link SafeInitNodeStateListener}. - * - *

This method's default implementation is empty. - */ - default void onSessionReady(@NonNull Session session) {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java deleted file mode 100644 index 0b747a00084..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Convenience class for listener implementations that that don't need to override all methods (all - * methods in this class are empty). 
- */ -public class NodeStateListenerBase implements NodeStateListener { - - @Override - public void onAdd(@NonNull Node node) { - // nothing to do - } - - @Override - public void onUp(@NonNull Node node) { - // nothing to do - } - - @Override - public void onDown(@NonNull Node node) { - // nothing to do - } - - @Override - public void onRemove(@NonNull Node node) { - // nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java deleted file mode 100644 index c33f7616b5a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.BiConsumer; -import net.jcip.annotations.GuardedBy; - -/** - * A node state listener wrapper that delays (or ignores) init events until after the session is - * ready. - * - *

By default, the driver calls node state events, such as {@link #onUp} and {@link #onAdd}, - * before the session is ready; see {@link NodeStateListener#onSessionReady(Session)} for a detailed - * explanation. This can make things complicated if your listener implementation needs the session - * to process those events. - * - *

This class wraps another implementation to shield it from those details: - * - *

- * NodeStateListener delegate = ... // your listener implementation
- *
- * SafeInitNodeStateListener wrapper =
- *     new SafeInitNodeStateListener(delegate, true);
- *
- * CqlSession session = CqlSession.builder()
- *     .withNodeStateListener(wrapper)
- *     .build();
- * 
- * - * With this setup, {@code delegate.onSessionReady} is guaranteed to be invoked first, before any - * other method. The second constructor argument indicates what to do with the method calls that - * were ignored before that: - * - *
    - *
  • if {@code true}, they are recorded, and replayed to {@code delegate} immediately after - * {@link #onSessionReady}. They are guaranteed to happen in the original order, and before - * any post-initialization events. - *
  • if {@code false}, they are discarded. - *
- * - *

Usage in non-blocking applications: beware that this class is not lock-free. It is implemented - * with locks for internal coordination. - * - * @since 4.6.0 - */ -public class SafeInitNodeStateListener implements NodeStateListener { - - private final NodeStateListener delegate; - private final boolean replayInitEvents; - - // Write lock: recording init events or setting sessionReady - // Read lock: reading init events or checking sessionReady - private final ReadWriteLock lock = new ReentrantReadWriteLock(); - - @GuardedBy("lock") - private boolean sessionReady; - - @GuardedBy("lock") - private final List initEvents = new ArrayList<>(); - - /** - * Creates a new instance. - * - * @param delegate the wrapped listener, to which method invocations will be forwarded. - * @param replayInitEvents whether to record events during initialization and replay them to the - * child listener once it's created, or just ignore them. - */ - public SafeInitNodeStateListener(@NonNull NodeStateListener delegate, boolean replayInitEvents) { - this.delegate = Objects.requireNonNull(delegate); - this.replayInitEvents = replayInitEvents; - } - - @Override - public void onSessionReady(@NonNull Session session) { - lock.writeLock().lock(); - try { - if (!sessionReady) { - sessionReady = true; - delegate.onSessionReady(session); - if (replayInitEvents) { - for (InitEvent event : initEvents) { - event.invoke(delegate); - } - } - } - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public void onAdd(@NonNull Node node) { - onEvent(node, InitEvent.Type.ADD); - } - - @Override - public void onUp(@NonNull Node node) { - onEvent(node, InitEvent.Type.UP); - } - - @Override - public void onDown(@NonNull Node node) { - onEvent(node, InitEvent.Type.DOWN); - } - - @Override - public void onRemove(@NonNull Node node) { - onEvent(node, InitEvent.Type.REMOVE); - } - - private void onEvent(Node node, InitEvent.Type eventType) { - - // Cheap case: the session is ready, just delegate - 
lock.readLock().lock(); - try { - if (sessionReady) { - eventType.listenerMethod.accept(delegate, node); - return; - } - } finally { - lock.readLock().unlock(); - } - - // Otherwise, we must acquire the write lock to record the event - if (replayInitEvents) { - lock.writeLock().lock(); - try { - // Must re-check because we completely released the lock for a short duration - if (sessionReady) { - eventType.listenerMethod.accept(delegate, node); - } else { - initEvents.add(new InitEvent(node, eventType)); - } - } finally { - lock.writeLock().unlock(); - } - } - } - - @Override - public void close() throws Exception { - delegate.close(); - } - - private static class InitEvent { - enum Type { - ADD(NodeStateListener::onAdd), - UP(NodeStateListener::onUp), - DOWN(NodeStateListener::onDown), - REMOVE(NodeStateListener::onRemove), - ; - - @SuppressWarnings("ImmutableEnumChecker") - final BiConsumer listenerMethod; - - Type(BiConsumer listenerMethod) { - this.listenerMethod = listenerMethod; - } - } - - final Node node; - final Type type; - - InitEvent(@NonNull Node node, @NonNull Type type) { - this.node = Objects.requireNonNull(node); - this.type = Objects.requireNonNull(type); - } - - void invoke(@NonNull NodeStateListener target) { - type.listenerMethod.accept(Objects.requireNonNull(target), node); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java deleted file mode 100644 index 7746bf3382e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Set; - -/** - * Utility component to work with the tokens of a given driver instance. - * - *

Note that the methods that take a keyspace argument are based on schema metadata, which can be - * disabled or restricted to a subset of keyspaces; therefore these methods might return empty - * results for some or all of the keyspaces. - * - * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED - * @see Session#setSchemaMetadataEnabled(Boolean) - * @see DefaultDriverOption#METADATA_SCHEMA_REFRESHED_KEYSPACES - */ -public interface TokenMap { - - /** Builds a token from its string representation. */ - @NonNull - Token parse(@NonNull String tokenString); - - /** Formats a token into a string representation appropriate for concatenation in a CQL query. */ - @NonNull - String format(@NonNull Token token); - - /** - * Builds a token from a partition key. - * - * @param partitionKey the partition key components, in their serialized form (which can be - * obtained with {@link TypeCodec#encode(Object, ProtocolVersion)}. Neither the individual - * components, nor the vararg array itself, can be {@code null}. - */ - @NonNull - Token newToken(@NonNull ByteBuffer... partitionKey); - - @NonNull - TokenRange newTokenRange(@NonNull Token start, @NonNull Token end); - - /** The token ranges that define data distribution on the ring. */ - @NonNull - Set getTokenRanges(); - - /** The token ranges for which a given node is the primary replica. */ - @NonNull - Set getTokenRanges(Node node); - - /** - * The tokens owned by the given node. - * - *

This is functionally equivalent to {@code getTokenRanges(node).map(r -> r.getEnd())}. Note - * that the set is rebuilt every time you call this method. - */ - @NonNull - default Set getTokens(@NonNull Node node) { - ImmutableSet.Builder result = ImmutableSet.builder(); - for (TokenRange range : getTokenRanges(node)) { - result.add(range.getEnd()); - } - return result.build(); - } - - /** The token ranges that are replicated on the given node, for the given keyspace. */ - @NonNull - Set getTokenRanges(@NonNull CqlIdentifier keyspace, @NonNull Node replica); - - /** - * Shortcut for {@link #getTokenRanges(CqlIdentifier, Node) - * getTokenRanges(CqlIdentifier.fromCql(keyspaceName), replica)}. - */ - @NonNull - default Set getTokenRanges(@NonNull String keyspaceName, @NonNull Node replica) { - return getTokenRanges(CqlIdentifier.fromCql(keyspaceName), replica); - } - - /** The replicas for a given partition key in the given keyspace. */ - @NonNull - Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull ByteBuffer partitionKey); - - /** - * Shortcut for {@link #getReplicas(CqlIdentifier, ByteBuffer) - * getReplicas(CqlIdentifier.fromCql(keyspaceName), partitionKey)}. - */ - @NonNull - default Set getReplicas(@NonNull String keyspaceName, @NonNull ByteBuffer partitionKey) { - return getReplicas(CqlIdentifier.fromCql(keyspaceName), partitionKey); - } - - /** The replicas for a given token in the given keyspace. */ - @NonNull - Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull Token token); - - /** - * Shortcut for {@link #getReplicas(CqlIdentifier, Token) - * getReplicas(CqlIdentifier.fromCql(keyspaceName), token)}. - */ - @NonNull - default Set getReplicas(@NonNull String keyspaceName, @NonNull Token token) { - return getReplicas(CqlIdentifier.fromCql(keyspaceName), token); - } - - /** - * The replicas for a given range in the given keyspace. - * - *

It is assumed that the input range does not overlap across multiple node ranges. If the - * range extends over multiple nodes, it only returns the nodes that are replicas for the last - * token of the range. In other words, this method is a shortcut for {@code getReplicas(keyspace, - * range.getEnd())}. - */ - @NonNull - default Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull TokenRange range) { - return getReplicas(keyspace, range.getEnd()); - } - - /** - * Shortcut for {@link #getReplicas(CqlIdentifier, TokenRange) - * getReplicas(CqlIdentifier.fromCql(keyspaceName), range)}. - */ - @NonNull - default Set getReplicas(@NonNull String keyspaceName, @NonNull TokenRange range) { - return getReplicas(CqlIdentifier.fromCql(keyspaceName), range); - } - - /** The name of the partitioner class in use, as reported by the Cassandra nodes. */ - @NonNull - String getPartitionerName(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java deleted file mode 100644 index 35eec88eb45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; - -/** A CQL aggregate in the schema metadata. */ -public interface AggregateMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - FunctionSignature getSignature(); - - /** - * The signature of the final function of this aggregate, or empty if there is none. - * - *

This is the function specified with {@code FINALFUNC} in the {@code CREATE AGGREGATE...} - * statement. It transforms the final value after the aggregation is complete. - */ - @NonNull - Optional getFinalFuncSignature(); - - /** - * The initial state value of this aggregate, or {@code null} if there is none. - * - *

This is the value specified with {@code INITCOND} in the {@code CREATE AGGREGATE...} - * statement. It's passed to the initial invocation of the state function (if that function does - * not accept null arguments). - * - *

The actual type of the returned object depends on the aggregate's {@link #getStateType() - * state type} and on the {@link TypeCodec codec} used to {@link TypeCodec#parse(String) parse} - * the {@code INITCOND} literal. - * - *

If, for some reason, the {@code INITCOND} literal cannot be parsed, a warning will be logged - * and the returned object will be the original {@code INITCOND} literal in its textual, - * non-parsed form. - * - * @return the initial state, or empty if there is none. - */ - @NonNull - Optional getInitCond(); - - /** - * The return type of this aggregate. - * - *

This is the final type of the value computed by this aggregate; in other words, the return - * type of the final function if it is defined, or the state type otherwise. - */ - @NonNull - DataType getReturnType(); - - /** - * The signature of the state function of this aggregate. - * - *

This is the function specified with {@code SFUNC} in the {@code CREATE AGGREGATE...} - * statement. It aggregates the current state with each row to produce a new state. - */ - @NonNull - FunctionSignature getStateFuncSignature(); - - /** - * The state type of this aggregate. - * - *

This is the type specified with {@code STYPE} in the {@code CREATE AGGREGATE...} statement. - * It defines the type of the value that is accumulated as the aggregate iterates through the - * rows. - */ - @NonNull - DataType getStateType(); - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // An aggregate has no children - return describe(pretty); - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE AGGREGATE ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - builder.append(type.asCql(false, pretty)); - } - builder - .increaseIndent() - .append(")") - .newLine() - .append("SFUNC ") - .append(getStateFuncSignature().getName()) - .newLine() - .append("STYPE ") - .append(getStateType().asCql(false, pretty)); - - if (getFinalFuncSignature().isPresent()) { - builder.newLine().append("FINALFUNC ").append(getFinalFuncSignature().get().getName()); - } - if (getInitCond().isPresent()) { - Optional formatInitCond = formatInitCond(); - assert formatInitCond.isPresent(); - builder.newLine().append("INITCOND ").append(formatInitCond.get()); - } - return builder.append(";").build(); - } - - /** - * Formats the {@linkplain #getInitCond() initial state value} for inclusion in a CQL statement. 
- */ - @NonNull - Optional formatInitCond(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java deleted file mode 100644 index 97613e2d2f1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -/** The order of a clustering column in a table or materialized view. */ -public enum ClusteringOrder { - ASC, - DESC -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java deleted file mode 100644 index fb91211e2fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A column in the schema metadata. */ -public interface ColumnMetadata { - - @NonNull - CqlIdentifier getKeyspace(); - - /** - * The identifier of the {@link TableMetadata} or a {@link ViewMetadata} that this column belongs - * to. - */ - @NonNull - CqlIdentifier getParent(); - - @NonNull - CqlIdentifier getName(); - - @NonNull - DataType getType(); - - boolean isStatic(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java deleted file mode 100644 index bf1bf97b19e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** A schema element that can be described in terms of CQL {@code CREATE} statements. */ -public interface Describable { - - /** - * Returns a single CQL statement that creates the element. - * - * @param pretty if {@code true}, make the output more human-readable (line breaks, indents, and - * {@link CqlIdentifier#asCql(boolean) pretty identifiers}). If {@code false}, return the - * statement on a single line with minimal formatting. - */ - @NonNull - String describe(boolean pretty); - - /** - * Returns a CQL script that creates the element and all of its children. For example: a schema - * with its tables, materialized views, types, etc. A table with its indices. - * - * @param pretty if {@code true}, make the output more human-readable (line breaks, indents, and - * {@link CqlIdentifier#asCql(boolean) pretty identifiers}). If {@code false}, return each - * statement on a single line with minimal formatting. 
- */ - @NonNull - String describeWithChildren(boolean pretty); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java deleted file mode 100644 index ed2d4d780de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** A CQL function in the schema metadata. */ -public interface FunctionMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - FunctionSignature getSignature(); - - /** - * The names of the parameters. 
This is in the same order as {@code - * getSignature().getParameterTypes()} - */ - @NonNull - List getParameterNames(); - - @NonNull - String getBody(); - - boolean isCalledOnNullInput(); - - @NonNull - String getLanguage(); - - @NonNull - DataType getReturnType(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - builder - .append("CREATE FUNCTION ") - .append(getKeyspace()) - .append(".") - .append(getSignature().getName()) - .append("("); - boolean first = true; - for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { - if (first) { - first = false; - } else { - builder.append(","); - } - DataType type = getSignature().getParameterTypes().get(i); - CqlIdentifier name = getParameterNames().get(i); - builder.append(name).append(" ").append(type.asCql(false, pretty)); - } - return builder - .append(")") - .increaseIndent() - .newLine() - .append(isCalledOnNullInput() ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT") - .newLine() - .append("RETURNS ") - .append(getReturnType().asCql(false, true)) - .newLine() - .append("LANGUAGE ") - .append(getLanguage()) - .newLine() - .append("AS '") - .append(getBody()) - .append("';") - .build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // A function has no children - return describe(pretty); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java deleted file mode 100644 index 8108b4b7afd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * The signature that uniquely identifies a CQL function or aggregate in a keyspace. - * - *

It's composed of a name and a list of parameter types. Overloads (such as {@code sum(int)} and - * {@code sum(int, int)} are not equal. - */ -@Immutable -public class FunctionSignature implements Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier name; - @NonNull private final List parameterTypes; - - public FunctionSignature( - @NonNull CqlIdentifier name, @NonNull Iterable parameterTypes) { - this.name = name; - this.parameterTypes = ImmutableList.copyOf(parameterTypes); - } - - /** - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - public FunctionSignature(@NonNull CqlIdentifier name, @NonNull DataType... parameterTypes) { - this( - name, - parameterTypes.length == 0 - ? ImmutableList.of() - : ImmutableList.builder().add(parameterTypes).build()); - } - - /** - * Shortcut for {@link #FunctionSignature(CqlIdentifier, Iterable) new - * FunctionSignature(CqlIdentifier.fromCql(name), parameterTypes)}. - */ - public FunctionSignature(@NonNull String name, @NonNull Iterable parameterTypes) { - this(CqlIdentifier.fromCql(name), parameterTypes); - } - - /** - * Shortcut for {@link #FunctionSignature(CqlIdentifier, DataType...)} new - * FunctionSignature(CqlIdentifier.fromCql(name), parameterTypes)}. - * - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - public FunctionSignature(@NonNull String name, @NonNull DataType... 
parameterTypes) { - this(CqlIdentifier.fromCql(name), parameterTypes); - } - - @NonNull - public CqlIdentifier getName() { - return name; - } - - @NonNull - public List getParameterTypes() { - return parameterTypes; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FunctionSignature) { - FunctionSignature that = (FunctionSignature) other; - return this.name.equals(that.name) && this.parameterTypes.equals(that.parameterTypes); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(name, parameterTypes); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(name.asInternal()).append('('); - boolean first = true; - for (DataType type : parameterTypes) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(type.asCql(true, true)); - } - return builder.append(')').toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java deleted file mode 100644 index 67ac4c06a2c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -/** A kind of index in the schema. */ -public enum IndexKind { - KEYS, - CUSTOM, - COMPOSITES -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java deleted file mode 100644 index 631a6584a27..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** A secondary index in the schema metadata. */ -public interface IndexMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getTable(); - - @NonNull - CqlIdentifier getName(); - - @NonNull - IndexKind getKind(); - - @NonNull - String getTarget(); - - /** If this index is custom, the name of the server-side implementation. Otherwise, empty. */ - @NonNull - default Optional getClassName() { - return Optional.ofNullable(getOptions().get("class_name")); - } - - /** - * The options of the index. - * - *

This directly reflects the corresponding column of the system table ({@code - * system.schema_columns.index_options} in Cassandra <= 2.2, or {@code - * system_schema.indexes.options} in later versions). - * - *

Note that some of these options might also be exposed as standalone fields in this - * interface, namely {@link #getClassName()} and {{@link #getTarget()}}. - */ - @NonNull - Map getOptions(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (getClassName().isPresent()) { - builder - .append("CREATE CUSTOM INDEX ") - .append(getName()) - .append(" ON ") - .append(getKeyspace()) - .append(".") - .append(getTable()) - .append(String.format(" (%s)", getTarget())) - .newLine() - .append(String.format("USING '%s'", getClassName().get())); - - // Some options already appear in the CREATE statement, ignore them - Map describedOptions = - Maps.filterKeys(getOptions(), k -> !"target".equals(k) && !"class_name".equals(k)); - if (!describedOptions.isEmpty()) { - builder.newLine().append("WITH OPTIONS = {").newLine().increaseIndent(); - boolean first = true; - for (Map.Entry option : describedOptions.entrySet()) { - if (first) { - first = false; - } else { - builder.append(",").newLine(); - } - builder.append(String.format("'%s' : '%s'", option.getKey(), option.getValue())); - } - builder.decreaseIndent().append("}"); - } - } else { - builder - .append("CREATE INDEX ") - .append(getName()) - .append(" ON ") - .append(getKeyspace()) - .append(".") - .append(getTable()) - .append(String.format(" (%s);", getTarget())); - } - return builder.build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // An index has no children - return describe(pretty); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java deleted file mode 100644 index e5080932b3c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; - -/** A keyspace in the schema metadata. */ -public interface KeyspaceMetadata extends Describable { - - @NonNull - CqlIdentifier getName(); - - /** Whether durable writes are set on this keyspace. */ - boolean isDurableWrites(); - - /** Whether this keyspace is virtual */ - boolean isVirtual(); - - /** The replication options defined for this keyspace. 
*/ - @NonNull - Map getReplication(); - - @NonNull - Map getTables(); - - @NonNull - default Optional getTable(@NonNull CqlIdentifier tableId) { - return Optional.ofNullable(getTables().get(tableId)); - } - - /** Shortcut for {@link #getTable(CqlIdentifier) getTable(CqlIdentifier.fromCql(tableName))}. */ - @NonNull - default Optional getTable(@NonNull String tableName) { - return getTable(CqlIdentifier.fromCql(tableName)); - } - - @NonNull - Map getViews(); - - /** Gets the views based on a given table. */ - @NonNull - default Map getViewsOnTable(@NonNull CqlIdentifier tableId) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (ViewMetadata view : getViews().values()) { - if (view.getBaseTable().equals(tableId)) { - builder.put(view.getName(), view); - } - } - return builder.build(); - } - - @NonNull - default Optional getView(@NonNull CqlIdentifier viewId) { - return Optional.ofNullable(getViews().get(viewId)); - } - - /** Shortcut for {@link #getView(CqlIdentifier) getView(CqlIdentifier.fromCql(viewName))}. */ - @NonNull - default Optional getView(@NonNull String viewName) { - return getView(CqlIdentifier.fromCql(viewName)); - } - - @NonNull - Map getUserDefinedTypes(); - - @NonNull - default Optional getUserDefinedType(@NonNull CqlIdentifier typeId) { - return Optional.ofNullable(getUserDefinedTypes().get(typeId)); - } - - /** - * Shortcut for {@link #getUserDefinedType(CqlIdentifier) - * getUserDefinedType(CqlIdentifier.fromCql(typeName))}. 
- */ - @NonNull - default Optional getUserDefinedType(@NonNull String typeName) { - return getUserDefinedType(CqlIdentifier.fromCql(typeName)); - } - - @NonNull - Map getFunctions(); - - @NonNull - default Optional getFunction(@NonNull FunctionSignature functionSignature) { - return Optional.ofNullable(getFunctions().get(functionSignature)); - } - - @NonNull - default Optional getFunction( - @NonNull CqlIdentifier functionId, @NonNull Iterable parameterTypes) { - return Optional.ofNullable( - getFunctions().get(new FunctionSignature(functionId, parameterTypes))); - } - - /** - * Shortcut for {@link #getFunction(CqlIdentifier, Iterable) - * getFunction(CqlIdentifier.fromCql(functionName), parameterTypes)}. - */ - @NonNull - default Optional getFunction( - @NonNull String functionName, @NonNull Iterable parameterTypes) { - return getFunction(CqlIdentifier.fromCql(functionName), parameterTypes); - } - - /** - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getFunction( - @NonNull CqlIdentifier functionId, @NonNull DataType... parameterTypes) { - return Optional.ofNullable( - getFunctions().get(new FunctionSignature(functionId, parameterTypes))); - } - - /** - * Shortcut for {@link #getFunction(CqlIdentifier, DataType...) - * getFunction(CqlIdentifier.fromCql(functionName), parameterTypes)}. - * - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getFunction( - @NonNull String functionName, @NonNull DataType... 
parameterTypes) { - return getFunction(CqlIdentifier.fromCql(functionName), parameterTypes); - } - - @NonNull - Map getAggregates(); - - @NonNull - default Optional getAggregate(@NonNull FunctionSignature aggregateSignature) { - return Optional.ofNullable(getAggregates().get(aggregateSignature)); - } - - @NonNull - default Optional getAggregate( - @NonNull CqlIdentifier aggregateId, @NonNull Iterable parameterTypes) { - return Optional.ofNullable( - getAggregates().get(new FunctionSignature(aggregateId, parameterTypes))); - } - - /** - * Shortcut for {@link #getAggregate(CqlIdentifier, Iterable) - * getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes)}. - */ - @NonNull - default Optional getAggregate( - @NonNull String aggregateName, @NonNull Iterable parameterTypes) { - return getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes); - } - - /** - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getAggregate( - @NonNull CqlIdentifier aggregateId, @NonNull DataType... parameterTypes) { - return Optional.ofNullable( - getAggregates().get(new FunctionSignature(aggregateId, parameterTypes))); - } - - /** - * Shortcut for {@link #getAggregate(CqlIdentifier, DataType...)} - * getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes)}. - * - * @param parameterTypes neither the individual types, nor the vararg array itself, can be null. - */ - @NonNull - default Optional getAggregate( - @NonNull String aggregateName, @NonNull DataType... 
parameterTypes) { - return getAggregate(CqlIdentifier.fromCql(aggregateName), parameterTypes); - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - builder - .append("KEYSPACE ") - .append(getName()) - .append(" WITH replication = { 'class' : '") - .append(getReplication().get("class")) - .append("'"); - for (Map.Entry entry : getReplication().entrySet()) { - if (!entry.getKey().equals("class")) { - builder - .append(", '") - .append(entry.getKey()) - .append("': '") - .append(entry.getValue()) - .append("'"); - } - } - builder - .append(" } AND durable_writes = ") - .append(Boolean.toString(isDurableWrites())) - .append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - String createKeyspace = describe(pretty); - ScriptBuilder builder = new ScriptBuilder(pretty).append(createKeyspace); - - for (Describable element : - Iterables.concat( - getUserDefinedTypes().values(), - getTables().values(), - getViews().values(), - getFunctions().values(), - getAggregates().values())) { - builder.forceNewLine(2).append(element.describeWithChildren(pretty)); - } - - return builder.build(); - } - - default boolean shallowEquals(Object other) { - if (other == this) { - return true; - } else if (other instanceof KeyspaceMetadata) { - KeyspaceMetadata that = (KeyspaceMetadata) other; - return Objects.equals(this.getName(), that.getName()) - && this.isDurableWrites() == that.isDurableWrites() - && Objects.equals(this.getReplication(), that.getReplication()); - } else { - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java deleted file 
mode 100644 index 8b70ba04955..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** A table or materialized view in the schema metadata. */ -public interface RelationMetadata extends Describable { - - @NonNull - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getName(); - - /** The unique id generated by the server for this element. */ - Optional getId(); - - /** - * Convenience method to get all the primary key columns (partition key + clustering columns) in a - * single call. - * - *

Note that this creates a new list instance on each call. - * - * @see #getPartitionKey() - * @see #getClusteringColumns() - */ - @NonNull - default List getPrimaryKey() { - return ImmutableList.builder() - .addAll(getPartitionKey()) - .addAll(getClusteringColumns().keySet()) - .build(); - } - - @NonNull - List getPartitionKey(); - - @NonNull - Map getClusteringColumns(); - - @NonNull - Map getColumns(); - - @NonNull - default Optional getColumn(@NonNull CqlIdentifier columnId) { - return Optional.ofNullable(getColumns().get(columnId)); - } - - /** - * Shortcut for {@link #getColumn(CqlIdentifier) getColumn(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default Optional getColumn(@NonNull String columnName) { - return getColumn(CqlIdentifier.fromCql(columnName)); - } - - /** - * The options of this table or materialized view. - * - *

This corresponds to the {@code WITH} clauses in the {@code CREATE} statement that would - * recreate this element. The exact set of keys and the types of the values depend on the server - * version that this metadata was extracted from. For example, in Cassandra 2.2 and below, {@code - * WITH caching} takes a string argument, whereas starting with Cassandra 3.0 it is a map. - */ - @NonNull - Map getOptions(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java deleted file mode 100644 index ac7317574ed..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Tracks schema changes. - * - *

Implementations of this interface can be registered either via the configuration (see {@code - * reference.conf} in the manual or core driver JAR), or programmatically via {@link - * SessionBuilder#addSchemaChangeListener(SchemaChangeListener)}. - * - *

Note that the methods defined by this interface will be executed by internal driver threads, - * and are therefore expected to have short execution times. If you need to perform long - * computations or blocking calls in response to schema change events, it is strongly recommended to - * schedule them asynchronously on a separate thread provided by your application code. - * - *

If you implement this interface but don't need to implement all the methods, extend {@link - * SchemaChangeListenerBase}. - */ -public interface SchemaChangeListener extends AutoCloseable { - - void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace); - - void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace); - - void onKeyspaceUpdated(@NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous); - - void onTableCreated(@NonNull TableMetadata table); - - void onTableDropped(@NonNull TableMetadata table); - - void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous); - - void onUserDefinedTypeCreated(@NonNull UserDefinedType type); - - void onUserDefinedTypeDropped(@NonNull UserDefinedType type); - - void onUserDefinedTypeUpdated( - @NonNull UserDefinedType current, @NonNull UserDefinedType previous); - - void onFunctionCreated(@NonNull FunctionMetadata function); - - void onFunctionDropped(@NonNull FunctionMetadata function); - - void onFunctionUpdated(@NonNull FunctionMetadata current, @NonNull FunctionMetadata previous); - - void onAggregateCreated(@NonNull AggregateMetadata aggregate); - - void onAggregateDropped(@NonNull AggregateMetadata aggregate); - - void onAggregateUpdated(@NonNull AggregateMetadata current, @NonNull AggregateMetadata previous); - - void onViewCreated(@NonNull ViewMetadata view); - - void onViewDropped(@NonNull ViewMetadata view); - - void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous); - - /** - * Invoked when the session is ready to process user requests. - * - *

This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future - * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, - * this method will not get called. - * - *

Listener methods are invoked from different threads; if you store the session in a field, - * make it at least volatile to guarantee proper publication. - * - *

This method is guaranteed to be the first one invoked on this object. - * - *

The default implementation is empty. - */ - default void onSessionReady(@NonNull Session session) {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java deleted file mode 100644 index 1cd449b39d8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Convenience class for listener implementations that that don't need to override all methods (all - * methods in this class are empty). 
- */ -public class SchemaChangeListenerBase implements SchemaChangeListener { - - @Override - public void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace) { - // nothing to do - } - - @Override - public void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace) { - // nothing to do - } - - @Override - public void onKeyspaceUpdated( - @NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous) { - // nothing to do - } - - @Override - public void onTableCreated(@NonNull TableMetadata table) { - // nothing to do - } - - @Override - public void onTableDropped(@NonNull TableMetadata table) { - // nothing to do - } - - @Override - public void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous) { - // nothing to do - } - - @Override - public void onUserDefinedTypeCreated(@NonNull UserDefinedType type) { - // nothing to do - } - - @Override - public void onUserDefinedTypeDropped(@NonNull UserDefinedType type) { - // nothing to do - } - - @Override - public void onUserDefinedTypeUpdated( - @NonNull UserDefinedType current, @NonNull UserDefinedType previous) { - // nothing to do - } - - @Override - public void onFunctionCreated(@NonNull FunctionMetadata function) { - // nothing to do - } - - @Override - public void onFunctionDropped(@NonNull FunctionMetadata function) { - // nothing to do - } - - @Override - public void onFunctionUpdated( - @NonNull FunctionMetadata current, @NonNull FunctionMetadata previous) { - // nothing to do - } - - @Override - public void onAggregateCreated(@NonNull AggregateMetadata aggregate) { - // nothing to do - } - - @Override - public void onAggregateDropped(@NonNull AggregateMetadata aggregate) { - // nothing to do - } - - @Override - public void onAggregateUpdated( - @NonNull AggregateMetadata current, @NonNull AggregateMetadata previous) { - // nothing to do - } - - @Override - public void onViewCreated(@NonNull ViewMetadata view) { - // nothing to do - } - - @Override - public void 
onViewDropped(@NonNull ViewMetadata view) { - // nothing to do - } - - @Override - public void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous) { - // nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java deleted file mode 100644 index bcda226b45d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; - -/** A table in the schema metadata. 
*/ -public interface TableMetadata extends RelationMetadata { - - boolean isCompactStorage(); - - /** Whether this table is virtual */ - boolean isVirtual(); - - @NonNull - Map getIndexes(); - - @NonNull - default Optional getIndex(@NonNull CqlIdentifier indexId) { - return Optional.ofNullable(getIndexes().get(indexId)); - } - - /** Shortcut for {@link #getIndex(CqlIdentifier) getIndex(CqlIdentifier.fromCql(indexName))}. */ - @NonNull - default Optional getIndex(@NonNull String indexName) { - return getIndex(CqlIdentifier.fromCql(indexName)); - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - if (isVirtual()) { - builder.append("/* VIRTUAL "); - } else { - builder.append("CREATE "); - } - - builder - .append("TABLE ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" (") - .newLine() - .increaseIndent(); - - for (ColumnMetadata column : getColumns().values()) { - builder.append(column.getName()).append(" ").append(column.getType().asCql(true, pretty)); - if (column.isStatic()) { - builder.append(" static"); - } - builder.append(",").newLine(); - } - - // PK - builder.append("PRIMARY KEY ("); - if (getPartitionKey().size() == 1) { // PRIMARY KEY (k - builder.append(getPartitionKey().get(0).getName()); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (ColumnMetadata pkColumn : getPartitionKey()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(pkColumn.getName()); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { - builder.append(", ").append(clusteringColumn.getName()); - } - builder.append(")"); - - builder.newLine().decreaseIndent().append(")"); - - builder.increaseIndent(); - if (isCompactStorage()) { - builder.andWith().append("COMPACT STORAGE"); - } - if 
(getClusteringColumns().containsValue(ClusteringOrder.DESC)) { - builder.andWith().append("CLUSTERING ORDER BY ("); - boolean first = true; - for (Map.Entry entry : getClusteringColumns().entrySet()) { - if (first) { - first = false; - } else { - builder.append(", "); - } - builder.append(entry.getKey().getName()).append(" ").append(entry.getValue().name()); - } - builder.append(")"); - } - Map options = getOptions(); - RelationParser.appendOptions(options, builder); - builder.append(";"); - if (isVirtual()) { - builder.append(" */"); - } - return builder.build(); - } - - /** - * {@inheritDoc} - * - *

This describes the table and all of its indices. Contrary to previous driver versions, views - * are not included. - */ - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - String createTable = describe(pretty); - ScriptBuilder builder = new ScriptBuilder(pretty).append(createTable); - for (IndexMetadata indexMetadata : getIndexes().values()) { - builder.forceNewLine(2).append(indexMetadata.describeWithChildren(pretty)); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java deleted file mode 100644 index e6b06cffb97..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; - -/** A materialized view in the schema metadata. */ -public interface ViewMetadata extends RelationMetadata { - - /** The table that this view is based on. */ - @NonNull - CqlIdentifier getBaseTable(); - - /** - * Whether this view does a {@code SELECT *} on its base table (this only affects the output of - * {@link #describe(boolean)}). - */ - boolean includesAllColumns(); - - @NonNull - Optional getWhereClause(); - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = - new ScriptBuilder(pretty) - .append("CREATE MATERIALIZED VIEW ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" AS") - .newLine(); - - builder.append("SELECT"); - if (includesAllColumns()) { - builder.append(" * "); - } else { - builder.newLine().increaseIndent(); - boolean first = true; - for (ColumnMetadata column : getColumns().values()) { - if (first) { - first = false; - } else { - builder.append(",").newLine(); - } - builder.append(column.getName()); - } - builder.newLine().decreaseIndent(); - } - - builder.append("FROM ").append(getKeyspace()).append(".").append(getBaseTable()); - - Optional whereClause = getWhereClause(); - if (whereClause.isPresent() && !whereClause.get().isEmpty()) { - builder.newLine().append("WHERE ").append(whereClause.get()); - } - - builder.newLine().append("PRIMARY KEY ("); - if (getPartitionKey().size() == 1) { // PRIMARY KEY (k - builder.append(getPartitionKey().get(0).getName()); - } else { // PRIMARY KEY ((k1, k2) - builder.append("("); - boolean first = true; - for (ColumnMetadata pkColumn : getPartitionKey()) { - if (first) { - first = 
false; - } else { - builder.append(", "); - } - builder.append(pkColumn.getName()); - } - builder.append(")"); - } - // PRIMARY KEY (, cc1, cc2, cc3) - for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { - builder.append(", ").append(clusteringColumn.getName()); - } - builder.append(")").increaseIndent(); - - RelationParser.appendOptions(getOptions(), builder); - return builder.append(";").build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - return describe(pretty); // A view has no children - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java deleted file mode 100644 index f39de8ec5b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.token; - -/** A token on the ring. 
*/ -public interface Token extends Comparable {} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java deleted file mode 100644 index e384300c571..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata.token; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** - * A range of tokens on the Cassandra ring. - * - *

A range is start-exclusive and end-inclusive. It is empty when start and end are the same - * token, except if that is the minimum token, in which case the range covers the whole ring (this - * is consistent with the behavior of CQL range queries). - * - *

Note that CQL does not handle wrapping. To query all partitions in a range, see {@link - * #unwrap()}. - */ -public interface TokenRange extends Comparable { - - /** The start of the range (exclusive). */ - @NonNull - Token getStart(); - - /** The end of the range (inclusive). */ - @NonNull - Token getEnd(); - - /** - * Splits this range into a number of smaller ranges of equal "size" (referring to the number of - * tokens, not the actual amount of data). - * - *

Splitting an empty range is not permitted. But note that, in edge cases, splitting a range - * might produce one or more empty ranges. - * - * @throws IllegalArgumentException if the range is empty or if {@code numberOfSplits < 1}. - */ - @NonNull - List splitEvenly(int numberOfSplits); - - /** - * Whether this range is empty. - * - *

A range is empty when {@link #getStart()} and {@link #getEnd()} are the same token, except - * if that is the minimum token, in which case the range covers the whole ring (this is consistent - * with the behavior of CQL range queries). - */ - boolean isEmpty(); - - /** Whether this range wraps around the end of the ring. */ - boolean isWrappedAround(); - - /** Whether this range represents the full ring. */ - boolean isFullRing(); - - /** - * Splits this range into a list of two non-wrapping ranges. This will return the range itself if - * it is non-wrapping, or two ranges otherwise. - * - *

For example: - * - *

    - *
  • {@code ]1,10]} unwraps to itself; - *
  • {@code ]10,1]} unwraps to {@code ]10,min_token]} and {@code ]min_token,1]}. - *
- * - *

This is useful for CQL range queries, which do not handle wrapping: - * - *

{@code
-   * List rows = new ArrayList();
-   * for (TokenRange subRange : range.unwrap()) {
-   *     ResultSet rs = session.execute(
-   *         "SELECT * FROM mytable WHERE token(pk) > ? and token(pk) <= ?",
-   *         subRange.getStart(), subRange.getEnd());
-   *     rows.addAll(rs.all());
-   * }
-   * }
- */ - @NonNull - List unwrap(); - - /** - * Whether this range intersects another one. - * - *

For example: - * - *

    - *
  • {@code ]3,5]} intersects {@code ]1,4]}, {@code ]4,5]}... - *
  • {@code ]3,5]} does not intersect {@code ]1,2]}, {@code ]2,3]}, {@code ]5,7]}... - *
- */ - boolean intersects(@NonNull TokenRange that); - - /** - * Computes the intersection of this range with another one, producing one or more ranges. - * - *

If either of these ranges overlap the the ring, they are unwrapped and the unwrapped ranges - * are compared to one another. - * - *

This call will fail if the two ranges do not intersect, you must check by calling {@link - * #intersects(TokenRange)} first. - * - * @param that the other range. - * @return the range(s) resulting from the intersection. - * @throws IllegalArgumentException if the ranges do not intersect. - */ - @NonNull - List intersectWith(@NonNull TokenRange that); - - /** - * Checks whether this range contains a given token, i.e. {@code range.start < token <= - * range.end}. - */ - boolean contains(@NonNull Token token); - - /** - * Merges this range with another one. - * - *

The two ranges should either intersect or be adjacent; in other words, the merged range - * should not include tokens that are in neither of the original ranges. - * - *

For example: - * - *

    - *
  • merging {@code ]3,5]} with {@code ]4,7]} produces {@code ]3,7]}; - *
  • merging {@code ]3,5]} with {@code ]4,5]} produces {@code ]3,5]}; - *
  • merging {@code ]3,5]} with {@code ]5,8]} produces {@code ]3,8]}; - *
  • merging {@code ]3,5]} with {@code ]6,8]} fails. - *
- * - * @throws IllegalArgumentException if the ranges neither intersect nor are adjacent. - */ - @NonNull - TokenRange mergeWith(@NonNull TokenRange that); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java deleted file mode 100644 index 0e9934c7034..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. 
*/ -public enum DefaultNodeMetric implements NodeMetric { - OPEN_CONNECTIONS("pool.open-connections"), - AVAILABLE_STREAMS("pool.available-streams"), - IN_FLIGHT("pool.in-flight"), - ORPHANED_STREAMS("pool.orphaned-streams"), - BYTES_SENT("bytes-sent"), - BYTES_RECEIVED("bytes-received"), - CQL_MESSAGES("cql-messages"), - UNSENT_REQUESTS("errors.request.unsent"), - ABORTED_REQUESTS("errors.request.aborted"), - WRITE_TIMEOUTS("errors.request.write-timeouts"), - READ_TIMEOUTS("errors.request.read-timeouts"), - UNAVAILABLES("errors.request.unavailables"), - OTHER_ERRORS("errors.request.others"), - RETRIES("retries.total"), - RETRIES_ON_ABORTED("retries.aborted"), - RETRIES_ON_READ_TIMEOUT("retries.read-timeout"), - RETRIES_ON_WRITE_TIMEOUT("retries.write-timeout"), - RETRIES_ON_UNAVAILABLE("retries.unavailable"), - RETRIES_ON_OTHER_ERROR("retries.other"), - IGNORES("ignores.total"), - IGNORES_ON_ABORTED("ignores.aborted"), - IGNORES_ON_READ_TIMEOUT("ignores.read-timeout"), - IGNORES_ON_WRITE_TIMEOUT("ignores.write-timeout"), - IGNORES_ON_UNAVAILABLE("ignores.unavailable"), - IGNORES_ON_OTHER_ERROR("ignores.other"), - SPECULATIVE_EXECUTIONS("speculative-executions"), - CONNECTION_INIT_ERRORS("errors.connection.init"), - AUTHENTICATION_ERRORS("errors.connection.auth"), - ; - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DefaultNodeMetric(String path) { - this.path = path; - } - - @Override - @NonNull - public String getPath() { - return path; - } - - @NonNull - public static DefaultNodeMetric fromPath(@NonNull String path) { - DefaultNodeMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown node metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DefaultNodeMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java deleted file mode 100644 index 63027a23fe7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** See {@code reference.conf} for a description of each metric. 
*/ -public enum DefaultSessionMetric implements SessionMetric { - BYTES_SENT("bytes-sent"), - BYTES_RECEIVED("bytes-received"), - CONNECTED_NODES("connected-nodes"), - CQL_REQUESTS("cql-requests"), - CQL_CLIENT_TIMEOUTS("cql-client-timeouts"), - THROTTLING_DELAY("throttling.delay"), - THROTTLING_QUEUE_SIZE("throttling.queue-size"), - THROTTLING_ERRORS("throttling.errors"), - CQL_PREPARED_CACHE_SIZE("cql-prepared-cache-size"), - ; - - private static final Map BY_PATH = sortByPath(); - - private final String path; - - DefaultSessionMetric(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } - - @NonNull - public static DefaultSessionMetric fromPath(@NonNull String path) { - DefaultSessionMetric metric = BY_PATH.get(path); - if (metric == null) { - throw new IllegalArgumentException("Unknown session metric path " + path); - } - return metric; - } - - private static Map sortByPath() { - ImmutableMap.Builder result = ImmutableMap.builder(); - for (DefaultSessionMetric value : values()) { - result.put(value.getPath(), value); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java deleted file mode 100644 index 58d531b3464..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Optional; - -/** - * A wrapper around a {@link MetricRegistry} to expose the driver's metrics. - * - *

This type exists mainly to avoid a hard dependency to Dropwizard Metrics (that is, the JAR can - * be completely removed from the classpath if metrics are disabled). It also provides convenience - * methods to access individual metrics programmatically. - */ -public interface Metrics { - - /** - * Returns the underlying Dropwizard registry. - * - *

Typically, this can be used to configure a reporter. - * - * @see Reporters - * (Dropwizard Metrics manual) - * @leaks-private-api - */ - @NonNull - MetricRegistry getRegistry(); - - /** - * Retrieves a session-level metric from the registry. - * - *

To determine the type of each metric, refer to the comments in the default {@code - * reference.conf} (included in the driver's codebase and JAR file). Note that the method does not - * check that this type is correct (there is no way to do this at runtime because some metrics are - * generic); if you use the wrong type, you will get a {@code ClassCastException} in your code: - * - *

{@code
-   * // Correct:
-   * Gauge connectedNodes = getSessionMetric(DefaultSessionMetric.CONNECTED_NODES);
-   *
-   * // Wrong, will throw CCE:
-   * Counter connectedNodes = getSessionMetric(DefaultSessionMetric.CONNECTED_NODES);
-   * }
- * - * @param profileName the name of the execution profile, or {@code null} if the metric is not - * associated to any profile. Note that this is only included for future extensibility: at - * this time, the driver does not break up metrics per profile. Therefore you can always use - * {@link #getSessionMetric(SessionMetric)} instead of this method. - * @return the metric, or empty if it is disabled. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - Optional getSessionMetric( - @NonNull SessionMetric metric, @Nullable String profileName); - - /** - * Shortcut for {@link #getSessionMetric(SessionMetric, String) getSessionMetric(metric, null)}. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - default Optional getSessionMetric(@NonNull SessionMetric metric) { - return getSessionMetric(metric, null); - } - - /** - * Retrieves a node-level metric for a given node from the registry. - * - *

To determine the type of each metric, refer to the comments in the default {@code - * reference.conf} (included in the driver's codebase and JAR file). Note that the method does not - * check that this type is correct (there is no way to do this at runtime because some metrics are - * generic); if you use the wrong type, you will get a {@code ClassCastException} in your code: - * - *

{@code
-   * // Correct:
-   * Gauge openConnections = getNodeMetric(node, DefaultNodeMetric.OPEN_CONNECTIONS);
-   *
-   * // Wrong, will throw CCE:
-   * Counter openConnections = getNodeMetric(node, DefaultNodeMetric.OPEN_CONNECTIONS);
-   * }
- * - * @param profileName the name of the execution profile, or {@code null} if the metric is not - * associated to any profile. Note that this is only included for future extensibility: at - * this time, the driver does not break up metrics per profile. Therefore you can always use - * {@link #getNodeMetric(Node, NodeMetric)} instead of this method. - * @return the metric, or empty if it is disabled. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - Optional getNodeMetric( - @NonNull Node node, @NonNull NodeMetric metric, @Nullable String profileName); - - /** - * Shortcut for {@link #getNodeMetric(Node, NodeMetric, String) getNodeMetric(node, metric, - * null)}. - */ - @SuppressWarnings("TypeParameterUnusedInFormals") - @NonNull - default Optional getNodeMetric( - @NonNull Node node, @NonNull NodeMetric metric) { - return getNodeMetric(node, metric, null); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java deleted file mode 100644 index b31c0ed8bcf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A node-level metric exposed through {@link Session#getMetrics()}. - * - *

All metrics exposed out of the box by the driver are instances of {@link DefaultNodeMetric} or - * {@link com.datastax.dse.driver.api.core.metrics.DseNodeMetric DseNodeMetric} (this interface only - * exists to allow custom metrics in driver extensions). - * - * @see SessionMetric - */ -public interface NodeMetric { - - @NonNull - String getPath(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java deleted file mode 100644 index 2a1ee599754..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metrics; - -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A session-level metric exposed through {@link Session#getMetrics()}. - * - *

All metrics exposed out of the box by the driver are instances of {@link DefaultSessionMetric} - * or {@link com.datastax.dse.driver.api.core.metrics.DseSessionMetric DseSessionMetric} (this - * interface only exists to allow custom metrics in driver extensions). - * - * @see NodeMetric - */ -public interface SessionMetric { - - @NonNull - String getPath(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java deleted file mode 100644 index 597b333267b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** The core API of the driver, that deals with query execution and cluster metadata. 
*/ -package com.datastax.oss.driver.api.core; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java b/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java deleted file mode 100644 index 3cb838f3171..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -/** - * A utility to emulate offset queries on the client side (this comes with important performance - * trade-offs, make sure you read and understand the whole javadocs before using this class). - * - *

Web UIs and services often provide paginated results with random access, for example: given a - * page size of 20 elements, fetch page 5. Cassandra does not support this natively (see CASSANDRA-6511), because such - * queries are inherently linear: the database would have to restart from the beginning every time, - * and skip unwanted rows until it reaches the desired offset. - * - *

However, random pagination is a real need for many applications, and linear performance can be - * a reasonable trade-off if the cardinality stays low. This class provides a way to emulate this - * behavior on the client side. - * - *

Performance considerations

- * - * For each page that you want to retrieve: - * - *
    - *
  • you need to re-execute the query, in order to start with a fresh result set; - *
  • this class starts iterating from the beginning, and skips rows until it reaches the desired - * offset. - *
- * - *
- * - *
- * String query = "SELECT ...";
- * OffsetPager pager = new OffsetPager(20);
- *
- * // Get page 2: start from a fresh result set, throw away rows 1-20, then return rows 21-40
- * ResultSet rs = session.execute(query);
- * OffsetPager.Page<Row> page2 = pager.getPage(rs, 2);
- *
- * // Get page 5: start from a fresh result set, throw away rows 1-80, then return rows 81-100
- * rs = session.execute(query);
- * OffsetPager.Page<Row> page5 = pager.getPage(rs, 5);
- * 
- * - *

Establishing application-level guardrails

- * - * Linear performance should be fine for the values typically encountered in real-world - * applications: for example, if the page size is 25 and users never go past page 10, the worst case - * is only 250 rows, which is a very small result set. However, we strongly recommend that you - * implement hard limits in your application code: if the page number is exposed to the user (for - * example if it is passed as a URL parameter), make sure it is properly validated and enforce a - * maximum, so that an attacker can't inject a large value that could potentially fetch millions of - * rows. - * - *

Relation with protocol-level paging

- * - * Protocol-level paging refers to the ability to split large response into multiple network chunks: - * see {@link Statement#setPageSize(int)} and {@code basic.request.page-size} in the configuration. - * It happens under the hood, and is completely transparent for offset paging: this class will work - * the same no matter how many network roundtrips were needed to fetch the result. You don't need to - * set the protocol page size and the logical page size to the same value. - */ -@ThreadSafe -public class OffsetPager { - - /** A page returned as the result of an offset query. */ - public interface Page { - - /** The elements in the page. */ - @NonNull - List getElements(); - - /** - * The page number (1 for the first page, 2 for the second page, etc). - * - *

Note that it may be different than the number you passed to {@link - * #getPage(PagingIterable, int)}: if the result set was too short, this is the actual number of - * the last page. - */ - int getPageNumber(); - - /** Whether this is the last page in the result set. */ - boolean isLast(); - } - - private final int pageSize; - - /** - * Creates a new instance. - * - * @param pageSize the number of elements per page. Must be greater than or equal to 1. - */ - public OffsetPager(int pageSize) { - if (pageSize < 1) { - throw new IllegalArgumentException("Invalid pageSize, expected >=1, got " + pageSize); - } - this.pageSize = pageSize; - } - - /** - * Extracts a page from a synchronous result set, by skipping rows until we get to the requested - * offset. - * - * @param iterable the iterable to extract the results from: typically a {@link ResultSet}, or a - * {@link PagingIterable} returned by the mapper. - * @param targetPageNumber the page to return (1 for the first page, 2 for the second page, etc). - * Must be greater than or equal to 1. - * @return the requested page, or the last page if the requested page was past the end of the - * iterable. - * @throws IllegalArgumentException if the conditions on the arguments are not respected. - */ - @NonNull - public Page getPage( - @NonNull PagingIterable iterable, final int targetPageNumber) { - - throwIfIllegalArguments(iterable, targetPageNumber); - - // Holds the contents of the target page. We also need to record the current page as we go, - // because our iterable is forward-only and we can't predict when we'll hit the end. 
- List currentPageElements = new ArrayList<>(); - - int currentPageNumber = 1; - int currentPageSize = 0; - for (ElementT element : iterable) { - currentPageSize += 1; - - if (currentPageSize > pageSize) { - currentPageNumber += 1; - currentPageSize = 1; - currentPageElements.clear(); - } - - currentPageElements.add(element); - - if (currentPageNumber == targetPageNumber && currentPageSize == pageSize) { - // The target page has the full size and we've seen all of its elements - break; - } - } - - // Either we have the full target page, or we've reached the end of the result set. - boolean isLast = iterable.one() == null; - return new DefaultPage<>(currentPageElements, currentPageNumber, isLast); - } - - /** - * Extracts a page from an asynchronous result set, by skipping rows until we get to the requested - * offset. - * - * @param iterable the iterable to extract the results from. Typically an {@link - * AsyncPagingIterable}, or a {@link MappedAsyncPagingIterable} returned by the mapper. - * @param targetPageNumber the page to return (1 for the first page, 2 for the second page, etc). - * Must be greater than or equal to 1. - * @return a stage that will complete with the requested page, or the last page if the requested - * page was past the end of the iterable. - * @throws IllegalArgumentException if the conditions on the arguments are not respected. 
- */ - @NonNull - public > - CompletionStage> getPage( - @NonNull IterableT iterable, final int targetPageNumber) { - - // Throw IllegalArgumentException directly instead of failing the stage, since it signals - // blatant programming errors - throwIfIllegalArguments(iterable, targetPageNumber); - - CompletableFuture> pageFuture = new CompletableFuture<>(); - getPage(iterable, targetPageNumber, 1, 0, new ArrayList<>(), pageFuture); - - return pageFuture; - } - - private void throwIfIllegalArguments(@NonNull Object iterable, int targetPageNumber) { - Objects.requireNonNull(iterable); - if (targetPageNumber < 1) { - throw new IllegalArgumentException( - "Invalid targetPageNumber, expected >=1, got " + targetPageNumber); - } - } - - /** - * Main method for the async iteration. - * - *

See the synchronous version in {@link #getPage(PagingIterable, int)} for more explanations: - * this is identical, except that it is async and we need to handle protocol page transitions - * manually. - */ - private , ElementT> void getPage( - @NonNull IterableT iterable, - final int targetPageNumber, - int currentPageNumber, - int currentPageSize, - @NonNull List currentPageElements, - @NonNull CompletableFuture> pageFuture) { - - // Note: iterable.currentPage()/fetchNextPage() refer to protocol-level pages, do not confuse - // with logical pages handled by this class - Iterator currentFrame = iterable.currentPage().iterator(); - while (currentFrame.hasNext()) { - ElementT element = currentFrame.next(); - - currentPageSize += 1; - - if (currentPageSize > pageSize) { - currentPageNumber += 1; - currentPageSize = 1; - currentPageElements.clear(); - } - - currentPageElements.add(element); - - if (currentPageNumber == targetPageNumber && currentPageSize == pageSize) { - // Full-size target page. In this method it's simpler to finish directly here. 
- if (currentFrame.hasNext()) { - pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, false)); - } else if (!iterable.hasMorePages()) { - pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, true)); - } else { - // It's possible for the server to return an empty last frame, so we need to fetch it to - // know for sure whether there are more elements - int finalCurrentPageNumber = currentPageNumber; - iterable - .fetchNextPage() - .whenComplete( - (nextIterable, throwable) -> { - if (throwable != null) { - pageFuture.completeExceptionally(throwable); - } else { - boolean isLastPage = !nextIterable.currentPage().iterator().hasNext(); - pageFuture.complete( - new DefaultPage<>( - currentPageElements, finalCurrentPageNumber, isLastPage)); - } - }); - } - return; - } - } - - if (iterable.hasMorePages()) { - int finalCurrentPageNumber = currentPageNumber; - int finalCurrentPageSize = currentPageSize; - iterable - .fetchNextPage() - .whenComplete( - (nextIterable, throwable) -> { - if (throwable != null) { - pageFuture.completeExceptionally(throwable); - } else { - getPage( - nextIterable, - targetPageNumber, - finalCurrentPageNumber, - finalCurrentPageSize, - currentPageElements, - pageFuture); - } - }); - } else { - // Reached the end of the result set, finish with what we have so far - pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, true)); - } - } - - private static class DefaultPage implements Page { - private final List elements; - private final int pageNumber; - private final boolean isLast; - - DefaultPage(@NonNull List elements, int pageNumber, boolean isLast) { - this.elements = ImmutableList.copyOf(elements); - this.pageNumber = pageNumber; - this.isLast = isLast; - } - - @NonNull - @Override - public List getElements() { - return elements; - } - - @Override - public int getPageNumber() { - return pageNumber; - } - - @Override - public boolean isLast() { - return isLast; - } - } -} 
diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java deleted file mode 100644 index 4b57b781822..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.retry; - -/** - * A decision from the {@link RetryPolicy} on how to handle a retry. - * - * @see RetryVerdict#getRetryDecision() - */ -public enum RetryDecision { - /** Retry the operation on the same node. */ - RETRY_SAME, - /** Retry the operation on the next available node in the query plan (if any). */ - RETRY_NEXT, - /** Rethrow to the calling code, as the result of the execute operation. */ - RETHROW, - /** Don't retry and return an empty result set to the calling code. 
*/ - IGNORE, - ; -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java deleted file mode 100644 index e8546816e23..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.retry; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Defines the behavior to adopt when a request fails. - * - *

For each request, the driver gets a "query plan" (a list of coordinators to try) from the - * {@link LoadBalancingPolicy}, and tries each node in sequence. This policy is invoked if the - * request to that node fails. - * - *

The methods of this interface are invoked on I/O threads, therefore implementations should - * never block. In particular, don't call {@link Thread#sleep(long)} to retry after a delay: - * this would prevent asynchronous processing of other requests, and very negatively impact - * throughput. If the application needs to back off and retry later, this should be implemented in - * client code, not in this policy. - */ -public interface RetryPolicy extends AutoCloseable { - - /** - * Whether to retry when the server replied with a {@code READ_TIMEOUT} error; this indicates a - * server-side timeout during a read query, i.e. some replicas did not reply to the - * coordinator in time. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param dataPresent whether the actual data was amongst the received replica responses. See - * {@link ReadTimeoutException#wasDataPresent()}. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onReadTimeoutVerdict(Request, ConsistencyLevel, - * int, int, boolean, int)} instead. - */ - @Deprecated - RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount); - - /** - * Whether to retry when the server replied with a {@code READ_TIMEOUT} error; this indicates a - * server-side timeout during a read query, i.e. some replicas did not reply to the - * coordinator in time. - * - * @param request the request that timed out. - * @param cl the requested consistency level. 
- * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param dataPresent whether the actual data was amongst the received replica responses. See - * {@link ReadTimeoutException#wasDataPresent()}. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onReadTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - RetryDecision decision = - onReadTimeout(request, cl, blockFor, received, dataPresent, retryCount); - return () -> decision; - } - - /** - * Whether to retry when the server replied with a {@code WRITE_TIMEOUT} error; this indicates a - * server-side timeout during a write query, i.e. some replicas did not reply to the - * coordinator in time. - * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when a write times out, it is impossible to determine with 100% certainty whether the - * mutation was applied or not, so the write is never safe to retry; the driver will rethrow the - * error directly, without invoking the retry policy. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param writeType the type of the write for which the timeout was raised. - * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onWriteTimeoutVerdict(Request, ConsistencyLevel, - * WriteType, int, int, int)} instead. - */ - @Deprecated - RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount); - - /** - * Whether to retry when the server replied with a {@code WRITE_TIMEOUT} error; this indicates a - * server-side timeout during a write query, i.e. some replicas did not reply to the - * coordinator in time. - * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when a write times out, it is impossible to determine with 100% certainty whether the - * mutation was applied or not, so the write is never safe to retry; the driver will rethrow the - * error directly, without invoking the retry policy. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param writeType the type of the write for which the timeout was raised. - * @param blockFor the minimum number of replica acknowledgements/responses that were required to - * fulfill the operation. - * @param received the number of replica that had acknowledged/responded to the operation before - * it failed. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onWriteTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - RetryDecision decision = onWriteTimeout(request, cl, writeType, blockFor, received, retryCount); - return () -> decision; - } - - /** - * Whether to retry when the server replied with an {@code UNAVAILABLE} error; this indicates that - * the coordinator determined that there were not enough replicas alive to perform a query with - * the requested consistency level. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param required the number of replica acknowledgements/responses required to perform the - * operation (with its required consistency level). - * @param alive the number of replicas that were known to be alive by the coordinator node when it - * tried to execute the operation. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). 
- * @deprecated As of version 4.10, use {@link #onUnavailableVerdict(Request, ConsistencyLevel, - * int, int, int)} instead. - */ - @Deprecated - RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount); - - /** - * Whether to retry when the server replied with an {@code UNAVAILABLE} error; this indicates that - * the coordinator determined that there were not enough replicas alive to perform a query with - * the requested consistency level. - * - * @param request the request that timed out. - * @param cl the requested consistency level. - * @param required the number of replica acknowledgements/responses required to perform the - * operation (with its required consistency level). - * @param alive the number of replicas that were known to be alive by the coordinator node when it - * tried to execute the operation. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onUnavailableVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - RetryDecision decision = onUnavailable(request, cl, required, alive, retryCount); - return () -> decision; - } - - /** - * Whether to retry when a request was aborted before we could get a response from the server. - * - *

This can happen in two cases: if the connection was closed due to an external event (this - * will manifest as a {@link ClosedConnectionException}, or {@link HeartbeatException} for a - * heartbeat failure); or if there was an unexpected error while decoding the response (this can - * only be a driver bug). - * - *

Note that this method will only be invoked for {@linkplain Request#isIdempotent() - * idempotent} requests: when execution was aborted before getting a response, it is impossible to - * determine with 100% certainty whether a mutation was applied or not, so a write is never safe - * to retry; the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that was aborted. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onRequestAbortedVerdict(Request, Throwable, int)} - * instead. - */ - @Deprecated - RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount); - - /** - * Whether to retry when a request was aborted before we could get a response from the server. - * - *

This can happen in two cases: if the connection was closed due to an external event (this - * will manifest as a {@link ClosedConnectionException}, or {@link HeartbeatException} for a - * heartbeat failure); or if there was an unexpected error while decoding the response (this can - * only be a driver bug). - * - *

Note that this method will only be invoked for {@linkplain Request#isIdempotent() - * idempotent} requests: when execution was aborted before getting a response, it is impossible to - * determine with 100% certainty whether a mutation was applied or not, so a write is never safe - * to retry; the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that was aborted. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onRequestAbortedVerdict( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - RetryDecision decision = onRequestAborted(request, error, retryCount); - return () -> decision; - } - - /** - * Whether to retry when the server replied with a recoverable error (other than {@code - * READ_TIMEOUT}, {@code WRITE_TIMEOUT} or {@code UNAVAILABLE}). - * - *

This can happen for the following errors: {@link OverloadedException}, {@link ServerError}, - * {@link TruncateException}, {@link ReadFailureException}, {@link WriteFailureException}. - * - *

The following errors are handled internally by the driver, and therefore will never - * be encountered in this method: - * - *

    - *
  • {@link BootstrappingException}: always retried on the next node; - *
  • {@link QueryValidationException} (and its subclasses), {@link FunctionFailureException} - * and {@link ProtocolError}: always rethrown. - *
- * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when execution was aborted before getting a response, it is impossible to determine - * with 100% certainty whether a mutation was applied or not, so a write is never safe to retry; - * the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that failed. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - * @deprecated As of version 4.10, use {@link #onErrorResponseVerdict(Request, - * CoordinatorException, int)} instead. - */ - @Deprecated - RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount); - - /** - * Whether to retry when the server replied with a recoverable error (other than {@code - * READ_TIMEOUT}, {@code WRITE_TIMEOUT} or {@code UNAVAILABLE}). - * - *

This can happen for the following errors: {@link OverloadedException}, {@link ServerError}, - * {@link TruncateException}, {@link ReadFailureException}, {@link WriteFailureException}. - * - *

The following errors are handled internally by the driver, and therefore will never - * be encountered in this method: - * - *

    - *
  • {@link BootstrappingException}: always retried on the next node; - *
  • {@link QueryValidationException} (and its subclasses), {@link FunctionFailureException} - * and {@link ProtocolError}: always rethrown. - *
- * - *

Note that this method will only be invoked for {@link Request#isIdempotent()} idempotent} - * requests: when execution was aborted before getting a response, it is impossible to determine - * with 100% certainty whether a mutation was applied or not, so a write is never safe to retry; - * the driver will rethrow the error directly, without invoking the retry policy. - * - * @param request the request that failed. - * @param error the error. - * @param retryCount how many times the retry policy has been invoked already for this request - * (not counting the current invocation). - */ - default RetryVerdict onErrorResponseVerdict( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - RetryDecision decision = onErrorResponse(request, error, retryCount); - return () -> decision; - } - - /** Called when the cluster that this policy is associated with closes. */ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java deleted file mode 100644 index 9abb54156db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.retry; - -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryVerdict; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The verdict returned by a {@link RetryPolicy} determining what to do when a request failed. A - * verdict contains a {@link RetryDecision} indicating if a retry should be attempted at all and - * where, and a method that allows the original request to be modified before the retry. - */ -@FunctionalInterface -public interface RetryVerdict { - - /** A retry verdict that retries the same request on the same node. */ - RetryVerdict RETRY_SAME = new DefaultRetryVerdict(RetryDecision.RETRY_SAME); - - /** A retry verdict that retries the same request on the next node in the query plan. */ - RetryVerdict RETRY_NEXT = new DefaultRetryVerdict(RetryDecision.RETRY_NEXT); - - /** A retry verdict that ignores the error, returning and empty result set to the caller. */ - RetryVerdict IGNORE = new DefaultRetryVerdict(RetryDecision.IGNORE); - - /** A retry verdict that rethrows the execution error to the calling code. */ - RetryVerdict RETHROW = new DefaultRetryVerdict(RetryDecision.RETHROW); - - /** @return The retry decision to apply. */ - @NonNull - RetryDecision getRetryDecision(); - - /** - * Returns the request to retry, based on the request that was just executed (and failed). - * - *

The default retry policy always returns the request as is. Custom retry policies can use - * this method to customize the request to retry, for example, by changing its consistency level, - * query timestamp, custom payload, or even its execution profile. - * - * @param The actual type of the request. - * @param previous The request that was just executed (and failed). - * @return The request to retry. - */ - @NonNull - default RequestT getRetryRequest(@NonNull RequestT previous) { - return previous; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java deleted file mode 100644 index 2bf541c91de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when a query attempts to create a keyspace or table that already exists. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class AlreadyExistsException extends QueryValidationException { - - private final String keyspace; - private final String table; - - public AlreadyExistsException( - @NonNull Node coordinator, @NonNull String keyspace, @NonNull String table) { - this(coordinator, makeMessage(keyspace, table), keyspace, table, null, false); - } - - private AlreadyExistsException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull String keyspace, - @NonNull String table, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - this.keyspace = keyspace; - this.table = table; - } - - private static String makeMessage(String keyspace, String table) { - if (table == null || table.isEmpty()) { - return String.format("Keyspace %s already exists", keyspace); - } else { - return String.format("Object %s.%s already exists", keyspace, table); - } - } - - @NonNull - @Override - public DriverException copy() { - return new AlreadyExistsException( - getCoordinator(), getMessage(), keyspace, table, getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java deleted file mode 100644 index a408e0384f5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when the coordinator was bootstrapping when it received a query. - * - *

This exception does not go through the {@link RetryPolicy}, the query is always retried on the - * next node. Therefore the only way the client can observe this exception is in an {@link - * AllNodesFailedException}. - */ -public class BootstrappingException extends QueryExecutionException { - - public BootstrappingException(@NonNull Node coordinator) { - this(coordinator, String.format("%s is bootstrapping", coordinator), null, false); - } - - private BootstrappingException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new BootstrappingException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java deleted file mode 100644 index 477bf7813c9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The result of a CAS operation is in an unknown state. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)} , which will decide if it is rethrown directly to the client or if - * the request should be retried. If all other tried nodes also fail, this exception will appear in - * the {@link AllNodesFailedException} thrown to the client. - */ -public class CASWriteUnknownException extends QueryConsistencyException { - - public CASWriteUnknownException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor) { - this( - coordinator, - String.format( - "CAS operation result is unknown - proposal was not accepted by a quorum. (%d / %d)", - received, blockFor), - consistencyLevel, - received, - blockFor, - null, - false); - } - - private CASWriteUnknownException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new CASWriteUnknownException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java deleted file mode 100644 index 3ce782653ab..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An attempt was made to write to a commitlog segment which doesn't support CDC mutations. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class CDCWriteFailureException extends QueryExecutionException { - - public CDCWriteFailureException(@NonNull Node coordinator) { - super(coordinator, "Commitlog does not support CDC mutations", null, false); - } - - public CDCWriteFailureException(@NonNull Node coordinator, @NonNull String message) { - super(coordinator, "Commitlog does not support CDC mutations", null, false); - } - - private CDCWriteFailureException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new CDCWriteFailureException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java deleted file mode 100644 index 8f6052850df..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** A server-side error thrown by the coordinator node in response to a driver request. */ -public abstract class CoordinatorException extends DriverException { - - // This is also present on ExecutionInfo. But the execution info is only set for errors that are - // rethrown to the client, not on errors that get retried. It can be useful to know the node in - // the retry policy, so store it here, it might be duplicated but that doesn't matter. 
- private final Node coordinator; - - protected CoordinatorException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(message, executionInfo, null, writableStackTrace); - this.coordinator = coordinator; - } - - @NonNull - public Node getCoordinator() { - return coordinator; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java deleted file mode 100644 index a24097e6e5b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -/** A default write type supported by the driver out of the box. */ -public enum DefaultWriteType implements WriteType { - - /** A write to a single partition key. Such writes are guaranteed to be atomic and isolated. 
*/ - SIMPLE, - /** - * A write to a multiple partition key that used the distributed batch log to ensure atomicity - * (atomicity meaning that if any statement in the batch succeeds, all will eventually succeed). - */ - BATCH, - /** - * A write to a multiple partition key that doesn't use the distributed batch log. Atomicity for - * such writes is not guaranteed - */ - UNLOGGED_BATCH, - /** - * A counter write (that can be for one or multiple partition key). Such write should not be - * replayed to avoid over-counting. - */ - COUNTER, - /** - * The initial write to the distributed batch log that Cassandra performs internally before a - * BATCH write. - */ - BATCH_LOG, - /** - * A conditional write. If a timeout has this {@code WriteType}, the timeout has happened while - * doing the compare-and-swap for an conditional update. In this case, the update may or may not - * have been applied. - */ - CAS, - /** - * Indicates that the timeout was related to acquiring locks needed for updating materialized - * views affected by write operation. - */ - VIEW, - /** - * Indicates that the timeout was related to acquiring space for change data capture logs for cdc - * tracked tables. - */ - CDC, - ; - // Note that, for the sake of convenience, we also expose shortcuts to these constants on the - // WriteType interface. If you add a new enum constant, remember to update the interface as - // well. -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java deleted file mode 100644 index 31993762319..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An error during the execution of a CQL function. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class FunctionFailureException extends QueryExecutionException { - - public FunctionFailureException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private FunctionFailureException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new FunctionFailureException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java deleted file mode 100644 index 405efa47299..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that a query is invalid because of some configuration problem. - * - *

This is generally throw by queries that manipulate the schema (CREATE and ALTER) when the - * required configuration options are invalid. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class InvalidConfigurationInQueryException extends QueryValidationException { - - public InvalidConfigurationInQueryException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private InvalidConfigurationInQueryException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new InvalidConfigurationInQueryException( - getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java deleted file mode 100644 index 468de8a1bd0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates a syntactically correct, but invalid query. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class InvalidQueryException extends QueryValidationException { - - public InvalidQueryException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private InvalidQueryException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new InvalidQueryException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java deleted file mode 100644 index f56a7f30a7e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Thrown when the coordinator reported itself as being overloaded. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class OverloadedException extends QueryExecutionException { - - public OverloadedException(@NonNull Node coordinator) { - super(coordinator, String.format("%s is overloaded", coordinator), null, false); - } - - public OverloadedException(@NonNull Node coordinator, @NonNull String message) { - super(coordinator, String.format("%s is overloaded: %s", coordinator, message), null, false); - } - - private OverloadedException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new OverloadedException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java deleted file mode 100644 index 898a857954f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that the contacted node reported a protocol error. - * - *

Protocol errors indicate that the client triggered a protocol violation (for instance, a - * {@code QUERY} message is sent before a {@code STARTUP} one has been sent). Protocol errors should - * be considered as a bug in the driver and reported as such. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class ProtocolError extends CoordinatorException { - - public ProtocolError(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private ProtocolError( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new ProtocolError(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java deleted file mode 100644 index 4a6f97f3342..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A failure to reach the required consistency level during the execution of a query. - * - *

Such an exception is returned when the query has been tried by Cassandra but cannot be - * achieved with the requested consistency level because either: - * - *

    - *
  • the coordinator did not receive enough replica responses within the rpc timeout set for - * Cassandra; - *
  • some replicas replied with an error. - *
- */ -public abstract class QueryConsistencyException extends QueryExecutionException { - - private final ConsistencyLevel consistencyLevel; - private final int received; - private final int blockFor; - - protected QueryConsistencyException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - this.consistencyLevel = consistencyLevel; - this.received = received; - this.blockFor = blockFor; - } - - /** The consistency level of the operation that failed. */ - @NonNull - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - /** The number of replica that had acknowledged/responded to the operation before it failed. */ - public int getReceived() { - return received; - } - - /** - * The minimum number of replica acknowledgements/responses that were required to fulfill the - * operation. - */ - public int getBlockFor() { - return blockFor; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java deleted file mode 100644 index 541a32d9fba..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** A server-side error thrown when a valid query cannot be executed. */ -public abstract class QueryExecutionException extends CoordinatorException { - - protected QueryExecutionException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java deleted file mode 100644 index 9c8dfe537b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A server-side error thrown when a query cannot be executed because it is syntactically incorrect, - * invalid or unauthorized. - */ -public abstract class QueryValidationException extends CoordinatorException { - - protected QueryValidationException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java deleted file mode 100644 index 94c4404f8d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.util.Map; - -/** - * A non-timeout error during a read query. - * - *

This happens when some of the replicas that were contacted by the coordinator replied with an - * error. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class ReadFailureException extends QueryConsistencyException { - - private final int numFailures; - private final boolean dataPresent; - private final Map reasonMap; - - public ReadFailureException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - int numFailures, - boolean dataPresent, - @NonNull Map reasonMap) { - this( - coordinator, - String.format( - "Cassandra failure during read query at consistency %s " - + "(%d responses were required but only %d replica responded, %d failed)", - consistencyLevel, blockFor, received, numFailures), - consistencyLevel, - received, - blockFor, - numFailures, - dataPresent, - reasonMap, - null, - false); - } - - private ReadFailureException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - int numFailures, - boolean dataPresent, - @NonNull Map reasonMap, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.numFailures = numFailures; - this.dataPresent = dataPresent; - this.reasonMap = reasonMap; - } - - /** Returns the number of replicas that experienced a failure while executing the request. */ - public int getNumFailures() { - return numFailures; - } - - /** - * Whether the actual data was amongst the received replica responses. - * - *

During reads, Cassandra doesn't request data from every replica to minimize internal network - * traffic. Instead, some replicas are only asked for a checksum of the data. A read failure may - * occur even if enough replicas have responded to fulfill the consistency level, if only checksum - * responses have been received. This method allows to detect that case. - */ - public boolean wasDataPresent() { - return dataPresent; - } - - /** - * Returns the a failure reason code for each node that failed. - * - *

At the time of writing, the existing reason codes are: - * - *

    - *
  • {@code 0x0000}: the error does not have a specific code assigned yet, or the cause is - * unknown. - *
  • {@code 0x0001}: The read operation scanned too many tombstones (as defined by {@code - * tombstone_failure_threshold} in {@code cassandra.yaml}, causing a {@code - * TombstoneOverwhelmingException}. - *
- * - * (please refer to the Cassandra documentation for your version for the most up-to-date list of - * errors) - * - *

This feature is available for protocol v5 or above only. With lower protocol versions, the - * map will always be empty. - */ - @NonNull - public Map getReasonMap() { - return reasonMap; - } - - @NonNull - @Override - public DriverException copy() { - return new ReadFailureException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - numFailures, - dataPresent, - reasonMap, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java deleted file mode 100644 index 4dddfedf49a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A server-side timeout during a read query. - * - *

This exception is processed by {@link RetryPolicy#onReadTimeoutVerdict(Request, - * ConsistencyLevel, int, int, boolean, int)}, which will decide if it is rethrown directly to the - * client or if the request should be retried. If all other tried nodes also fail, this exception - * will appear in the {@link AllNodesFailedException} thrown to the client. - */ -public class ReadTimeoutException extends QueryConsistencyException { - - private final boolean dataPresent; - - public ReadTimeoutException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - boolean dataPresent) { - this( - coordinator, - String.format( - "Cassandra timeout during read query at consistency %s (%s). " - + "In case this was generated during read repair, the consistency level is not representative of the actual consistency.", - consistencyLevel, formatDetails(received, blockFor, dataPresent)), - consistencyLevel, - received, - blockFor, - dataPresent, - null, - false); - } - - private ReadTimeoutException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - boolean dataPresent, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.dataPresent = dataPresent; - } - - private static String formatDetails(int received, int blockFor, boolean dataPresent) { - if (received < blockFor) { - return String.format( - "%d responses were required but only %d replica responded", blockFor, received); - } else if (!dataPresent) { - return "the replica queried for data didn't respond"; - } else { - return "timeout while waiting for repair of inconsistent replica"; - } - } - - /** - * Whether the actual data was amongst the received replica responses. - * - *

During reads, Cassandra doesn't request data from every replica to minimize internal network - * traffic. Instead, some replicas are only asked for a checksum of the data. A read timeout may - * occur even if enough replicas have responded to fulfill the consistency level, if only checksum - * responses have been received. This method allows to detect that case. - */ - public boolean wasDataPresent() { - return dataPresent; - } - - @NonNull - @Override - public DriverException copy() { - return new ReadTimeoutException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - dataPresent, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java deleted file mode 100644 index de300803421..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that the contacted node reported an internal error. - * - *

This should be considered as a server bug and reported as such. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class ServerError extends CoordinatorException { - - public ServerError(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private ServerError( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new ServerError(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java deleted file mode 100644 index 708068c0299..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A syntax error in a query. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class SyntaxError extends QueryValidationException { - - public SyntaxError(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private SyntaxError( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new SyntaxError(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java deleted file mode 100644 index 2091d166e98..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An error during a truncation operation. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class TruncateException extends QueryExecutionException { - - public TruncateException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private TruncateException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new TruncateException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java deleted file mode 100644 index 7a6235422de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Indicates that a query cannot be performed due to the authorization restrictions of the logged - * user. - * - *

This exception does not go through the {@link RetryPolicy}, it is always rethrown directly to - * the client. - */ -public class UnauthorizedException extends QueryValidationException { - - public UnauthorizedException(@NonNull Node coordinator, @NonNull String message) { - this(coordinator, message, null, false); - } - - private UnauthorizedException( - @NonNull Node coordinator, - @NonNull String message, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - } - - @NonNull - @Override - public DriverException copy() { - return new UnauthorizedException(getCoordinator(), getMessage(), getExecutionInfo(), true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java deleted file mode 100644 index b9e9848ce36..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Thrown when the coordinator knows there is not enough replicas alive to perform a query with the - * requested consistency level. - * - *

This exception is processed by {@link RetryPolicy#onUnavailableVerdict(Request, - * ConsistencyLevel, int, int, int)}, which will decide if it is rethrown directly to the client or - * if the request should be retried. If all other tried nodes also fail, this exception will appear - * in the {@link AllNodesFailedException} thrown to the client. - */ -public class UnavailableException extends QueryExecutionException { - private final ConsistencyLevel consistencyLevel; - private final int required; - private final int alive; - - public UnavailableException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int required, - int alive) { - this( - coordinator, - String.format( - "Not enough replicas available for query at consistency %s (%d required but only %d alive)", - consistencyLevel, required, alive), - consistencyLevel, - required, - alive, - null, - false); - } - - private UnavailableException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int required, - int alive, - ExecutionInfo executionInfo, - boolean writableStackTrace) { - super(coordinator, message, executionInfo, writableStackTrace); - this.consistencyLevel = consistencyLevel; - this.required = required; - this.alive = alive; - } - - /** The consistency level of the operation triggering this exception. */ - @NonNull - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - /** - * The number of replica acknowledgements/responses required to perform the operation (with its - * required consistency level). - */ - public int getRequired() { - return required; - } - - /** - * The number of replicas that were known to be alive by the coordinator node when it tried to - * execute the operation. 
- */ - public int getAlive() { - return alive; - } - - @NonNull - @Override - public DriverException copy() { - return new UnavailableException( - getCoordinator(), - getMessage(), - consistencyLevel, - required, - alive, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java deleted file mode 100644 index ffbbd2aef6f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.util.Map; - -/** - * A non-timeout error during a write query. - * - *

This happens when some of the replicas that were contacted by the coordinator replied with an - * error. - * - *

This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, - * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. - */ -public class WriteFailureException extends QueryConsistencyException { - - private final WriteType writeType; - private final int numFailures; - private final Map reasonMap; - - public WriteFailureException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType, - int numFailures, - @NonNull Map reasonMap) { - this( - coordinator, - String.format( - "Cassandra failure during write query at consistency %s " - + "(%d responses were required but only %d replica responded, %d failed)", - consistencyLevel, blockFor, received, numFailures), - consistencyLevel, - received, - blockFor, - writeType, - numFailures, - reasonMap, - null, - false); - } - - private WriteFailureException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType, - int numFailures, - @NonNull Map reasonMap, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.writeType = writeType; - this.numFailures = numFailures; - this.reasonMap = reasonMap; - } - - /** The type of the write for which this failure was raised. */ - @NonNull - public WriteType getWriteType() { - return writeType; - } - - /** Returns the number of replicas that experienced a failure while executing the request. */ - public int getNumFailures() { - return numFailures; - } - - /** - * Returns the a failure reason code for each node that failed. - * - *

At the time of writing, the existing reason codes are: - * - *

    - *
  • {@code 0x0000}: the error does not have a specific code assigned yet, or the cause is - * unknown. - *
  • {@code 0x0001}: The read operation scanned too many tombstones (as defined by {@code - * tombstone_failure_threshold} in {@code cassandra.yaml}, causing a {@code - * TombstoneOverwhelmingException}. - *
- * - * (please refer to the Cassandra documentation for your version for the most up-to-date list of - * errors) - * - *

This feature is available for protocol v5 or above only. With lower protocol versions, the - * map will always be empty. - */ - @NonNull - public Map getReasonMap() { - return reasonMap; - } - - @NonNull - @Override - public DriverException copy() { - return new WriteFailureException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - writeType, - numFailures, - reasonMap, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java deleted file mode 100644 index 9913dbd0a91..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.servererrors; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A server-side timeout during a write query. - * - *

This exception is processed by {@link RetryPolicy#onWriteTimeoutVerdict(Request, - * ConsistencyLevel, WriteType, int, int, int)}, which will decide if it is rethrown directly to the - * client or if the request should be retried. If all other tried nodes also fail, this exception - * will appear in the {@link AllNodesFailedException} thrown to the client. - */ -public class WriteTimeoutException extends QueryConsistencyException { - - private final WriteType writeType; - - public WriteTimeoutException( - @NonNull Node coordinator, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType) { - this( - coordinator, - String.format( - "Cassandra timeout during %s write query at consistency %s " - + "(%d replica were required but only %d acknowledged the write)", - writeType, consistencyLevel, blockFor, received), - consistencyLevel, - received, - blockFor, - writeType, - null, - false); - } - - private WriteTimeoutException( - @NonNull Node coordinator, - @NonNull String message, - @NonNull ConsistencyLevel consistencyLevel, - int received, - int blockFor, - @NonNull WriteType writeType, - @Nullable ExecutionInfo executionInfo, - boolean writableStackTrace) { - super( - coordinator, - message, - consistencyLevel, - received, - blockFor, - executionInfo, - writableStackTrace); - this.writeType = writeType; - } - - /** The type of the write for which a timeout was raised. 
*/ - @NonNull - public WriteType getWriteType() { - return writeType; - } - - @NonNull - @Override - public DriverException copy() { - return new WriteTimeoutException( - getCoordinator(), - getMessage(), - getConsistencyLevel(), - getReceived(), - getBlockFor(), - writeType, - getExecutionInfo(), - true); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java deleted file mode 100644 index 05ad99e5ce4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.servererrors; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The type of a Cassandra write query. - * - *

This information is returned by Cassandra when a write timeout is raised, to indicate what - * type of write timed out. It is useful to decide which retry decision to adopt. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code WriteType}s are {@link DefaultWriteType} instances. - */ -public interface WriteType { - - WriteType SIMPLE = DefaultWriteType.SIMPLE; - WriteType BATCH = DefaultWriteType.BATCH; - WriteType UNLOGGED_BATCH = DefaultWriteType.UNLOGGED_BATCH; - WriteType COUNTER = DefaultWriteType.COUNTER; - WriteType BATCH_LOG = DefaultWriteType.BATCH_LOG; - WriteType CAS = DefaultWriteType.CAS; - WriteType VIEW = DefaultWriteType.VIEW; - WriteType CDC = DefaultWriteType.CDC; - - /** The textual representation that the write type is encoded to in protocol frames. */ - @NonNull - String name(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java deleted file mode 100644 index 5e10fb4d915..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.NodeFilterToDistanceEvaluatorAdapter; -import com.datastax.oss.driver.internal.core.metadata.MultiplexingNodeStateListener; -import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener; -import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.UUID; -import java.util.function.Predicate; - -/** - * The arguments that can be set programmatically when building a session. - * - *

This is mostly for internal use, you only need to deal with this directly if you write custom - * {@link SessionBuilder} subclasses. - */ -public class ProgrammaticArguments { - - @NonNull - public static Builder builder() { - return new Builder(); - } - - private final List> typeCodecs; - private final NodeStateListener nodeStateListener; - private final SchemaChangeListener schemaChangeListener; - private final RequestTracker requestTracker; - private final RequestIdGenerator requestIdGenerator; - private final Map localDatacenters; - private final Map> nodeFilters; - private final Map nodeDistanceEvaluators; - private final ClassLoader classLoader; - private final AuthProvider authProvider; - private final SslEngineFactory sslEngineFactory; - private final InetSocketAddress cloudProxyAddress; - private final UUID startupClientId; - private final String startupApplicationName; - private final String startupApplicationVersion; - private final MutableCodecRegistry codecRegistry; - private final Object metricRegistry; - - private ProgrammaticArguments( - @NonNull List> typeCodecs, - @Nullable NodeStateListener nodeStateListener, - @Nullable SchemaChangeListener schemaChangeListener, - @Nullable RequestTracker requestTracker, - @Nullable RequestIdGenerator requestIdGenerator, - @NonNull Map localDatacenters, - @NonNull Map> nodeFilters, - @NonNull Map nodeDistanceEvaluators, - @Nullable ClassLoader classLoader, - @Nullable AuthProvider authProvider, - @Nullable SslEngineFactory sslEngineFactory, - @Nullable InetSocketAddress cloudProxyAddress, - @Nullable UUID startupClientId, - @Nullable String startupApplicationName, - @Nullable String startupApplicationVersion, - @Nullable MutableCodecRegistry codecRegistry, - @Nullable Object metricRegistry) { - - this.typeCodecs = typeCodecs; - this.nodeStateListener = nodeStateListener; - this.schemaChangeListener = schemaChangeListener; - this.requestTracker = requestTracker; - this.requestIdGenerator = requestIdGenerator; - 
this.localDatacenters = localDatacenters; - this.nodeFilters = nodeFilters; - this.nodeDistanceEvaluators = nodeDistanceEvaluators; - this.classLoader = classLoader; - this.authProvider = authProvider; - this.sslEngineFactory = sslEngineFactory; - this.cloudProxyAddress = cloudProxyAddress; - this.startupClientId = startupClientId; - this.startupApplicationName = startupApplicationName; - this.startupApplicationVersion = startupApplicationVersion; - this.codecRegistry = codecRegistry; - this.metricRegistry = metricRegistry; - } - - @NonNull - public List> getTypeCodecs() { - return typeCodecs; - } - - @Nullable - public NodeStateListener getNodeStateListener() { - return nodeStateListener; - } - - @Nullable - public SchemaChangeListener getSchemaChangeListener() { - return schemaChangeListener; - } - - @Nullable - public RequestTracker getRequestTracker() { - return requestTracker; - } - - @Nullable - public RequestIdGenerator getRequestIdGenerator() { - return requestIdGenerator; - } - - @NonNull - public Map getLocalDatacenters() { - return localDatacenters; - } - - @NonNull - @Deprecated - @SuppressWarnings("DeprecatedIsStillUsed") - public Map> getNodeFilters() { - return nodeFilters; - } - - @NonNull - public Map getNodeDistanceEvaluators() { - return nodeDistanceEvaluators; - } - - @Nullable - public ClassLoader getClassLoader() { - return classLoader; - } - - @Nullable - public AuthProvider getAuthProvider() { - return authProvider; - } - - @Nullable - public SslEngineFactory getSslEngineFactory() { - return sslEngineFactory; - } - - @Nullable - public InetSocketAddress getCloudProxyAddress() { - return cloudProxyAddress; - } - - @Nullable - public UUID getStartupClientId() { - return startupClientId; - } - - @Nullable - public String getStartupApplicationName() { - return startupApplicationName; - } - - @Nullable - public String getStartupApplicationVersion() { - return startupApplicationVersion; - } - - @Nullable - public MutableCodecRegistry 
getCodecRegistry() { - return codecRegistry; - } - - @Nullable - public Object getMetricRegistry() { - return metricRegistry; - } - - public static class Builder { - - private final ImmutableList.Builder> typeCodecsBuilder = ImmutableList.builder(); - private NodeStateListener nodeStateListener; - private SchemaChangeListener schemaChangeListener; - private RequestTracker requestTracker; - private RequestIdGenerator requestIdGenerator; - private ImmutableMap.Builder localDatacentersBuilder = ImmutableMap.builder(); - private final ImmutableMap.Builder> nodeFiltersBuilder = - ImmutableMap.builder(); - private final ImmutableMap.Builder - nodeDistanceEvaluatorsBuilder = ImmutableMap.builder(); - private ClassLoader classLoader; - private AuthProvider authProvider; - private SslEngineFactory sslEngineFactory; - private InetSocketAddress cloudProxyAddress; - private UUID startupClientId; - private String startupApplicationName; - private String startupApplicationVersion; - private MutableCodecRegistry codecRegistry; - private Object metricRegistry; - - @NonNull - public Builder addTypeCodecs(@NonNull TypeCodec... 
typeCodecs) { - this.typeCodecsBuilder.add(typeCodecs); - return this; - } - - @NonNull - public Builder withNodeStateListener(@Nullable NodeStateListener nodeStateListener) { - this.nodeStateListener = nodeStateListener; - return this; - } - - @NonNull - public Builder addNodeStateListener(@NonNull NodeStateListener nodeStateListener) { - Objects.requireNonNull(nodeStateListener, "nodeStateListener cannot be null"); - if (this.nodeStateListener == null) { - this.nodeStateListener = nodeStateListener; - } else { - NodeStateListener previousListener = this.nodeStateListener; - if (previousListener instanceof MultiplexingNodeStateListener) { - ((MultiplexingNodeStateListener) previousListener).register(nodeStateListener); - } else { - MultiplexingNodeStateListener multiplexingNodeStateListener = - new MultiplexingNodeStateListener(); - multiplexingNodeStateListener.register(previousListener); - multiplexingNodeStateListener.register(nodeStateListener); - this.nodeStateListener = multiplexingNodeStateListener; - } - } - return this; - } - - @NonNull - public Builder withSchemaChangeListener(@Nullable SchemaChangeListener schemaChangeListener) { - this.schemaChangeListener = schemaChangeListener; - return this; - } - - @NonNull - public Builder addSchemaChangeListener(@NonNull SchemaChangeListener schemaChangeListener) { - Objects.requireNonNull(schemaChangeListener, "schemaChangeListener cannot be null"); - if (this.schemaChangeListener == null) { - this.schemaChangeListener = schemaChangeListener; - } else { - SchemaChangeListener previousListener = this.schemaChangeListener; - if (previousListener instanceof MultiplexingSchemaChangeListener) { - ((MultiplexingSchemaChangeListener) previousListener).register(schemaChangeListener); - } else { - MultiplexingSchemaChangeListener multiplexingSchemaChangeListener = - new MultiplexingSchemaChangeListener(); - multiplexingSchemaChangeListener.register(previousListener); - 
multiplexingSchemaChangeListener.register(schemaChangeListener); - this.schemaChangeListener = multiplexingSchemaChangeListener; - } - } - return this; - } - - @NonNull - public Builder withRequestTracker(@Nullable RequestTracker requestTracker) { - this.requestTracker = requestTracker; - return this; - } - - @NonNull - public Builder addRequestTracker(@NonNull RequestTracker requestTracker) { - Objects.requireNonNull(requestTracker, "requestTracker cannot be null"); - if (this.requestTracker == null) { - this.requestTracker = requestTracker; - } else { - RequestTracker previousTracker = this.requestTracker; - if (previousTracker instanceof MultiplexingRequestTracker) { - ((MultiplexingRequestTracker) previousTracker).register(requestTracker); - } else { - MultiplexingRequestTracker multiplexingRequestTracker = new MultiplexingRequestTracker(); - multiplexingRequestTracker.register(previousTracker); - multiplexingRequestTracker.register(requestTracker); - this.requestTracker = multiplexingRequestTracker; - } - } - return this; - } - - @NonNull - public Builder withRequestIdGenerator(@Nullable RequestIdGenerator requestIdGenerator) { - this.requestIdGenerator = requestIdGenerator; - return this; - } - - @NonNull - public Builder withLocalDatacenter( - @NonNull String profileName, @NonNull String localDatacenter) { - this.localDatacentersBuilder.put(profileName, localDatacenter); - return this; - } - - @NonNull - public Builder clearDatacenters() { - this.localDatacentersBuilder = ImmutableMap.builder(); - return this; - } - - @NonNull - public Builder withLocalDatacenters(Map localDatacenters) { - for (Map.Entry entry : localDatacenters.entrySet()) { - this.localDatacentersBuilder.put(entry.getKey(), entry.getValue()); - } - return this; - } - - @NonNull - public Builder withNodeDistanceEvaluator( - @NonNull String profileName, @NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { - this.nodeDistanceEvaluatorsBuilder.put(profileName, nodeDistanceEvaluator); - 
return this; - } - - @NonNull - public Builder withNodeDistanceEvaluators( - Map nodeDistanceReporters) { - for (Entry entry : nodeDistanceReporters.entrySet()) { - this.nodeDistanceEvaluatorsBuilder.put(entry.getKey(), entry.getValue()); - } - return this; - } - - /** - * @deprecated Use {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} instead. - */ - @NonNull - @Deprecated - public Builder withNodeFilter( - @NonNull String profileName, @NonNull Predicate nodeFilter) { - this.nodeFiltersBuilder.put(profileName, nodeFilter); - this.nodeDistanceEvaluatorsBuilder.put( - profileName, new NodeFilterToDistanceEvaluatorAdapter(nodeFilter)); - return this; - } - - /** @deprecated Use {@link #withNodeDistanceEvaluators(Map)} instead. */ - @NonNull - @Deprecated - public Builder withNodeFilters(Map> nodeFilters) { - for (Map.Entry> entry : nodeFilters.entrySet()) { - this.nodeFiltersBuilder.put(entry.getKey(), entry.getValue()); - this.nodeDistanceEvaluatorsBuilder.put( - entry.getKey(), new NodeFilterToDistanceEvaluatorAdapter(entry.getValue())); - } - return this; - } - - @NonNull - public Builder withClassLoader(@Nullable ClassLoader classLoader) { - this.classLoader = classLoader; - return this; - } - - @NonNull - public Builder withCloudProxyAddress(@Nullable InetSocketAddress cloudAddress) { - this.cloudProxyAddress = cloudAddress; - return this; - } - - @NonNull - public Builder withAuthProvider(@Nullable AuthProvider authProvider) { - this.authProvider = authProvider; - return this; - } - - @NonNull - public Builder withSslEngineFactory(@Nullable SslEngineFactory sslEngineFactory) { - this.sslEngineFactory = sslEngineFactory; - return this; - } - - @NonNull - public Builder withStartupClientId(@Nullable UUID startupClientId) { - this.startupClientId = startupClientId; - return this; - } - - @NonNull - public Builder withStartupApplicationName(@Nullable String startupApplicationName) { - this.startupApplicationName = startupApplicationName; - return 
this; - } - - @NonNull - public Builder withStartupApplicationVersion(@Nullable String startupApplicationVersion) { - this.startupApplicationVersion = startupApplicationVersion; - return this; - } - - @NonNull - public Builder withCodecRegistry(@Nullable MutableCodecRegistry codecRegistry) { - this.codecRegistry = codecRegistry; - return this; - } - - @NonNull - public Builder withMetricRegistry(@Nullable Object metricRegistry) { - this.metricRegistry = metricRegistry; - return this; - } - - @NonNull - public ProgrammaticArguments build() { - return new ProgrammaticArguments( - typeCodecsBuilder.build(), - nodeStateListener, - schemaChangeListener, - requestTracker, - requestIdGenerator, - localDatacentersBuilder.build(), - nodeFiltersBuilder.build(), - nodeDistanceEvaluatorsBuilder.build(), - classLoader, - authProvider, - sslEngineFactory, - cloudProxyAddress, - startupClientId, - startupApplicationName, - startupApplicationVersion, - codecRegistry, - metricRegistry); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java deleted file mode 100644 index 7d122276cbf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; - -/** - * A request executed by a {@link Session}. - * - *

This is a high-level abstraction, agnostic to the actual language (e.g. CQL). A request is - * anything that can be converted to a protocol message, provided that you register a request - * processor with the driver to do that conversion. - */ -public interface Request { - - /** - * The name of the execution profile that will be used for this request, or {@code null} if no - * profile has been set. - * - *

Note that this will be ignored if {@link #getExecutionProfile()} returns a non-null value. - * - * @see DriverConfig - */ - @Nullable - String getExecutionProfileName(); - - /** - * The execution profile to use for this request, or {@code null} if no profile has been set. - * - *

It is generally simpler to specify a profile name with {@link #getExecutionProfileName()}. - * However, this method can be used to provide a "derived" profile that was built programmatically - * by the client code. If specified, it overrides the profile name. - * - * @see DriverExecutionProfile - */ - @Nullable - DriverExecutionProfile getExecutionProfile(); - - /** - * The CQL keyspace to execute this request in, or {@code null} if this request does not specify - * any keyspace. - * - *

This overrides {@link Session#getKeyspace()} for this particular request, providing a way to - * specify the keyspace without forcing it globally on the session, nor hard-coding it in the - * query string. - * - *

This feature is only available with {@link DefaultProtocolVersion#V5 native protocol v5} or - * higher. Specifying a per-request keyspace with lower protocol versions will cause a runtime - * error. - * - * @see CASSANDRA-10145 - */ - @Nullable - CqlIdentifier getKeyspace(); - - /** - * The keyspace to use for token-aware routing. - * - *

Note that if a {@linkplain #getKeyspace() per-request keyspace} is already defined for this - * request, it takes precedence over this method. - * - *

See {@link #getRoutingKey()} for a detailed explanation of token-aware routing. - */ - @Nullable - CqlIdentifier getRoutingKeyspace(); - - /** - * The partition key to use for token-aware routing. - * - *

For each request, the driver tries to determine a routing keyspace and a - * routing key by calling the following methods: - * - *

    - *
  • routing keyspace: - *
      - *
    • the result of {@link #getKeyspace()}, if not null; - *
    • otherwise, the result of {@link #getRoutingKeyspace()}, if not null; - *
    • otherwise, the result of {@link Session#getKeyspace()}, if not empty; - *
    • otherwise, null. - *
    - *
  • routing key: - *
      - *
    • the result of {@link #getRoutingToken()}, if not null; - *
    • otherwise, the result of {@link #getRoutingKey()}, if not null; - *
    • otherwise, null. - *
    - *
- * - * This provides a hint of the partition that the request operates on. When the driver picks a - * coordinator for execution, it will prioritize the replicas that own that partition, in order to - * avoid an extra network jump on the server side. - * - *

Routing information is optional: if either keyspace or key is null, token-aware routing is - * disabled for this request. - */ - @Nullable - ByteBuffer getRoutingKey(); - - /** - * The token to use for token-aware routing. - * - *

This is an alternative to {@link #getRoutingKey()}. Both methods represent the same - * information, a request can provide one or the other. - * - *

See {@link #getRoutingKey()} for a detailed explanation of token-aware routing. - */ - @Nullable - Token getRoutingToken(); - - /** - * Returns the custom payload to send alongside the request. - * - *

This is used to exchange extra information with the server. By default, Cassandra doesn't do - * anything with this, you'll only need it if you have a custom request handler on the - * server-side. - * - * @return The custom payload, or an empty map if no payload is present. - */ - @NonNull - Map getCustomPayload(); - - /** - * Whether the request is idempotent; that is, whether applying the request twice leaves the - * database in the same state. - * - *

This is used internally for retries and speculative executions: if a request is not - * idempotent, the driver will take extra care to ensure that it is not sent twice (for example, - * don't retry if there is the slightest chance that the request reached a coordinator). - * - * @return a boolean value, or {@code null} to use the default value defined in the configuration. - * @see DefaultDriverOption#REQUEST_DEFAULT_IDEMPOTENCE - */ - @Nullable - Boolean isIdempotent(); - - /** - * How long to wait for this request to complete. This is a global limit on the duration of a - * session.execute() call, including any retries the driver might do. - * - * @return the set duration, or {@code null} to use the default value defined in the - * configuration. - * @see DefaultDriverOption#REQUEST_TIMEOUT - */ - @Nullable - Duration getTimeout(); - - /** @return The node configured on this statement, or null if none is configured. */ - @Nullable - Node getNode(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java deleted file mode 100644 index e047bf2fe09..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.MavenCoordinates; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.DefaultMavenCoordinates; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Optional; -import java.util.concurrent.CompletionStage; - -/** - * A nexus to send requests to a Cassandra cluster. - * - *

This is a high-level abstraction capable of handling arbitrary request and result types. The - * driver's built-in {@link CqlSession} is a more convenient subtype for most client applications. - * - *

The driver's request execution logic is pluggable (see {@code RequestProcessor} in the - * internal API) to allow custom extensions. Hence the generic {@link #execute(Request, - * GenericType)} method in this interface, that makes no assumptions about the request or result - * type. - * - * @see CqlSession#builder() - */ -public interface Session extends AsyncAutoCloseable { - - /** - * The Maven coordinates of the core driver artifact. - * - *

This is intended for products that wrap or extend the driver, as a way to check - * compatibility if end-users override the driver version in their application. - */ - @NonNull - MavenCoordinates OSS_DRIVER_COORDINATES = - DefaultMavenCoordinates.buildFromResourceAndPrint( - Session.class.getResource("/com/datastax/oss/driver/Driver.properties")); - - /** - * The unique name identifying this session instance. This is used as a prefix for log messages - * and metrics. - * - *

This gets populated from the option {@code basic.session-name} in the configuration. If that - * option is absent, the driver will generate an identifier composed of the letter 's' followed by - * an incrementing counter. - * - *

Note that this is purely a client-side identifier; in particular, it has no relation with - * {@code system.local.cluster_name} on the server. - */ - @NonNull - String getName(); - - /** - * Returns a snapshot of the Cassandra cluster's topology and schema metadata. - * - *

In order to provide atomic updates, this method returns an immutable object: the node list, - * token map, and schema contained in a given instance will always be consistent with each other - * (but note that {@link Node} itself is not immutable: some of its properties will be updated - * dynamically, in particular {@link Node#getState()}). - * - *

As a consequence of the above, you should call this method each time you need a fresh view - * of the metadata. Do not call it once and store the result, because it is a frozen - * snapshot that will become stale over time. - * - *

If a metadata refresh triggers events (such as node added/removed, or schema events), then - * the new version of the metadata is guaranteed to be visible by the time you receive these - * events. - * - *

The returned object is never {@code null}, but may be empty if metadata has been disabled in - * the configuration. - */ - @NonNull - Metadata getMetadata(); - - /** Whether schema metadata is currently enabled. */ - boolean isSchemaMetadataEnabled(); - - /** - * Enable or disable schema metadata programmatically. - * - *

Use this method to override the value defined in the driver's configuration; one typical use - * case is to temporarily disable schema metadata while the client issues a sequence of DDL - * statements. - * - *

If calling this method re-enables the metadata (that is, {@link #isSchemaMetadataEnabled()} - * was false before, and becomes true as a result of the call), a refresh is also triggered. - * - * @param newValue a boolean value to enable or disable schema metadata programmatically, or - * {@code null} to use the driver's configuration. - * @see DefaultDriverOption#METADATA_SCHEMA_ENABLED - * @return if this call triggered a refresh, a future that will complete when that refresh is - * complete. Otherwise, a completed future with the current metadata. - */ - @NonNull - CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newValue); - - /** - * Force an immediate refresh of the schema metadata, even if it is currently disabled (either in - * the configuration or via {@link #setSchemaMetadataEnabled(Boolean)}). - * - *

The new metadata is returned in the resulting future (and will also be reflected by {@link - * #getMetadata()} when that future completes). - */ - @NonNull - CompletionStage refreshSchemaAsync(); - - /** - * Convenience method to call {@link #refreshSchemaAsync()} and block for the result. - * - *

This must not be called on a driver thread. - */ - @NonNull - default Metadata refreshSchema() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(refreshSchemaAsync()); - } - - /** - * Checks if all nodes in the cluster agree on a common schema version. - * - *

Due to the distributed nature of Cassandra, schema changes made on one node might not be - * immediately visible to others. Under certain circumstances, the driver waits until all nodes - * agree on a common schema version (namely: before a schema refresh, and before completing a - * successful schema-altering query). To do so, it queries system tables to find out the schema - * version of all nodes that are currently {@link NodeState#UP UP}. If all the versions match, the - * check succeeds, otherwise it is retried periodically, until a given timeout (specified in the - * configuration). - * - *

A schema agreement failure is not fatal, but it might produce unexpected results (for - * example, getting an "unconfigured table" error for a table that you created right before, just - * because the two queries went to different coordinators). - * - *

Note that schema agreement never succeeds in a mixed-version cluster (it would be - * challenging because the way the schema version is computed varies across server versions); the - * assumption is that schema updates are unlikely to happen during a rolling upgrade anyway. - * - * @return a future that completes with {@code true} if the nodes agree, or {@code false} if the - * timeout fired. - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_INTERVAL - * @see DefaultDriverOption#CONTROL_CONNECTION_AGREEMENT_TIMEOUT - */ - @NonNull - CompletionStage checkSchemaAgreementAsync(); - - /** - * Convenience method to call {@link #checkSchemaAgreementAsync()} and block for the result. - * - *

This must not be called on a driver thread. - */ - default boolean checkSchemaAgreement() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(checkSchemaAgreementAsync()); - } - - /** Returns a context that provides access to all the policies used by this driver instance. */ - @NonNull - DriverContext getContext(); - - /** - * The keyspace that this session is currently connected to, or {@link Optional#empty()} if this - * session is not connected to any keyspace. - * - *

There are two ways that this can be set: before initializing the session (either with the - * {@code session-keyspace} option in the configuration, or with {@link - * CqlSessionBuilder#withKeyspace(CqlIdentifier)}); or at runtime, if the client issues a request - * that changes the keyspace (such as a CQL {@code USE} query). Note that this second method is - * inherently unsafe, since other requests expecting the old keyspace might be executing - * concurrently. Therefore it is highly discouraged, aside from trivial cases (such as a - * cqlsh-style program where requests are never concurrent). - */ - @NonNull - Optional getKeyspace(); - - /** - * Returns a gateway to the driver's DropWizard metrics, or {@link Optional#empty()} if all - * metrics are disabled, or if the driver has been configured to use MicroProfile or Micrometer - * instead of DropWizard (see {@code advanced.metrics.factory.class} in the configuration). - * - *

{@link Metrics} was originally intended to allow programmatic access to the metrics, but it - * has a hard dependency to the DropWizard API, which makes it unsuitable for alternative metric - * frameworks. A workaround is to inject your own metric registry with {@link - * SessionBuilder#withMetricRegistry(Object)} when you build the session. You can then use the - * framework's proprietary APIs to retrieve the metrics from the registry. - */ - @NonNull - Optional getMetrics(); - - /** - * Executes an arbitrary request. - * - * @param resultType the type of the result, which determines the internal request processor - * (built-in or custom) that will be used to handle the request. - * @see Session - */ - @Nullable // because ResultT could be Void - ResultT execute( - @NonNull RequestT request, @NonNull GenericType resultType); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java deleted file mode 100644 index 25500119047..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java +++ /dev/null @@ -1,1013 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; -import com.datastax.oss.driver.api.core.auth.ProgrammaticPlainTextAuthProvider; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.ContactPoints; -import com.datastax.oss.driver.internal.core.config.cloud.CloudConfig; -import com.datastax.oss.driver.internal.core.config.cloud.CloudConfigFactory; -import 
com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.tracker.W3CContextRequestIdGenerator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.MalformedURLException; -import java.net.URL; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionStage; -import java.util.function.Predicate; -import javax.net.ssl.SSLContext; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Base implementation to build session instances. - * - *

You only need to deal with this directly if you use custom driver extensions. For the default - * session implementation, see {@link CqlSession#builder()}. - * - *

This class is mutable and not thread-safe. - */ -@NotThreadSafe -public abstract class SessionBuilder { - - public static final String ASTRA_PAYLOAD_KEY = "traceparent"; - - private static final Logger LOG = LoggerFactory.getLogger(SessionBuilder.class); - - @SuppressWarnings("unchecked") - protected final SelfT self = (SelfT) this; - - protected DriverConfigLoader configLoader; - protected Set programmaticContactPoints = new HashSet<>(); - protected CqlIdentifier keyspace; - protected Callable cloudConfigInputStream; - - protected ProgrammaticArguments.Builder programmaticArgumentsBuilder = - ProgrammaticArguments.builder(); - private boolean programmaticSslFactory = false; - private boolean programmaticLocalDatacenter = false; - - /** - * Sets the configuration loader to use. - * - *

If you don't call this method, the builder will use the default implementation, based on the - * Typesafe config library. More precisely, configuration properties are loaded and merged from - * the following (first-listed are higher priority): - * - *

    - *
  • system properties - *
  • {@code application.conf} (all resources on classpath with this name) - *
  • {@code application.json} (all resources on classpath with this name) - *
  • {@code application.properties} (all resources on classpath with this name) - *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this - * will load the {@code reference.conf} included in the core driver JAR, that defines - * default options for all mandatory options. - *
- * - * The resulting configuration is expected to contain a {@code datastax-java-driver} section. - * - *

This default loader will honor the reload interval defined by the option {@code - * basic.config-reload-interval}. - * - * @see Typesafe config's - * standard loading behavior - */ - @NonNull - public SelfT withConfigLoader(@Nullable DriverConfigLoader configLoader) { - this.configLoader = configLoader; - return self; - } - - @NonNull - @Deprecated - protected DriverConfigLoader defaultConfigLoader() { - return new DefaultDriverConfigLoader(); - } - - @NonNull - protected DriverConfigLoader defaultConfigLoader(@Nullable ClassLoader classLoader) { - if (classLoader == null) { - return new DefaultDriverConfigLoader(); - } else { - return new DefaultDriverConfigLoader(classLoader); - } - } - - /** - * Adds contact points to use for the initial connection to the cluster. - * - *

These are addresses of Cassandra nodes that the driver uses to discover the cluster - * topology. Only one contact point is required (the driver will retrieve the address of the other - * nodes automatically), but it is usually a good idea to provide more than one contact point, - * because if that single contact point is unavailable, the driver cannot initialize itself - * correctly. - * - *

Contact points can also be provided statically in the configuration. If both are specified, - * they will be merged. If both are absent, the driver will default to 127.0.0.1:9042. - * - *

Contrary to the configuration, DNS names with multiple A-records will not be handled here. - * If you need that, extract them manually with {@link java.net.InetAddress#getAllByName(String)} - * before calling this method. Similarly, if you need connect addresses to stay unresolved, make - * sure you pass unresolved instances here (see {@code advanced.resolve-contact-points} in the - * configuration for more explanations). - */ - @NonNull - public SelfT addContactPoints(@NonNull Collection contactPoints) { - for (InetSocketAddress contactPoint : contactPoints) { - addContactPoint(contactPoint); - } - return self; - } - - /** - * Adds a contact point to use for the initial connection to the cluster. - * - * @see #addContactPoints(Collection) - */ - @NonNull - public SelfT addContactPoint(@NonNull InetSocketAddress contactPoint) { - this.programmaticContactPoints.add(new DefaultEndPoint(contactPoint)); - return self; - } - - /** - * Adds contact points to use for the initial connection to the cluster. - * - *

You only need this method if you use a custom {@link EndPoint} implementation. Otherwise, - * use {@link #addContactPoints(Collection)}. - */ - @NonNull - public SelfT addContactEndPoints(@NonNull Collection contactPoints) { - for (EndPoint contactPoint : contactPoints) { - addContactEndPoint(contactPoint); - } - return self; - } - - /** - * Adds a contact point to use for the initial connection to the cluster. - * - *

You only need this method if you use a custom {@link EndPoint} implementation. Otherwise, - * use {@link #addContactPoint(InetSocketAddress)}. - */ - @NonNull - public SelfT addContactEndPoint(@NonNull EndPoint contactPoint) { - this.programmaticContactPoints.add(contactPoint); - return self; - } - - /** - * Registers additional codecs for custom type mappings. - * - * @param typeCodecs neither the individual codecs, nor the vararg array itself, can be {@code - * null}. - */ - @NonNull - public SelfT addTypeCodecs(@NonNull TypeCodec... typeCodecs) { - this.programmaticArgumentsBuilder.addTypeCodecs(typeCodecs); - return self; - } - - /** - * Registers a node state listener to use with the session. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.node-state-listener.classes} option. - * - *

This method unregisters any previously-registered listener. If you intend to register more - * than one listener, use {@link #addNodeStateListener(NodeStateListener)} instead. - */ - @NonNull - public SelfT withNodeStateListener(@Nullable NodeStateListener nodeStateListener) { - this.programmaticArgumentsBuilder.withNodeStateListener(nodeStateListener); - return self; - } - - /** - * Registers a node state listener to use with the session, without removing previously-registered - * listeners. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.node-state-listener.classes} option. - * - *

Unlike {@link #withNodeStateListener(NodeStateListener)}, this method adds the new listener - * to the list of already-registered listeners, thus allowing applications to register multiple - * listeners. When multiple listeners are registered, they are notified in sequence every time a - * new listener event is triggered. - */ - @NonNull - public SelfT addNodeStateListener(@NonNull NodeStateListener nodeStateListener) { - programmaticArgumentsBuilder.addNodeStateListener(nodeStateListener); - return self; - } - - /** - * Registers a schema change listener to use with the session. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.schema-change-listener.classes} option. - * - *

This method unregisters any previously-registered listener. If you intend to register more - * than one listener, use {@link #addSchemaChangeListener(SchemaChangeListener)} instead. - */ - @NonNull - public SelfT withSchemaChangeListener(@Nullable SchemaChangeListener schemaChangeListener) { - this.programmaticArgumentsBuilder.withSchemaChangeListener(schemaChangeListener); - return self; - } - - /** - * Registers a schema change listener to use with the session, without removing - * previously-registered listeners. - * - *

Listeners can be registered in two ways: either programmatically with this method, or via - * the configuration using the {@code advanced.metadata.schema-change-listener.classes} option. - * - *

Unlike {@link #withSchemaChangeListener(SchemaChangeListener)}, this method adds the new - * listener to the list of already-registered listeners, thus allowing applications to register - * multiple listeners. When multiple listeners are registered, they are notified in sequence every - * time a new listener event is triggered. - */ - @NonNull - public SelfT addSchemaChangeListener(@NonNull SchemaChangeListener schemaChangeListener) { - programmaticArgumentsBuilder.addSchemaChangeListener(schemaChangeListener); - return self; - } - - /** - * Registers a request tracker to use with the session. - * - *

Trackers can be registered in two ways: either programmatically with this method, or via the - * configuration using the {@code advanced.request-tracker.classes} option. - * - *

This method unregisters any previously-registered tracker. If you intend to register more - * than one tracker, use {@link #addRequestTracker(RequestTracker)} instead. - */ - @NonNull - public SelfT withRequestTracker(@Nullable RequestTracker requestTracker) { - this.programmaticArgumentsBuilder.withRequestTracker(requestTracker); - return self; - } - - /** - * Registers a request tracker to use with the session, without removing previously-registered - * trackers. - * - *

Trackers can be registered in two ways: either programmatically with this method, or via the - * configuration using the {@code advanced.request-tracker.classes} option. - * - *

Unlike {@link #withRequestTracker(RequestTracker)}, this method adds the new tracker to the - * list of already-registered trackers, thus allowing applications to register multiple trackers. - * When multiple trackers are registered, they are notified in sequence every time a new tracker - * event is triggered. - */ - @NonNull - public SelfT addRequestTracker(@NonNull RequestTracker requestTracker) { - programmaticArgumentsBuilder.addRequestTracker(requestTracker); - return self; - } - - /** - * Registers a request ID generator. The driver will use the generated ID in the logs and - * optionally add to the custom payload so that users can correlate logs about the same request - * from the Cassandra side. - */ - @NonNull - public SelfT withRequestIdGenerator(@NonNull RequestIdGenerator requestIdGenerator) { - this.programmaticArgumentsBuilder.withRequestIdGenerator(requestIdGenerator); - return self; - } - - /** - * Registers an authentication provider to use with the session. - * - *

If the provider is specified programmatically with this method, it overrides the - * configuration (that is, the {@code advanced.auth-provider.class} option will be ignored). - */ - @NonNull - public SelfT withAuthProvider(@Nullable AuthProvider authProvider) { - this.programmaticArgumentsBuilder.withAuthProvider(authProvider); - return self; - } - - /** - * Configures the session to use plaintext authentication with the given username and password. - * - *

This methods calls {@link #withAuthProvider(AuthProvider)} to register a special provider - * implementation. Therefore calling it overrides the configuration (that is, the {@code - * advanced.auth-provider.class} option will be ignored). - * - *

Note that this approach holds the credentials in clear text in memory, which makes them - * vulnerable to an attacker who is able to perform memory dumps. If this is not acceptable for - * you, consider writing your own {@link AuthProvider} implementation ({@link - * PlainTextAuthProviderBase} is a good starting point), and providing it either with {@link - * #withAuthProvider(AuthProvider)} or via the configuration ({@code - * advanced.auth-provider.class}). - */ - @NonNull - public SelfT withAuthCredentials(@NonNull String username, @NonNull String password) { - return withAuthProvider(new ProgrammaticPlainTextAuthProvider(username, password)); - } - - /** - * Configures the session to use DSE plaintext authentication with the given username and - * password, and perform proxy authentication with the given authorization id. - * - *

This feature is only available in DataStax Enterprise. If connecting to Apache Cassandra, - * the authorization id will be ignored; it is recommended to use {@link - * #withAuthCredentials(String, String)} instead. - * - *

This methods calls {@link #withAuthProvider(AuthProvider)} to register a special provider - * implementation. Therefore calling it overrides the configuration (that is, the {@code - * advanced.auth-provider.class} option will be ignored). - * - *

Note that this approach holds the credentials in clear text in memory, which makes them - * vulnerable to an attacker who is able to perform memory dumps. If this is not acceptable for - * you, consider writing your own {@link AuthProvider} implementation (the internal class {@code - * PlainTextAuthProviderBase} is a good starting point), and providing it either with {@link - * #withAuthProvider(AuthProvider)} or via the configuration ({@code - * advanced.auth-provider.class}). - */ - @NonNull - public SelfT withAuthCredentials( - @NonNull String username, @NonNull String password, @NonNull String authorizationId) { - return withAuthProvider( - new ProgrammaticPlainTextAuthProvider(username, password, authorizationId)); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #withAuthCredentials(String, String)}. - */ - @Deprecated - @NonNull - public SelfT withCredentials(@NonNull String username, @NonNull String password) { - return withAuthCredentials(username, password); - } - - /** - * @deprecated this method only exists to ease the transition from driver 3, it is an alias for - * {@link #withAuthCredentials(String, String,String)}. - */ - @Deprecated - @NonNull - public SelfT withCredentials( - @NonNull String username, @NonNull String password, @NonNull String authorizationId) { - return withAuthCredentials(username, password, authorizationId); - } - - /** - * Registers an SSL engine factory for the session. - * - *

If the factory is provided programmatically with this method, it overrides the configuration - * (that is, the {@code advanced.ssl-engine-factory} option will be ignored). - * - * @see ProgrammaticSslEngineFactory - */ - @NonNull - public SelfT withSslEngineFactory(@Nullable SslEngineFactory sslEngineFactory) { - this.programmaticSslFactory = true; - this.programmaticArgumentsBuilder.withSslEngineFactory(sslEngineFactory); - return self; - } - - /** - * Configures the session to use SSL with the given context. - * - *

This is a convenience method for clients that already have an {@link SSLContext} instance. - * It wraps its argument into a {@link ProgrammaticSslEngineFactory}, and passes it to {@link - * #withSslEngineFactory(SslEngineFactory)}. - * - *

If you use this method, there is no way to customize cipher suites, or turn on host name - * validation. If you need finer control, use {@link #withSslEngineFactory(SslEngineFactory)} - * directly and pass either your own implementation of {@link SslEngineFactory}, or a {@link - * ProgrammaticSslEngineFactory} created with custom cipher suites and/or host name validation. - * - *

Also, note that SSL engines will be created with advisory peer information ({@link - * SSLContext#createSSLEngine(String, int)}) whenever possible. - */ - @NonNull - public SelfT withSslContext(@Nullable SSLContext sslContext) { - return withSslEngineFactory( - sslContext == null ? null : new ProgrammaticSslEngineFactory(sslContext)); - } - - /** - * Specifies the datacenter that is considered "local" by the load balancing policy. - * - *

This is a programmatic alternative to the configuration option {@code - * basic.load-balancing-policy.local-datacenter}. If this method is used, it takes precedence and - * overrides the configuration. - * - *

Note that this setting may or may not be relevant depending on the load balancing policy - * implementation in use. The driver's built-in {@code DefaultLoadBalancingPolicy} relies on it; - * if you use a third-party implementation, refer to their documentation. - */ - public SelfT withLocalDatacenter(@NonNull String profileName, @NonNull String localDatacenter) { - this.programmaticLocalDatacenter = true; - this.programmaticArgumentsBuilder.withLocalDatacenter(profileName, localDatacenter); - return self; - } - - /** Alias to {@link #withLocalDatacenter(String, String)} for the default profile. */ - public SelfT withLocalDatacenter(@NonNull String localDatacenter) { - return withLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME, localDatacenter); - } - - /** - * Adds a custom {@link NodeDistanceEvaluator} for a particular execution profile. This assumes - * that you're also using a dedicated load balancing policy for that profile. - * - *

Node distance evaluators are honored by all the driver built-in load balancing policies. If - * you use a custom policy implementation however, you'll need to explicitly invoke the evaluator - * whenever appropriate. - * - *

If an evaluator is specified programmatically with this method, it overrides the - * configuration (that is, the {@code load-balancing-policy.evaluator.class} option will be - * ignored). - * - * @see #withNodeDistanceEvaluator(NodeDistanceEvaluator) - */ - @NonNull - public SelfT withNodeDistanceEvaluator( - @NonNull String profileName, @NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { - this.programmaticArgumentsBuilder.withNodeDistanceEvaluator(profileName, nodeDistanceEvaluator); - return self; - } - - /** - * Alias to {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} for the default - * profile. - */ - @NonNull - public SelfT withNodeDistanceEvaluator(@NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { - return withNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME, nodeDistanceEvaluator); - } - - /** - * Adds a custom filter to include/exclude nodes for a particular execution profile. This assumes - * that you're also using a dedicated load balancing policy for that profile. - * - *

The predicate's {@link Predicate#test(Object) test()} method will be invoked each time the - * {@link LoadBalancingPolicy} processes a topology or state change: if it returns false, the - * policy will suggest distance IGNORED (meaning the driver won't ever connect to it if all - * policies agree), and never included in any query plan. - * - *

Note that this behavior is implemented in the driver built-in load balancing policies. If - * you use a custom policy implementation, you'll need to explicitly invoke the filter. - * - *

If the filter is specified programmatically with this method, it overrides the configuration - * (that is, the {@code load-balancing-policy.filter.class} option will be ignored). - * - *

This method has been deprecated in favor of {@link - * #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)}. If you were using node - * filters, you can easily replace your filters with the following implementation of {@link - * NodeDistanceEvaluator}: - * - *

{@code
-   * public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator {
-   *
-   *   private final Predicate nodeFilter;
-   *
-   *   public NodeFilterToDistanceEvaluatorAdapter(Predicate nodeFilter) {
-   *     this.nodeFilter = nodeFilter;
-   *   }
-   *
-   *   public NodeDistance evaluateDistance(Node node, String localDc) {
-   *     return nodeFilter.test(node) ? null : NodeDistance.IGNORED;
-   *   }
-   * }
-   * }
- * - * The same can be achieved using a lambda + closure: - * - *
{@code
-   * Predicate nodeFilter = ...
-   * NodeDistanceEvaluator evaluator =
-   *   (node, localDc) -> nodeFilter.test(node) ? null : NodeDistance.IGNORED;
-   * }
- * - * @see #withNodeFilter(Predicate) - * @deprecated Use {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} instead. - */ - @Deprecated - @NonNull - public SelfT withNodeFilter(@NonNull String profileName, @NonNull Predicate nodeFilter) { - this.programmaticArgumentsBuilder.withNodeFilter(profileName, nodeFilter); - return self; - } - - /** - * Alias to {@link #withNodeFilter(String, Predicate)} for the default profile. - * - *

This method has been deprecated in favor of {@link - * #withNodeDistanceEvaluator(NodeDistanceEvaluator)}. See the javadocs of {@link - * #withNodeFilter(String, Predicate)} to understand how to migrate your legacy node filters. - * - * @deprecated Use {@link #withNodeDistanceEvaluator(NodeDistanceEvaluator)} instead. - */ - @Deprecated - @NonNull - public SelfT withNodeFilter(@NonNull Predicate nodeFilter) { - return withNodeFilter(DriverExecutionProfile.DEFAULT_NAME, nodeFilter); - } - - /** - * Sets the keyspace to connect the session to. - * - *

Note that this can also be provided by the configuration; if both are defined, this method - * takes precedence. - */ - @NonNull - public SelfT withKeyspace(@Nullable CqlIdentifier keyspace) { - this.keyspace = keyspace; - return self; - } - - /** - * Shortcut for {@link #withKeyspace(CqlIdentifier) - * setKeyspace(CqlIdentifier.fromCql(keyspaceName))} - */ - @NonNull - public SelfT withKeyspace(@Nullable String keyspaceName) { - return withKeyspace(keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * The {@link ClassLoader} to use to reflectively load class names defined in configuration. - * - *

Unless you define a custom {@link #configLoader}, this class loader will also be used to - * locate application-specific configuration resources. - * - *

If you do not provide any custom class loader, the driver will attempt to use the following - * ones: - * - *

    - *
  1. When reflectively loading class names defined in configuration: same class loader that - * loaded the core driver classes. - *
  2. When locating application-specific configuration resources: the current thread's - * {@linkplain Thread#getContextClassLoader() context class loader}. - *
- * - * This is generally the right thing to do. - * - *

Defining a different class loader is typically only needed in web or OSGi environments where - * there are complex class loading requirements. - * - *

For example, if the driver jar is loaded by the web server's system class loader (that is, - * the driver jar was placed in the "/lib" folder of the web server), but the application tries to - * load a custom load balancing policy declared in the web app's "WEB-INF/lib" folder, the system - * class loader will not be able to load such class. Instead, you must use the web app's class - * loader, that you can obtain by calling {@link Thread#getContextClassLoader()}: - * - *

{@code
-   * CqlSession.builder()
-   *   .addContactEndPoint(...)
-   *   .withClassLoader(Thread.currentThread().getContextClassLoader())
-   *   .build();
-   * }
- * - * Indeed, in most web environments, {@code Thread.currentThread().getContextClassLoader()} will - * return the web app's class loader, which is a child of the web server's system class loader. - * This class loader is thus capable of loading both the implemented interface and the - * implementing class, in spite of them being declared in different places. - * - *

For OSGi deployments, it is usually not necessary to use this method. Even if the - * implemented interface and the implementing class are located in different bundles, the right - * class loader to use should be the default one (the driver bundle's class loader). In - * particular, it is not advised to rely on {@code Thread.currentThread().getContextClassLoader()} - * in OSGi environments, so you should never pass that class loader to this method. See Using - * a custom ClassLoader in our OSGi online docs for more information. - */ - @NonNull - public SelfT withClassLoader(@Nullable ClassLoader classLoader) { - this.programmaticArgumentsBuilder.withClassLoader(classLoader); - return self; - } - - /** - * Configures this SessionBuilder for Cloud deployments by retrieving connection information from - * the provided {@link Path}. - * - *

To connect to a Cloud database, you must first download the secure database bundle from the - * DataStax Astra console that contains the connection information, then instruct the driver to - * read its contents using either this method or one if its variants. - * - *

For more information, please refer to the DataStax Astra documentation. - * - * @param cloudConfigPath Path to the secure connect bundle zip file. - * @see #withCloudSecureConnectBundle(URL) - * @see #withCloudSecureConnectBundle(InputStream) - */ - @NonNull - public SelfT withCloudSecureConnectBundle(@NonNull Path cloudConfigPath) { - try { - URL cloudConfigUrl = cloudConfigPath.toAbsolutePath().normalize().toUri().toURL(); - this.cloudConfigInputStream = cloudConfigUrl::openStream; - } catch (MalformedURLException e) { - throw new IllegalArgumentException("Incorrect format of cloudConfigPath", e); - } - return self; - } - - /** - * Registers a CodecRegistry to use for the session. - * - *

When both this and {@link #addTypeCodecs(TypeCodec[])} are called, the added type codecs - * will be registered on the provided CodecRegistry. - */ - @NonNull - public SelfT withCodecRegistry(@Nullable MutableCodecRegistry codecRegistry) { - this.programmaticArgumentsBuilder.withCodecRegistry(codecRegistry); - return self; - } - - /** - * Configures this SessionBuilder for Cloud deployments by retrieving connection information from - * the provided {@link URL}. - * - *

To connect to a Cloud database, you must first download the secure database bundle from the - * DataStax Astra console that contains the connection information, then instruct the driver to - * read its contents using either this method or one if its variants. - * - *

For more information, please refer to the DataStax Astra documentation. - * - * @param cloudConfigUrl URL to the secure connect bundle zip file. - * @see #withCloudSecureConnectBundle(Path) - * @see #withCloudSecureConnectBundle(InputStream) - */ - @NonNull - public SelfT withCloudSecureConnectBundle(@NonNull URL cloudConfigUrl) { - this.cloudConfigInputStream = cloudConfigUrl::openStream; - return self; - } - - /** - * Configures this SessionBuilder for Cloud deployments by retrieving connection information from - * the provided {@link InputStream}. - * - *

To connect to a Cloud database, you must first download the secure database bundle from the - * DataStax Astra console that contains the connection information, then instruct the driver to - * read its contents using either this method or one if its variants. - * - *

For more information, please refer to the DataStax Astra documentation. - * - *

Note that the provided stream will be consumed and closed when either {@link - * #build()} or {@link #buildAsync()} are called; attempting to reuse it afterwards will result in - * an error being thrown. - * - * @param cloudConfigInputStream A stream containing the secure connect bundle zip file. - * @see #withCloudSecureConnectBundle(Path) - * @see #withCloudSecureConnectBundle(URL) - */ - @NonNull - public SelfT withCloudSecureConnectBundle(@NonNull InputStream cloudConfigInputStream) { - this.cloudConfigInputStream = () -> cloudConfigInputStream; - return self; - } - - /** - * Configures this SessionBuilder to use the provided Cloud proxy endpoint. - * - *

Normally, this method should not be called directly; the normal and easiest way to configure - * the driver for Cloud deployments is through a {@linkplain #withCloudSecureConnectBundle(URL) - * secure connect bundle}. - * - *

Setting this option to any non-null address will make the driver use a special topology - * monitor tailored for Cloud deployments. This topology monitor assumes that the target cluster - * should be contacted through the proxy specified here, using SNI routing. - * - *

For more information, please refer to the DataStax Astra documentation. - * - * @param cloudProxyAddress The address of the Cloud proxy to use. - * @see Server Name Indication - */ - @NonNull - public SelfT withCloudProxyAddress(@Nullable InetSocketAddress cloudProxyAddress) { - this.programmaticArgumentsBuilder.withCloudProxyAddress(cloudProxyAddress); - return self; - } - - /** - * A unique identifier for the created session. - * - *

It will be sent in the {@code STARTUP} protocol message, under the key {@code CLIENT_ID}, - * for each new connection established by the driver. Currently, this information is used by - * Insights monitoring (if the target cluster does not support Insights, the entry will be ignored - * by the server). - * - *

If you don't call this method, the driver will generate an identifier with {@link - * Uuids#random()}. - */ - @NonNull - public SelfT withClientId(@Nullable UUID clientId) { - this.programmaticArgumentsBuilder.withStartupClientId(clientId); - return self; - } - - /** - * The name of the application using the created session. - * - *

It will be sent in the {@code STARTUP} protocol message, under the key {@code - * APPLICATION_NAME}, for each new connection established by the driver. Currently, this - * information is used by Insights monitoring (if the target cluster does not support Insights, - * the entry will be ignored by the server). - * - *

This can also be defined in the driver configuration with the option {@code - * basic.application.name}; if you specify both, this method takes precedence and the - * configuration option will be ignored. If neither is specified, the entry is not included in the - * message. - */ - @NonNull - public SelfT withApplicationName(@Nullable String applicationName) { - this.programmaticArgumentsBuilder.withStartupApplicationName(applicationName); - return self; - } - - /** - * The version of the application using the created session. - * - *

It will be sent in the {@code STARTUP} protocol message, under the key {@code - * APPLICATION_VERSION}, for each new connection established by the driver. Currently, this - * information is used by Insights monitoring (if the target cluster does not support Insights, - * the entry will be ignored by the server). - * - *

This can also be defined in the driver configuration with the option {@code - * basic.application.version}; if you specify both, this method takes precedence and the - * configuration option will be ignored. If neither is specified, the entry is not included in the - * message. - */ - @NonNull - public SelfT withApplicationVersion(@Nullable String applicationVersion) { - this.programmaticArgumentsBuilder.withStartupApplicationVersion(applicationVersion); - return self; - } - - /** - * The metric registry object for storing driver metrics. - * - *

The argument should be an instance of the base registry type for the metrics framework you - * are using (see {@code advanced.metrics.factory.class} in the configuration): - * - *

    - *
  • Dropwizard (the default): {@code com.codahale.metrics.MetricRegistry} - *
  • Micrometer: {@code io.micrometer.core.instrument.MeterRegistry} - *
  • MicroProfile: {@code org.eclipse.microprofile.metrics.MetricRegistry} - *
- * - * Only MicroProfile requires an external instance of its registry to be provided. For - * Micrometer, if no Registry object is provided, Micrometer's {@code globalRegistry} will be - * used. For Dropwizard, if no Registry object is provided, an instance of {@code MetricRegistry} - * will be created and used. - */ - @NonNull - public SelfT withMetricRegistry(@Nullable Object metricRegistry) { - this.programmaticArgumentsBuilder.withMetricRegistry(metricRegistry); - return self; - } - - /** - * Creates the session with the options set by this builder. - * - *

The session initialization will happen asynchronously in a driver internal thread pool. - * - * @return a completion stage that completes with the session when it is fully initialized. - */ - @NonNull - public CompletionStage buildAsync() { - CompletionStage buildStage = buildDefaultSessionAsync(); - CompletionStage wrapStage = buildStage.thenApply(this::wrap); - // thenApply does not propagate cancellation (!) - CompletableFutures.propagateCancellation(wrapStage, buildStage); - return wrapStage; - } - /** - * Convenience method to call {@link #buildAsync()} and block on the result. - * - *

Usage in non-blocking applications: beware that session initialization is a costly - * operation. It should only be triggered from a thread that is allowed to block. If that is not - * the case, consider using {@link #buildAsync()} instead. - * - *

This must not be called on a driver thread. - */ - @NonNull - public SessionT build() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(buildAsync()); - } - - protected abstract SessionT wrap(@NonNull CqlSession defaultSession); - - @NonNull - protected final CompletionStage buildDefaultSessionAsync() { - try { - - ProgrammaticArguments programmaticArguments = programmaticArgumentsBuilder.build(); - - DriverConfigLoader configLoader = - this.configLoader != null - ? this.configLoader - : defaultConfigLoader(programmaticArguments.getClassLoader()); - - DriverExecutionProfile defaultConfig = configLoader.getInitialConfig().getDefaultProfile(); - if (cloudConfigInputStream == null) { - String configUrlString = - defaultConfig.getString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, null); - if (configUrlString != null) { - cloudConfigInputStream = () -> getURL(configUrlString).openStream(); - } - } - List configContactPoints = - defaultConfig.getStringList(DefaultDriverOption.CONTACT_POINTS, Collections.emptyList()); - if (cloudConfigInputStream != null) { - // override request id generator, unless user has already set it - if (programmaticArguments.getRequestIdGenerator() == null) { - programmaticArgumentsBuilder.withRequestIdGenerator( - new W3CContextRequestIdGenerator(ASTRA_PAYLOAD_KEY)); - LOG.debug( - "A secure connect bundle is provided, using W3CContextRequestIdGenerator as request ID generator."); - } - if (!programmaticContactPoints.isEmpty() || !configContactPoints.isEmpty()) { - LOG.info( - "Both a secure connect bundle and contact points were provided. These are mutually exclusive. 
The contact points from the secure bundle will have priority."); - // clear the contact points provided in the setting file and via addContactPoints - configContactPoints = Collections.emptyList(); - programmaticContactPoints = new HashSet<>(); - } - - if (programmaticSslFactory - || defaultConfig.isDefined(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS)) { - LOG.info( - "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. The SSL options from the secure bundle will have priority."); - } - CloudConfig cloudConfig = - new CloudConfigFactory().createCloudConfig(cloudConfigInputStream.call()); - addContactEndPoints(cloudConfig.getEndPoints()); - - boolean localDataCenterDefined = - anyProfileHasDatacenterDefined(configLoader.getInitialConfig()); - if (programmaticLocalDatacenter || localDataCenterDefined) { - LOG.info( - "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. The local datacenter from the secure bundle will have priority."); - programmaticArgumentsBuilder.clearDatacenters(); - } - withLocalDatacenter(cloudConfig.getLocalDatacenter()); - withSslEngineFactory(cloudConfig.getSslEngineFactory()); - withCloudProxyAddress(cloudConfig.getProxyAddress()); - programmaticArguments = programmaticArgumentsBuilder.build(); - } - - boolean resolveAddresses = - defaultConfig.getBoolean(DefaultDriverOption.RESOLVE_CONTACT_POINTS, true); - - Set contactPoints = - ContactPoints.merge(programmaticContactPoints, configContactPoints, resolveAddresses); - - if (keyspace == null && defaultConfig.isDefined(DefaultDriverOption.SESSION_KEYSPACE)) { - keyspace = - CqlIdentifier.fromCql(defaultConfig.getString(DefaultDriverOption.SESSION_KEYSPACE)); - } - - return DefaultSession.init( - (InternalDriverContext) buildContext(configLoader, programmaticArguments), - contactPoints, - keyspace); - - } catch (Throwable t) { - // We construct the session synchronously (until the init() call), but async clients 
expect a - // failed future if anything goes wrong. So wrap any error from that synchronous part. - return CompletableFutures.failedFuture(t); - } - } - - private boolean anyProfileHasDatacenterDefined(DriverConfig driverConfig) { - for (DriverExecutionProfile driverExecutionProfile : driverConfig.getProfiles().values()) { - if (driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { - return true; - } - } - return false; - } - - /** - * Returns URL based on the configUrl setting. If the configUrl has no protocol provided, the - * method will fallback to file:// protocol and return URL that has file protocol specified. - * - * @param configUrl url to config secure bundle - * @return URL with file protocol if there was not explicit protocol provided in the configUrl - * setting - */ - private URL getURL(String configUrl) throws MalformedURLException { - try { - return new URL(configUrl); - } catch (MalformedURLException e1) { - try { - return Paths.get(configUrl).toAbsolutePath().normalize().toUri().toURL(); - } catch (MalformedURLException e2) { - e2.addSuppressed(e1); - throw e2; - } - } - } - - /** - * This must return an instance of {@code InternalDriverContext} (it's not expressed - * directly in the signature to avoid leaking that type through the protected API). 
- */ - protected DriverContext buildContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - - // Preserve backward compatibility with the deprecated method: - @SuppressWarnings("deprecation") - DriverContext legacyApiContext = - buildContext( - configLoader, - programmaticArguments.getTypeCodecs(), - programmaticArguments.getNodeStateListener(), - programmaticArguments.getSchemaChangeListener(), - programmaticArguments.getRequestTracker(), - programmaticArguments.getLocalDatacenters(), - programmaticArguments.getNodeFilters(), - programmaticArguments.getClassLoader()); - if (legacyApiContext != null) { - return legacyApiContext; - } - - return new DefaultDriverContext(configLoader, programmaticArguments); - } - - /** - * @deprecated this method only exists for backward compatibility (if a subclass written for - * driver 4.1.0 returns a non-null result, that value will be used). Please override {@link - * #buildContext(DriverConfigLoader, ProgrammaticArguments)} instead. - */ - @Deprecated - @SuppressWarnings("DeprecatedIsStillUsed") - protected DriverContext buildContext( - DriverConfigLoader configLoader, - List> typeCodecs, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - RequestTracker requestTracker, - Map localDatacenters, - Map> nodeFilters, - ClassLoader classLoader) { - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java deleted file mode 100644 index 73d347d533e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session.throttling; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Closeable; - -/** - * Limits the number of concurrent requests executed by the driver. - * - *

Usage in non-blocking applications: beware that some implementations of this interface use - * locks for internal coordination, and do not qualify as lock-free. If your application enforces - * strict lock-freedom, then you should use the {@code PassThroughRequestThrottler} or the {@code - * ConcurrencyLimitingRequestThrottler}. - */ -public interface RequestThrottler extends Closeable { - - /** - * Registers a new request to be throttled. The throttler will invoke {@link - * Throttled#onThrottleReady(boolean)} when the request is allowed to proceed. - */ - void register(@NonNull Throttled request); - - /** - * Signals that a request has succeeded. This indicates to the throttler that another request - * might be started. - */ - void signalSuccess(@NonNull Throttled request); - - /** - * Signals that a request has failed. This indicates to the throttler that another request might - * be started. - */ - void signalError(@NonNull Throttled request, @NonNull Throwable error); - - /** - * Signals that a request has timed out. This indicates to the throttler that this request has - * stopped (if it was running already), or that it doesn't need to be started in the future. - * - *

Note: requests are responsible for handling their own timeout. The throttler does not - * perform time-based eviction on pending requests. - */ - void signalTimeout(@NonNull Throttled request); - - /** - * Signals that a request has been cancelled. This indicates to the throttler that another request - * might be started. - */ - default void signalCancel(@NonNull Throttled request) { - // no-op for backward compatibility purposes - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java deleted file mode 100644 index 6fd562804da..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A request that may be subjected to throttling by a {@link - * com.datastax.oss.driver.api.core.session.throttling.RequestThrottler}. 
- */ -public interface Throttled { - - /** - * Invoked by the throttler to indicate that the request can now start. The request must wait for - * this call until it does any "actual" work (typically, writing to a connection). - * - * @param wasDelayed indicates whether the throttler delayed at all; this is so that requests - * don't have to rely on measuring time to determine it (this is useful for metrics). - */ - void onThrottleReady(boolean wasDelayed); - - /** - * Invoked by the throttler to indicate that the request cannot be fulfilled. Typically, this - * means we've reached maximum capacity, and the request can't even be enqueued. This error must - * be rethrown to the client. - * - * @param error the error that the request should be completed (exceptionally) with. - */ - void onThrottleFailure(@NonNull RequestThrottlingException error); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java deleted file mode 100644 index 163204ba62d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.specex; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * The policy that decides if the driver will send speculative queries to the next nodes when the - * current node takes too long to respond. - */ -public interface SpeculativeExecutionPolicy extends AutoCloseable { - - /** - * @param node the node that caused the speculative execution (that is, the node that was queried - * previously but was too slow to answer) - * @param keyspace the CQL keyspace currently associated to the session. This is set either - * through the configuration, by calling {@link SessionBuilder#withKeyspace(CqlIdentifier)}, - * or by manually executing a {@code USE} CQL statement. It can be {@code null} if the session - * has no keyspace. - * @param request the request to execute. - * @param runningExecutions the number of executions that are already running (including the - * initial, non-speculative request). For example, if this is 2 it means the initial attempt - * was sent, then the driver scheduled a first speculative execution, and it is now asking for - * the delay until the second speculative execution. - * @return the time (in milliseconds) until a speculative request is sent to the next node, or 0 - * to send it immediately, or a negative value to stop sending requests. - */ - long nextExecution( - @NonNull Node node, - @Nullable CqlIdentifier keyspace, - @NonNull Request request, - int runningExecutions); - - /** Called when the cluster that this policy is associated with closes. 
*/ - @Override - void close(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java deleted file mode 100644 index d65eaa864aa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; - -/** - * An SSL engine factory that allows you to configure the driver programmatically, by passing your - * own {@link SSLContext}. - * - *

Note that this class will create SSL engines with advisory peer information ({@link - * SSLContext#createSSLEngine(String, int)}) whenever possible. - * - *

If those defaults do not work for you, it should be pretty straightforward to write your own - * implementation by extending or duplicating this class. - * - * @see SessionBuilder#withSslEngineFactory(SslEngineFactory) - * @see SessionBuilder#withSslContext(SSLContext) - */ -public class ProgrammaticSslEngineFactory implements SslEngineFactory { - - protected final SSLContext sslContext; - protected final String[] cipherSuites; - protected final boolean requireHostnameValidation; - protected final boolean allowDnsReverseLookupSan; - - /** - * Creates an instance with the given {@link SSLContext}, default cipher suites and no host name - * validation. - * - * @param sslContext the {@link SSLContext} to use. - */ - public ProgrammaticSslEngineFactory(@NonNull SSLContext sslContext) { - this(sslContext, null); - } - - /** - * Creates an instance with the given {@link SSLContext} and cipher suites, and no host name - * validation. - * - * @param sslContext the {@link SSLContext} to use. - * @param cipherSuites the cipher suites to use, or null to use the default ones. - */ - public ProgrammaticSslEngineFactory( - @NonNull SSLContext sslContext, @Nullable String[] cipherSuites) { - this(sslContext, cipherSuites, false); - } - - /** - * Creates an instance with the given {@link SSLContext}, cipher suites and host name validation. - * - * @param sslContext the {@link SSLContext} to use. - * @param cipherSuites the cipher suites to use, or null to use the default ones. - * @param requireHostnameValidation whether to enable host name validation. If enabled, host name - * validation will be done using HTTPS algorithm. - */ - public ProgrammaticSslEngineFactory( - @NonNull SSLContext sslContext, - @Nullable String[] cipherSuites, - boolean requireHostnameValidation) { - this(sslContext, cipherSuites, requireHostnameValidation, true); - } - - /** - * Creates an instance with the given {@link SSLContext}, cipher suites and host name validation. 
- * - * @param sslContext the {@link SSLContext} to use. - * @param cipherSuites the cipher suites to use, or null to use the default ones. - * @param requireHostnameValidation whether to enable host name validation. If enabled, host name - * validation will be done using HTTPS algorithm. - * @param allowDnsReverseLookupSan whether to allow raw server IPs to be DNS reverse-resolved to - * choose the appropriate Subject Alternative Name. - */ - public ProgrammaticSslEngineFactory( - @NonNull SSLContext sslContext, - @Nullable String[] cipherSuites, - boolean requireHostnameValidation, - boolean allowDnsReverseLookupSan) { - this.sslContext = sslContext; - this.cipherSuites = cipherSuites; - this.requireHostnameValidation = requireHostnameValidation; - this.allowDnsReverseLookupSan = allowDnsReverseLookupSan; - } - - @NonNull - @Override - public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { - SSLEngine engine; - SocketAddress remoteAddress = remoteEndpoint.resolve(); - if (remoteAddress instanceof InetSocketAddress) { - InetSocketAddress socketAddress = (InetSocketAddress) remoteAddress; - engine = - sslContext.createSSLEngine( - allowDnsReverseLookupSan - ? 
socketAddress.getHostName() - : socketAddress.getHostString(), - socketAddress.getPort()); - } else { - engine = sslContext.createSSLEngine(); - } - engine.setUseClientMode(true); - if (cipherSuites != null) { - engine.setEnabledCipherSuites(cipherSuites); - } - if (requireHostnameValidation) { - SSLParameters parameters = engine.getSSLParameters(); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - engine.setSSLParameters(parameters); - } - return engine; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java deleted file mode 100644 index db4f18a97b9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import javax.net.ssl.SSLEngine; - -/** - * Extension point to configure SSL based on the built-in JDK implementation. - * - *

Note that, for advanced use cases (such as bypassing the JDK in favor of another SSL - * implementation), the driver's internal API provides a lower-level interface: {@link - * com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory}. - */ -public interface SslEngineFactory extends AutoCloseable { - /** - * Creates a new SSL engine each time a connection is established. - * - * @param remoteEndpoint the remote endpoint we are connecting to (the address of the Cassandra - * node). - */ - @NonNull - SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java deleted file mode 100644 index a0cb3e73397..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Support for secured communication between the driver and Cassandra nodes. 
*/ -package com.datastax.oss.driver.api.core.ssl; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java deleted file mode 100644 index b1139dd9f4d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.time; - -import com.datastax.oss.driver.api.core.cql.Statement; - -/** - * Generates client-side, microsecond-precision query timestamps. - * - *

These timestamps are used to order queries server-side, and resolve potential conflicts. - */ -public interface TimestampGenerator extends AutoCloseable { - - /** - * Returns the next timestamp, in microseconds. - * - *

The timestamps returned by this method should be monotonic; that is, successive invocations - * should return strictly increasing results. Note that this might not be possible using the clock - * alone, if it is not precise enough; alternative strategies might include incrementing the last - * returned value if the clock tick hasn't changed, and possibly drifting in the future. See the - * built-in driver implementations for more details. - * - * @return the next timestamp, or {@link Statement#NO_DEFAULT_TIMESTAMP} to indicate that the - * driver should not send one with the query (and let Cassandra generate a server-side - * timestamp). - */ - long next(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java deleted file mode 100644 index 21db3793b01..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.tracker; - -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * Interface responsible for generating request IDs. - * - *

Note that all request IDs have a parent/child relationship. A "session request ID" can loosely - * be thought of as encompassing a sequence of a request + any attendant retries, speculative - * executions etc. It's scope is identical to that of a {@link - * com.datastax.oss.driver.internal.core.cql.CqlRequestHandler}. A "node request ID" represents a - * single request within this larger scope. Note that a request corresponding to a request ID may be - * retried; in that case the retry count will be appended to the corresponding identifier in the - * logs. - */ -public interface RequestIdGenerator { - - String DEFAULT_PAYLOAD_KEY = "request-id"; - - /** - * Generates a unique identifier for the session request. This will be the identifier for the - * entire `session.execute()` call. This identifier will be added to logs, and propagated to - * request trackers. - * - * @return a unique identifier for the session request - */ - String getSessionRequestId(); - - /** - * Generates a unique identifier for the node request. This will be the identifier for the CQL - * request against a particular node. There can be one or more node requests for a single session - * request, due to retries or speculative executions. This identifier will be added to logs, and - * propagated to request trackers. 
- * - * @param statement the statement to be executed - * @param parentId the session request identifier - * @return a unique identifier for the node request - */ - String getNodeRequestId(@NonNull Request statement, @NonNull String parentId); - - default String getCustomPayloadKey() { - return DEFAULT_PAYLOAD_KEY; - } - - default Statement getDecoratedStatement( - @NonNull Statement statement, @NonNull String requestId) { - - Map existing = new HashMap<>(statement.getCustomPayload()); - String key = getCustomPayloadKey(); - - // Add or overwrite - existing.put(key, ByteBuffer.wrap(requestId.getBytes(StandardCharsets.UTF_8))); - - // Allowing null key/values - // Wrap a map inside to be immutable without instanciating a new map - Map unmodifiableMap = Collections.unmodifiableMap(existing); - - return statement.setCustomPayload(unmodifiableMap); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java deleted file mode 100644 index 065b41e496a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.tracker; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * Tracks request execution for a session. - * - *

Implementations of this interface can be registered either via the configuration (see {@code - * reference.conf} in the manual or core driver JAR), or programmatically via {@link - * SessionBuilder#addRequestTracker(RequestTracker)}. - */ -public interface RequestTracker extends AutoCloseable { - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onSuccess(Request, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node) {} - - /** - * Invoked each time a session request succeeds. A session request is a `session.execute()` call - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the result is made available to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the successful response. - * @param sessionRequestLogPrefix the dedicated log prefix for this request - */ - default void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - // If client doesn't override onSuccess with sessionRequestLogPrefix delegate call to the old - // method - onSuccess(request, latencyNanos, executionProfile, node); - } - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onError(Request, Throwable, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @Nullable Node node) {} - - /** - * Invoked each time a session request fails. 
A session request is a `session.execute()` call - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the error is propagated to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the error response, or {@code null} if the error occurred - * @param sessionRequestLogPrefix the dedicated log prefix for this request - */ - default void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @Nullable Node node, - @NonNull String sessionRequestLogPrefix) { - // If client doesn't override onError with sessionRequestLogPrefix delegate call to the old - // method - onError(request, error, latencyNanos, executionProfile, node); - } - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onNodeError(Request, Throwable, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node) {} - - /** - * Invoked each time a node request fails. A node request is a CQL request sent to a particular - * node. There can be one or more node requests for a single session request, due to retries or - * speculative executions. - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the error is propagated to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the error response. 
- * @param nodeRequestLogPrefix the dedicated log prefix for this request - */ - default void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // If client doesn't override onNodeError with nodeRequestLogPrefix delegate call to the old - // method - onNodeError(request, error, latencyNanos, executionProfile, node); - } - - /** - * @deprecated This method only exists for backward compatibility. Override {@link - * #onNodeSuccess(Request, long, DriverExecutionProfile, Node, String)} instead. - */ - @Deprecated - default void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node) {} - - /** - * Invoked each time a node request succeeds. A node request is a CQL request sent to a particular - * node. There can be one or more node requests for a single session request, due to retries or - * speculative executions. - * - * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, - * GenericType) session.execute} call until the result is made available to the client). - * @param executionProfile the execution profile of this request. - * @param node the node that returned the successful response. - * @param nodeRequestLogPrefix the dedicated log prefix for this request - */ - default void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // If client doesn't override onNodeSuccess with nodeRequestLogPrefix delegate call to the old - // method - onNodeSuccess(request, latencyNanos, executionProfile, node); - } - - /** - * Invoked when the session is ready to process user requests. - * - *

WARNING: if you use {@code session.execute()} in your tracker implementation, keep in - * mind that those requests will in turn recurse back into {@code onSuccess} / {@code onError} - * methods. Make sure you don't trigger an infinite loop; one way to do that is to use a - * custom execution profile for internal requests. - * - *

This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future - * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, - * this method will not get called. - * - *

Listener methods are invoked from different threads; if you store the session in a field, - * make it at least volatile to guarantee proper publication. - * - *

This method is guaranteed to be the first one invoked on this object. - * - *

The default implementation is empty. - */ - default void onSessionReady(@NonNull Session session) {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java deleted file mode 100644 index 93e92ec2c2b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Representation of a type which "contains" some other type. This might be a collection type or it - * could be some other kind of container; the term is deliberately left somewhat vague. 
- */ -public interface ContainerType { - - @NonNull - DataType getElementType(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java deleted file mode 100644 index 93f913a584d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CustomType extends DataType { - /** - * The fully qualified name of the subtype of {@code org.apache.cassandra.db.marshal.AbstractType} - * that represents this type server-side. 
- */ - @NonNull - String getClassName(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - return String.format("'%s'", getClassName()); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.CUSTOM; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java deleted file mode 100644 index 92e5cc5edf0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.Detachable; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The type of a CQL column, field or function argument. - * - *

The default implementations returned by the driver are immutable and serializable. If you - * write your own implementations, they should at least be thread-safe; serializability is not - * mandatory, but recommended for use with some 3rd-party tools like Apache Spark ™. - * - * @see DataTypes - */ -public interface DataType extends Detachable { - /** The code of the data type in the native protocol specification. */ - int getProtocolCode(); - - /** - * Builds an appropriate representation for use in a CQL query. - * - * @param includeFrozen whether to include the {@code frozen<...>} keyword if applicable. This - * will need to be set depending on where the result is used: for example, {@code CREATE - * TABLE} statements use the frozen keyword, whereas it should never appear in {@code CREATE - * FUNCTION}. - * @param pretty whether to pretty-print UDT names (as described in {@link - * CqlIdentifier#asCql(boolean)}. - */ - @NonNull - String asCql(boolean includeFrozen, boolean pretty); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java deleted file mode 100644 index 492fc121c71..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.detach.Detachable; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser; -import com.datastax.oss.driver.internal.core.type.DefaultCustomType; -import com.datastax.oss.driver.internal.core.type.DefaultListType; -import com.datastax.oss.driver.internal.core.type.DefaultMapType; -import com.datastax.oss.driver.internal.core.type.DefaultSetType; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.PrimitiveType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -/** Constants and factory methods to obtain data type instances. 
*/ -public class DataTypes { - - public static final DataType ASCII = new PrimitiveType(ProtocolConstants.DataType.ASCII); - public static final DataType BIGINT = new PrimitiveType(ProtocolConstants.DataType.BIGINT); - public static final DataType BLOB = new PrimitiveType(ProtocolConstants.DataType.BLOB); - public static final DataType BOOLEAN = new PrimitiveType(ProtocolConstants.DataType.BOOLEAN); - public static final DataType COUNTER = new PrimitiveType(ProtocolConstants.DataType.COUNTER); - public static final DataType DECIMAL = new PrimitiveType(ProtocolConstants.DataType.DECIMAL); - public static final DataType DOUBLE = new PrimitiveType(ProtocolConstants.DataType.DOUBLE); - public static final DataType FLOAT = new PrimitiveType(ProtocolConstants.DataType.FLOAT); - public static final DataType INT = new PrimitiveType(ProtocolConstants.DataType.INT); - public static final DataType TIMESTAMP = new PrimitiveType(ProtocolConstants.DataType.TIMESTAMP); - public static final DataType UUID = new PrimitiveType(ProtocolConstants.DataType.UUID); - public static final DataType VARINT = new PrimitiveType(ProtocolConstants.DataType.VARINT); - public static final DataType TIMEUUID = new PrimitiveType(ProtocolConstants.DataType.TIMEUUID); - public static final DataType INET = new PrimitiveType(ProtocolConstants.DataType.INET); - public static final DataType DATE = new PrimitiveType(ProtocolConstants.DataType.DATE); - public static final DataType TEXT = new PrimitiveType(ProtocolConstants.DataType.VARCHAR); - public static final DataType TIME = new PrimitiveType(ProtocolConstants.DataType.TIME); - public static final DataType SMALLINT = new PrimitiveType(ProtocolConstants.DataType.SMALLINT); - public static final DataType TINYINT = new PrimitiveType(ProtocolConstants.DataType.TINYINT); - public static final DataType DURATION = new PrimitiveType(ProtocolConstants.DataType.DURATION); - - private static final DataTypeClassNameParser classNameParser = new 
DataTypeClassNameParser(); - - @NonNull - public static DataType custom(@NonNull String className) { - - // In protocol v4, duration is implemented as a custom type - if (className.equals("org.apache.cassandra.db.marshal.DurationType")) return DURATION; - - /* Vector support is currently implemented as a custom type but is also parameterized */ - if (className.startsWith(DefaultVectorType.VECTOR_CLASS_NAME)) - return classNameParser.parse(className, AttachmentPoint.NONE); - return new DefaultCustomType(className); - } - - @NonNull - public static ListType listOf(@NonNull DataType elementType) { - return new DefaultListType(elementType, false); - } - - @NonNull - public static ListType listOf(@NonNull DataType elementType, boolean frozen) { - return new DefaultListType(elementType, frozen); - } - - @NonNull - public static ListType frozenListOf(@NonNull DataType elementType) { - return new DefaultListType(elementType, true); - } - - @NonNull - public static SetType setOf(@NonNull DataType elementType) { - return new DefaultSetType(elementType, false); - } - - @NonNull - public static SetType setOf(@NonNull DataType elementType, boolean frozen) { - return new DefaultSetType(elementType, frozen); - } - - @NonNull - public static SetType frozenSetOf(@NonNull DataType elementType) { - return new DefaultSetType(elementType, true); - } - - @NonNull - public static MapType mapOf(@NonNull DataType keyType, @NonNull DataType valueType) { - return new DefaultMapType(keyType, valueType, false); - } - - @NonNull - public static MapType mapOf( - @NonNull DataType keyType, @NonNull DataType valueType, boolean frozen) { - return new DefaultMapType(keyType, valueType, frozen); - } - - @NonNull - public static MapType frozenMapOf(@NonNull DataType keyType, @NonNull DataType valueType) { - return new DefaultMapType(keyType, valueType, true); - } - - /** - * Builds a new, detached tuple type. 
- * - * @param componentTypes neither the individual types, nor the vararg array itself, can be {@code - * null}. - * @see Detachable - */ - @NonNull - public static TupleType tupleOf(@NonNull DataType... componentTypes) { - return new DefaultTupleType(ImmutableList.copyOf(Arrays.asList(componentTypes))); - } - - public static VectorType vectorOf(DataType subtype, int dimensions) { - return new DefaultVectorType(subtype, dimensions); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java deleted file mode 100644 index ca377d10bbf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface ListType extends DataType, ContainerType { - - boolean isFrozen(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - String template = (isFrozen() && includeFrozen) ? 
"frozen>" : "list<%s>"; - return String.format(template, getElementType().asCql(includeFrozen, pretty)); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.LIST; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java deleted file mode 100644 index f3bca2ac6a4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface MapType extends DataType { - - @NonNull - DataType getKeyType(); - - @NonNull - DataType getValueType(); - - boolean isFrozen(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - String template = (isFrozen() && includeFrozen) ? 
"frozen>" : "map<%s, %s>"; - return String.format( - template, - getKeyType().asCql(includeFrozen, pretty), - getValueType().asCql(includeFrozen, pretty)); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.MAP; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java deleted file mode 100644 index fa902c72bb8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface SetType extends DataType, ContainerType { - - boolean isFrozen(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - String template = (isFrozen() && includeFrozen) ? 
"frozen>" : "set<%s>"; - return String.format(template, getElementType().asCql(includeFrozen, pretty)); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.SET; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java deleted file mode 100644 index 9e2736ddce8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -public interface TupleType extends DataType { - - @NonNull - List getComponentTypes(); - - @NonNull - TupleValue newValue(); - - /** - * Creates a new instance with the specified values for the fields. - * - *

To encode the values, this method uses the {@link CodecRegistry} that this type is {@link - * #getAttachmentPoint() attached} to; it looks for the best codec to handle the target CQL type - * and actual runtime type of each value (see {@link CodecRegistry#codecFor(DataType, Object)}). - * - * @param values the values of the tuple's fields. They must be in the same order as the fields in - * the tuple's definition. You can specify less values than there are fields (the remaining - * ones will be set to NULL), but not more (a runtime exception will be thrown). Individual - * values can be {@code null}, but the array itself can't. - * @throws IllegalArgumentException if there are too many values. - */ - @NonNull - TupleValue newValue(@NonNull Object... values); - - @NonNull - AttachmentPoint getAttachmentPoint(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - StringBuilder builder = new StringBuilder(); - // Tuples are always frozen - if (includeFrozen) { - builder.append("frozen<"); - } - boolean first = true; - for (DataType type : getComponentTypes()) { - builder.append(first ? "tuple<" : ", "); - first = false; - builder.append(type.asCql(includeFrozen, pretty)); - } - builder.append('>'); - if (includeFrozen) { - builder.append('>'); - } - return builder.toString(); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.TUPLE; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java deleted file mode 100644 index 4d4768a8ae4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.metadata.schema.Describable; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.List; - -public interface UserDefinedType extends DataType, Describable { - - @Nullable // because of ShallowUserDefinedType usage in the query builder - CqlIdentifier getKeyspace(); - - @NonNull - CqlIdentifier getName(); - - boolean isFrozen(); - - @NonNull - List getFieldNames(); - - /** - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, - * as it will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull CqlIdentifier id) { - Loggers.USER_DEFINED_TYPE.warn( - "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(id)); - } - - int firstIndexOf(@NonNull CqlIdentifier id); - - /** - * @apiNote the default implementation only exists for backward compatibility. It wraps the result - * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it - * will only return the first occurrence. Therefore it also logs a warning. - *

Implementors should always override this method (all built-in driver implementations - * do). - */ - @NonNull - default List allIndicesOf(@NonNull String name) { - Loggers.USER_DEFINED_TYPE.warn( - "{} should override allIndicesOf(String), the default implementation is a " - + "workaround for backward compatibility, it only returns the first occurrence", - getClass().getName()); - return Collections.singletonList(firstIndexOf(name)); - } - - int firstIndexOf(@NonNull String name); - - default boolean contains(@NonNull CqlIdentifier id) { - return firstIndexOf(id) >= 0; - } - - default boolean contains(@NonNull String name) { - return firstIndexOf(name) >= 0; - } - - @NonNull - List getFieldTypes(); - - @NonNull - UserDefinedType copy(boolean newFrozen); - - @NonNull - UdtValue newValue(); - - /** - * Creates a new instance with the specified values for the fields. - * - *

To encode the values, this method uses the {@link CodecRegistry} that this type is {@link - * #getAttachmentPoint() attached} to; it looks for the best codec to handle the target CQL type - * and actual runtime type of each value (see {@link CodecRegistry#codecFor(DataType, Object)}). - * - * @param fields the value of the fields. They must be in the same order as the fields in the - * type's definition. You can specify less values than there are fields (the remaining ones - * will be set to NULL), but not more (a runtime exception will be thrown). Individual values - * can be {@code null}, but the array itself can't. - * @throws IllegalArgumentException if there are too many values. - */ - @NonNull - UdtValue newValue(@NonNull Object... fields); - - @NonNull - AttachmentPoint getAttachmentPoint(); - - @NonNull - @Override - default String asCql(boolean includeFrozen, boolean pretty) { - if (getKeyspace() != null) { - String template = (isFrozen() && includeFrozen) ? "frozen<%s.%s>" : "%s.%s"; - return String.format(template, getKeyspace().asCql(pretty), getName().asCql(pretty)); - } else { - String template = (isFrozen() && includeFrozen) ? 
"frozen<%s>" : "%s"; - return String.format(template, getName().asCql(pretty)); - } - } - - @NonNull - @Override - default String describe(boolean pretty) { - ScriptBuilder builder = new ScriptBuilder(pretty); - - builder - .append("CREATE TYPE ") - .append(getKeyspace()) - .append(".") - .append(getName()) - .append(" (") - .newLine() - .increaseIndent(); - - List fieldNames = getFieldNames(); - List fieldTypes = getFieldTypes(); - int fieldCount = fieldNames.size(); - for (int i = 0; i < fieldCount; i++) { - builder.append(fieldNames.get(i)).append(" ").append(fieldTypes.get(i).asCql(true, pretty)); - if (i < fieldCount - 1) { - builder.append(","); - } - builder.newLine(); - } - builder.decreaseIndent().append(");"); - return builder.build(); - } - - @NonNull - @Override - default String describeWithChildren(boolean pretty) { - // No children (if it uses other types, they're considered dependencies, not sub-elements) - return describe(pretty); - } - - @Override - default int getProtocolCode() { - return ProtocolConstants.DataType.UDT; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java deleted file mode 100644 index 1d7c13807ec..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type; - -/** - * Type representing a Cassandra vector type as described in CEP-30. At the moment this is - * implemented as a custom type so we include the CustomType interface as well. - */ -public interface VectorType extends CustomType, ContainerType { - - int getDimensions(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java deleted file mode 100644 index 4f45af0924f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** Thrown when a suitable {@link TypeCodec} cannot be found by the {@link CodecRegistry}. */ -public class CodecNotFoundException extends DriverException { - - private final DataType cqlType; - - private final GenericType javaType; - - public CodecNotFoundException(@Nullable DataType cqlType, @Nullable GenericType javaType) { - this( - String.format("Codec not found for requested operation: [%s <-> %s]", cqlType, javaType), - null, - cqlType, - javaType); - } - - public CodecNotFoundException( - @NonNull Throwable cause, @Nullable DataType cqlType, @Nullable GenericType javaType) { - this( - String.format( - "Error while looking up codec for requested operation: [%s <-> %s]", cqlType, javaType), - cause, - cqlType, - javaType); - } - - private CodecNotFoundException( - String msg, Throwable cause, DataType cqlType, GenericType javaType) { - super(msg, null, cause, true); - this.cqlType = cqlType; - this.javaType = javaType; - } - - @Nullable - public DataType getCqlType() { - return cqlType; - } - - @Nullable - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DriverException copy() { - return new CodecNotFoundException(getMessage(), getCause(), getCqlType(), getJavaType()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java deleted file mode 100644 index 51a96a16376..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java +++ 
/dev/null @@ -1,492 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.codec.SimpleBlobCodec; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.OptionalCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.BooleanListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ByteListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.DoubleListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.FloatListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.IntListToArrayCodec; -import 
com.datastax.oss.driver.internal.core.type.codec.extras.array.LongListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ObjectListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ShortListToArrayCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.enums.EnumNameCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.enums.EnumOrdinalCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.json.JsonCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.LocalTimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.PersistentZonedTimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.TimestampMillisCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.time.ZonedTimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.vector.FloatVectorToArrayCodec; -import com.fasterxml.jackson.databind.ObjectMapper; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.Optional; - -/** - * Additional codecs that can be registered to handle different type mappings. - * - * @see SessionBuilder#addTypeCodecs(TypeCodec[]) - * @see MutableCodecRegistry#register(TypeCodec) - */ -public class ExtraTypeCodecs { - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the UTC time zone - * to parse and format CQL literals. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - *

If you need a different time zone, consider other constants in this class, or call {@link - * ExtraTypeCodecs#timestampAt(ZoneId)} instead. - * - * @see TypeCodecs#TIMESTAMP - * @see ExtraTypeCodecs#timestampAt(ZoneId) - */ - public static final TypeCodec TIMESTAMP_UTC = new TimestampCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@code long}, representing the number of - * milliseconds since the Epoch, using the system's default time zone to parse and format CQL - * literals. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when formatting values as CQL literals, or parsing CQL literals - * that do not have any time zone indication. Note that this only applies to the {@link - * TypeCodec#format(Object)} and {@link TypeCodec#parse(String)} methods; regular encoding and - * decoding, like setting a value on a bound statement or reading a column from a row, are not - * affected by the time zone. - * - *

If you need a different time zone, consider other constants in this class, or call {@link - * #timestampMillisAt(ZoneId)} instead. - * - *

This codec can serve as a replacement for the driver's built-in {@linkplain - * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw - * milliseconds than with {@link Instant} instances. - * - * @see #TIMESTAMP_MILLIS_UTC - * @see #timestampMillisAt(ZoneId) - */ - public static final PrimitiveLongCodec TIMESTAMP_MILLIS_SYSTEM = new TimestampMillisCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@code long}, representing the number of - * milliseconds since the Epoch, using the UTC time zone to parse and format CQL literals. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - *

If you need a different time zone, consider other constants in this class, or call {@link - * #timestampMillisAt(ZoneId)} instead. - * - *

This codec can serve as a replacement for the driver's built-in {@linkplain - * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw - * milliseconds than with {@link Instant} instances. - * - * @see #TIMESTAMP_MILLIS_SYSTEM - * @see #timestampMillisAt(ZoneId) - */ - public static final PrimitiveLongCodec TIMESTAMP_MILLIS_UTC = - new TimestampMillisCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}, using the - * system's default time zone. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when encoding or decoding. If you need a different time zone, - * consider using other constants in this class, or call {@link #zonedTimestampAt(ZoneId)} - * instead. - * - *

Note that CQL type {@code timestamp} type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. - * - * @see #ZONED_TIMESTAMP_UTC - * @see #ZONED_TIMESTAMP_PERSISTED - * @see #zonedTimestampAt(ZoneId) - */ - public static final TypeCodec ZONED_TIMESTAMP_SYSTEM = new ZonedTimestampCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}, using the UTC - * time zone. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when encoding - * or decoding. If you need a different time zone, consider using other constants in this class, - * or call {@link #zonedTimestampAt(ZoneId)} instead. - * - *

Note that CQL type {@code timestamp} type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. - * - * @see #ZONED_TIMESTAMP_SYSTEM - * @see #ZONED_TIMESTAMP_PERSISTED - * @see #zonedTimestampAt(ZoneId) - */ - public static final TypeCodec ZONED_TIMESTAMP_UTC = - new ZonedTimestampCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code tuple} to Java's {@link ZonedDateTime}, - * providing a pattern for maintaining timezone information in Cassandra. - * - *

Since CQL type {@code timestamp} does not store any time zone, it is persisted separately in - * the {@code text} field of the tuple, and so when the value is read back the original timezone - * it was written with is preserved. - * - * @see #ZONED_TIMESTAMP_SYSTEM - * @see #ZONED_TIMESTAMP_UTC - * @see #zonedTimestampAt(ZoneId) - */ - public static final TypeCodec ZONED_TIMESTAMP_PERSISTED = - new PersistentZonedTimestampCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}, using the - * system's default time zone. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when encoding or decoding. If you need a different time zone, - * consider using other constants in this class, or call {@link #localTimestampAt(ZoneId)} - * instead. - * - *

Note that CQL type {@code timestamp} does not store any time zone; this codec is provided - * merely as a convenience for users that need to deal with local date-times in their - * applications. - * - * @see #LOCAL_TIMESTAMP_UTC - * @see #localTimestampAt(ZoneId) - */ - public static final TypeCodec LOCAL_TIMESTAMP_SYSTEM = new LocalTimestampCodec(); - - /** - * A codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}, using the UTC - * time zone. - * - *

This codec uses {@link ZoneOffset#UTC} as its source of time zone information when encoding - * or decoding. If you need a different time zone, consider using other constants in this class, - * or call {@link #localTimestampAt(ZoneId)} instead. - * - *

Note that CQL type {@code timestamp} does not store any time zone; this codec is provided - * merely as a convenience for users that need to deal with local date-times in their - * applications. - * - * @see #LOCAL_TIMESTAMP_SYSTEM - * @see #localTimestampAt(ZoneId) - */ - public static final TypeCodec LOCAL_TIMESTAMP_UTC = - new LocalTimestampCodec(ZoneOffset.UTC); - - /** - * A codec that maps CQL type {@code blob} to Java's {@code byte[]}. - * - *

If you are looking for a codec mapping CQL type {@code blob} to the Java type {@link - * ByteBuffer}, you should use {@link TypeCodecs#BLOB} instead. - * - *

If you are looking for a codec mapping CQL type {@code list BLOB_TO_ARRAY = new SimpleBlobCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code boolean[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code boolean[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * boolean} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec BOOLEAN_LIST_TO_ARRAY = new BooleanListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code byte[]}. - * - *

This codec is not suitable for reading CQL blobs as byte arrays. If you are looking for a - * codec for the CQL type {@code blob}, you should use {@link TypeCodecs#BLOB} or {@link - * ExtraTypeCodecs#BLOB_TO_ARRAY} instead. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code byte[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * byte} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - * - * @see TypeCodecs#BLOB - * @see ExtraTypeCodecs#BLOB_TO_ARRAY - */ - public static final TypeCodec BYTE_LIST_TO_ARRAY = new ByteListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code short[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code short[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * short} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec SHORT_LIST_TO_ARRAY = new ShortListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code int[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code int[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code int} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ - public static final TypeCodec INT_LIST_TO_ARRAY = new IntListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code long[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code long[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * long} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec LONG_LIST_TO_ARRAY = new LongListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code float[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code float[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * float} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec FLOAT_LIST_TO_ARRAY = new FloatListToArrayCodec(); - - /** - * A codec that maps CQL type {@code list} to Java's {@code double[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code double[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * double} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ - public static final TypeCodec DOUBLE_LIST_TO_ARRAY = new DoubleListToArrayCodec(); - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the - * given time zone to parse and format CQL literals. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - * @see TypeCodecs#TIMESTAMP - * @see ExtraTypeCodecs#TIMESTAMP_UTC - */ - @NonNull - public static TypeCodec timestampAt(@NonNull ZoneId timeZone) { - return new TimestampCodec(timeZone); - } - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@code long}, representing - * the number of milliseconds since the Epoch, using the given time zone to parse and format CQL - * literals. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * formatting values as CQL literals, or parsing CQL literals that do not have any time zone - * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link - * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a - * bound statement or reading a column from a row, are not affected by the time zone. - * - *

This codec can serve as a replacement for the driver's built-in {@linkplain - * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw - * milliseconds than with {@link Instant} instances. - * - * @see ExtraTypeCodecs#TIMESTAMP_MILLIS_SYSTEM - * @see ExtraTypeCodecs#TIMESTAMP_MILLIS_UTC - */ - @NonNull - public static PrimitiveLongCodec timestampMillisAt(@NonNull ZoneId timeZone) { - return new TimestampMillisCodec(timeZone); - } - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * encoding or decoding. - * - *

Note that CQL type {@code timestamp} does not store any time zone; the codecs created by - * this method are provided merely as a convenience for users that need to deal with zoned - * timestamps in their applications. - * - * @see ExtraTypeCodecs#ZONED_TIMESTAMP_SYSTEM - * @see ExtraTypeCodecs#ZONED_TIMESTAMP_UTC - * @see ExtraTypeCodecs#ZONED_TIMESTAMP_PERSISTED - */ - @NonNull - public static TypeCodec zonedTimestampAt(@NonNull ZoneId timeZone) { - return new ZonedTimestampCodec(timeZone); - } - - /** - * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}. - * - *

This codec uses the supplied {@link ZoneId} as its source of time zone information when - * encoding or decoding. - * - *

Note that CQL type {@code timestamp} does not store any time zone; the codecs created by - * this method are provided merely as a convenience for users that need to deal with local - * date-times in their applications. - * - * @see ExtraTypeCodecs#LOCAL_TIMESTAMP_UTC - * @see #localTimestampAt(ZoneId) - */ - @NonNull - public static TypeCodec localTimestampAt(@NonNull ZoneId timeZone) { - return new LocalTimestampCodec(timeZone); - } - - /** - * Builds a new codec that maps a CQL list to a Java array. Encoding and decoding of elements in - * the array is delegated to the provided element codec. - * - *

This method is not suitable for Java primitive arrays. Use {@link - * ExtraTypeCodecs#BOOLEAN_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY}, {@link - * ExtraTypeCodecs#SHORT_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#INT_LIST_TO_ARRAY}, {@link - * ExtraTypeCodecs#LONG_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#FLOAT_LIST_TO_ARRAY} or {@link - * ExtraTypeCodecs#DOUBLE_LIST_TO_ARRAY} instead. - */ - @NonNull - public static TypeCodec listToArrayOf(@NonNull TypeCodec elementCodec) { - return new ObjectListToArrayCodec<>(elementCodec); - } - - /** - * Builds a new codec that maps CQL type {@code int} to a Java Enum, according to its constants' - * {@linkplain Enum#ordinal() ordinals} (STRONGLY discouraged, see explanations below). - * - *

This method is provided for compatibility with driver 3, but we strongly recommend against - * it. Relying on enum ordinals is a bad practice: any reordering of the enum constants, or - * insertion of a new constant before the end, will change the ordinals. The codec will keep - * working, but start inserting different codes and corrupting your data. - * - *

{@link #enumNamesOf(Class)} is a safer alternative, as it is not dependent on the constant - * order. If you still want to use integer codes for storage efficiency, we recommend implementing - * an explicit mapping (for example with a {@code toCode()} method on your enum type). It is then - * fairly straightforward to implement a codec with {@link MappingCodec}, using {@link - * TypeCodecs#INT} as the "inner" codec. - */ - @NonNull - public static > TypeCodec enumOrdinalsOf( - @NonNull Class enumClass) { - return new EnumOrdinalCodec<>(enumClass); - } - - /** - * Builds a new codec that maps CQL type {@code text} to a Java Enum, according to its constants' - * programmatic {@linkplain Enum#name() names}. - * - * @see #enumOrdinalsOf(Class) - */ - @NonNull - public static > TypeCodec enumNamesOf( - @NonNull Class enumClass) { - return new EnumNameCodec<>(enumClass); - } - - /** - * Builds a new codec that wraps another codec's Java type into {@link Optional} instances - * (mapping CQL null to {@link Optional#empty()}). - */ - @NonNull - public static TypeCodec> optionalOf(@NonNull TypeCodec innerCodec) { - return new OptionalCodec<>(innerCodec); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with a default Jackson mapper. - * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json(@NonNull GenericType javaType) { - return new JsonCodec<>(javaType); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with a default Jackson mapper. - * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json(@NonNull Class javaType) { - return new JsonCodec<>(javaType); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with the provided Jackson mapper. 
- * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json( - @NonNull GenericType javaType, @NonNull ObjectMapper objectMapper) { - return new JsonCodec<>(javaType, objectMapper); - } - - /** - * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON - * serialization with the provided Jackson mapper. - * - * @see Jackson JSON Library - */ - @NonNull - public static TypeCodec json( - @NonNull Class javaType, @NonNull ObjectMapper objectMapper) { - return new JsonCodec<>(javaType, objectMapper); - } - - /** Builds a new codec that maps CQL float vectors of the specified size to an array of floats. */ - public static TypeCodec floatVectorToArray(int dimensions) { - return new FloatVectorToArrayCodec(new DefaultVectorType(DataTypes.FLOAT, dimensions)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java deleted file mode 100644 index df1a34a566a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** - * A {@link TypeCodec} that maps instances of {@code InnerT}, a driver supported Java type, to - * instances of a target {@code OuterT} Java type. - * - *

This codec can be used to provide support for Java types that are not natively handled by the - * driver, as long as there is a conversion path to and from another supported Java type. - * - * @param The "inner" Java type; must be a driver supported Java type (that is, there must - * exist a codec registered for it). - * @param The "outer", or target Java type; this codec will handle the mapping to and from - * {@code InnerT} and {@code OuterT}. - * @see driver - * documentation on custom codecs - * @see - * driver supported Java types - */ -public abstract class MappingCodec implements TypeCodec { - - protected final TypeCodec innerCodec; - protected final GenericType outerJavaType; - - /** - * Creates a new mapping codec providing support for {@code OuterT} based on an existing codec for - * {@code InnerT}. - * - * @param innerCodec The inner codec to use to handle instances of InnerT; must not be null. - * @param outerJavaType The outer Java type; must not be null. - */ - protected MappingCodec( - @NonNull TypeCodec innerCodec, @NonNull GenericType outerJavaType) { - this.innerCodec = Objects.requireNonNull(innerCodec, "innerCodec cannot be null"); - this.outerJavaType = Objects.requireNonNull(outerJavaType, "outerJavaType cannot be null"); - } - - /** @return The type of {@code OuterT}. */ - @NonNull - @Override - public GenericType getJavaType() { - return outerJavaType; - } - - /** @return The type of {@code InnerT}. 
*/ - public GenericType getInnerJavaType() { - return innerCodec.getJavaType(); - } - - @NonNull - @Override - public DataType getCqlType() { - return innerCodec.getCqlType(); - } - - @Override - public ByteBuffer encode(OuterT value, @NonNull ProtocolVersion protocolVersion) { - return innerCodec.encode(outerToInner(value), protocolVersion); - } - - @Override - public OuterT decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return innerToOuter(innerCodec.decode(bytes, protocolVersion)); - } - - @NonNull - @Override - public String format(OuterT value) { - return innerCodec.format(outerToInner(value)); - } - - @Override - public OuterT parse(String value) { - return innerToOuter(innerCodec.parse(value)); - } - - /** - * Converts from an instance of the inner Java type to an instance of the outer Java type. Used - * when deserializing or parsing. - * - * @param value The value to convert; may be null. - * @return The converted value; may be null. - */ - @Nullable - protected abstract OuterT innerToOuter(@Nullable InnerT value); - - /** - * Converts from an instance of the outer Java type to an instance of the inner Java type. Used - * when serializing or formatting. - * - * @param value The value to convert; may be null. - * @return The converted value; may be null. - */ - @Nullable - protected abstract InnerT outerToInner(@Nullable OuterT value); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java deleted file mode 100644 index 2ad4f2fa15a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized boolean codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's boolean getters will use - * it to avoid boxing. - */ -public interface PrimitiveBooleanCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(boolean value, @NonNull ProtocolVersion protocolVersion); - - boolean decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Boolean value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Boolean decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java deleted file mode 100644 index 5909bcd4ff9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized byte codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's byte getters will use it - * to avoid boxing. - */ -public interface PrimitiveByteCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(byte value, @NonNull ProtocolVersion protocolVersion); - - byte decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Byte value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Byte decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java deleted file mode 100644 index c46160f0942..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized double codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's double getters will use - * it to avoid boxing. - */ -public interface PrimitiveDoubleCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(double value, @NonNull ProtocolVersion protocolVersion); - - double decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Double value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Double decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java deleted file mode 100644 index 585d5fdb1fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized float codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's float getters will use it - * to avoid boxing. - */ -public interface PrimitiveFloatCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(float value, @NonNull ProtocolVersion protocolVersion); - - float decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Float value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Float decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java deleted file mode 100644 index b3f374eb8d7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized integer codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's integer getters will use - * it to avoid boxing. - */ -public interface PrimitiveIntCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(int value, @NonNull ProtocolVersion protocolVersion); - - int decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Integer value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Integer decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java deleted file mode 100644 index ec65820c60f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized long codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's long getters will use it - * to avoid boxing. - */ -public interface PrimitiveLongCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion); - - long decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Long value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Long decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java deleted file mode 100644 index 48c063b3dc6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; - -/** - * A specialized short codec that knows how to deal with primitive types. - * - *

If the codec registry returns an instance of this type, the driver's short getters will use it - * to avoid boxing. - */ -public interface PrimitiveShortCodec extends TypeCodec { - - @Nullable - ByteBuffer encodePrimitive(short value, @NonNull ProtocolVersion protocolVersion); - - short decodePrimitive(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion); - - @Nullable - @Override - default ByteBuffer encode(@Nullable Short value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : encodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - default Short decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null || bytes.remaining() == 0) - ? null - : decodePrimitive(bytes, protocolVersion); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java deleted file mode 100644 index d6afbe0380a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; - -/** - * Manages the two-way conversion between a CQL type and a Java type. - * - *

Type codec implementations: - * - *

    - *
  1. must be thread-safe. - *
  2. must perform fast and never block. - *
  3. must support all native protocol versions; it is not possible to use different - * codecs for the same types but under different protocol versions. - *
  4. must comply with the native protocol specifications; failing to do so will result - * in unexpected results and could cause the driver to crash. - *
  5. should be stateless and immutable. - *
  6. should interpret {@code null} values and empty byte buffers (i.e. - * {@link ByteBuffer#remaining()} == 0) in a reasonable way; usually, {@code - * NULL} CQL values should map to {@code null} references, but exceptions exist; e.g. for - * varchar types, a {@code NULL} CQL value maps to a {@code null} reference, whereas an empty - * buffer maps to an empty String. For collection types, it is also admitted that {@code NULL} - * CQL values map to empty Java collections instead of {@code null} references. In any case, - * the codec's behavior with respect to {@code null} values and empty ByteBuffers should be - * clearly documented. - *
  7. for Java types that have a primitive equivalent, should implement the appropriate - * "primitive" codec interface, e.g. {@link PrimitiveBooleanCodec} for {@code boolean}. This - * allows the driver to avoid the overhead of boxing when using primitive accessors such as - * {@link Row#getBoolean(int)}. - *
  8. when decoding, must not consume {@link ByteBuffer} instances by performing - * relative read operations that modify their current position; codecs should instead prefer - * absolute read methods or, if necessary, {@link ByteBuffer#duplicate() duplicate} their byte - * buffers prior to reading them. - *
- */ -public interface TypeCodec { - - @NonNull - GenericType getJavaType(); - - @NonNull - DataType getCqlType(); - - /** - * Whether this codec is capable of processing the given Java type. - * - *

The default implementation is invariant with respect to the passed argument - * (through the usage of {@link GenericType#equals(Object)}) and it's strongly recommended not - * to modify this behavior. This means that a codec will only ever accept the exact - * Java type that it has been created for. - * - *

If the argument represents a Java primitive type, its wrapper type is considered instead. - */ - default boolean accepts(@NonNull GenericType javaType) { - Preconditions.checkNotNull(javaType); - return getJavaType().equals(javaType.wrap()); - } - - /** - * Whether this codec is capable of processing the given Java class. - * - *

This implementation simply compares the given class (or its wrapper type if it is a - * primitive type) against this codec's runtime (raw) class; it is invariant with respect - * to the passed argument (through the usage of {@link Class#equals(Object)} and it's strongly - * recommended not to modify this behavior. This means that a codec will only ever return - * {@code true} for the exact runtime (raw) Java class that it has been created for. - * - *

Implementors are encouraged to override this method if there is a more efficient way. In - * particular, if the codec targets a final class, the check can be done with a simple {@code ==}. - */ - default boolean accepts(@NonNull Class javaClass) { - Preconditions.checkNotNull(javaClass); - if (javaClass.isPrimitive()) { - if (javaClass == Boolean.TYPE) { - javaClass = Boolean.class; - } else if (javaClass == Character.TYPE) { - javaClass = Character.class; - } else if (javaClass == Byte.TYPE) { - javaClass = Byte.class; - } else if (javaClass == Short.TYPE) { - javaClass = Short.class; - } else if (javaClass == Integer.TYPE) { - javaClass = Integer.class; - } else if (javaClass == Long.TYPE) { - javaClass = Long.class; - } else if (javaClass == Float.TYPE) { - javaClass = Float.class; - } else if (javaClass == Double.TYPE) { - javaClass = Double.class; - } - } - return getJavaType().getRawType().equals(javaClass); - } - - /** - * Whether this codec is capable of encoding the given Java object. - * - *

The object's Java type is inferred from its runtime (raw) type, contrary to {@link - * #accepts(GenericType)} which is capable of handling generic types. - * - *

Contrary to other {@code accept} methods, this method's default implementation is - * covariant with respect to the passed argument (through the usage of {@link - * Class#isAssignableFrom(Class)}) and it's strongly recommended not to modify this - * behavior. This means that, by default, a codec will accept any subtype of the - * Java type that it has been created for. This is so because codec lookups by arbitrary Java - * objects only make sense when attempting to encode, never when attempting to decode, and indeed - * the {@linkplain #encode(Object, ProtocolVersion) encode} method is covariant with {@code - * JavaTypeT}. - * - *

It can only handle non-parameterized types; codecs handling parameterized types, such as - * collection types, must override this method and perform some sort of "manual" inspection of the - * actual type parameters. - * - *

Similarly, codecs that only accept a partial subset of all possible values must override - * this method and manually inspect the object to check if it complies or not with the codec's - * limitations. - * - *

Finally, if the codec targets a non-generic Java class, it might be possible to implement - * this method with a simple {@code instanceof} check. - */ - default boolean accepts(@NonNull Object value) { - Preconditions.checkNotNull(value); - return getJavaType().getRawType().isAssignableFrom(value.getClass()); - } - - /** Whether this codec is capable of processing the given CQL type. */ - default boolean accepts(@NonNull DataType cqlType) { - Preconditions.checkNotNull(cqlType); - return this.getCqlType().equals(cqlType); - } - - /** - * Encodes the given value in the binary format of the CQL type handled by this codec. - * - *

    - *
  • Null values should be gracefully handled and no exception should be raised; they should - * be considered as the equivalent of a NULL CQL value; - *
  • Codecs for CQL collection types should not permit null elements; - *
  • Codecs for CQL collection types should treat a {@code null} input as the equivalent of an - * empty collection. - *
- */ - @Nullable - ByteBuffer encode(@Nullable JavaTypeT value, @NonNull ProtocolVersion protocolVersion); - - /** - * Decodes a value from the binary format of the CQL type handled by this codec. - * - *
    - *
  • Null or empty buffers should be gracefully handled and no exception should be raised; - * they should be considered as the equivalent of a NULL CQL value and, in most cases, - * should map to {@code null} or a default value for the corresponding Java type, if - * applicable; - *
  • Codecs for CQL collection types should clearly document whether they return immutable - * collections or not (note that the driver's default collection codecs return - * mutable collections); - *
  • Codecs for CQL collection types should avoid returning {@code null}; they should return - * empty collections instead (the driver's default collection codecs all comply with this - * rule); - *
  • The provided {@link ByteBuffer} should never be consumed by read operations that modify - * its current position; if necessary, {@link ByteBuffer#duplicate()} duplicate} it before - * consuming. - *
- */ - @Nullable - JavaTypeT decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion); - - /** - * Formats the given value as a valid CQL literal according to the CQL type handled by this codec. - * - *

Implementors should take care of quoting and escaping the resulting CQL literal where - * applicable. Null values should be accepted; in most cases, implementations should return the - * CQL keyword {@code "NULL"} for {@code null} inputs. - * - *

Implementing this method is not strictly mandatory. It is used: - * - *

    - *
  1. by the request logger, if parameter logging is enabled; - *
  2. to format the INITCOND in {@link AggregateMetadata#describe(boolean)}; - *
  3. in the {@code toString()} representation of some driver objects (such as {@link UdtValue} - * and {@link TupleValue}), which is only used in driver logs; - *
  4. for literal values in the query builder (see {@code QueryBuilder#literal(Object, - * CodecRegistry)} and {@code QueryBuilder#literal(Object, TypeCodec)}). - *
- * - * If you choose not to implement this method, don't throw an exception but instead return a - * constant string (for example "XxxCodec.format not implemented"). - */ - @NonNull - String format(@Nullable JavaTypeT value); - - /** - * Parse the given CQL literal into an instance of the Java type handled by this codec. - * - *

Implementors should take care of unquoting and unescaping the given CQL string where - * applicable. Null values and empty strings should be accepted, as well as the string {@code - * "NULL"}; in most cases, implementations should interpret these inputs has equivalent to a - * {@code null} reference. - * - *

Implementing this method is not strictly mandatory: internally, the driver only uses it to - * parse the INITCOND when building the {@link AggregateMetadata metadata of an aggregate - * function} (and in most cases it will use a built-in codec, unless the INITCOND has a custom - * type). - * - *

If you choose not to implement this method, don't throw an exception but instead return - * {@code null}. - */ - @Nullable - JavaTypeT parse(@Nullable String value); - - @NonNull - default Optional serializedSize() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java deleted file mode 100644 index 68f1b07b106..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.internal.core.type.codec.BigIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.BlobCodec; -import com.datastax.oss.driver.internal.core.type.codec.BooleanCodec; -import com.datastax.oss.driver.internal.core.type.codec.CounterCodec; -import com.datastax.oss.driver.internal.core.type.codec.CqlDurationCodec; -import com.datastax.oss.driver.internal.core.type.codec.CustomCodec; -import com.datastax.oss.driver.internal.core.type.codec.DateCodec; -import com.datastax.oss.driver.internal.core.type.codec.DecimalCodec; -import com.datastax.oss.driver.internal.core.type.codec.DoubleCodec; -import com.datastax.oss.driver.internal.core.type.codec.FloatCodec; -import com.datastax.oss.driver.internal.core.type.codec.InetCodec; -import com.datastax.oss.driver.internal.core.type.codec.IntCodec; -import com.datastax.oss.driver.internal.core.type.codec.ListCodec; -import com.datastax.oss.driver.internal.core.type.codec.MapCodec; -import com.datastax.oss.driver.internal.core.type.codec.SetCodec; -import com.datastax.oss.driver.internal.core.type.codec.SmallIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.StringCodec; -import com.datastax.oss.driver.internal.core.type.codec.TimeCodec; -import com.datastax.oss.driver.internal.core.type.codec.TimeUuidCodec; -import 
com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import com.datastax.oss.driver.internal.core.type.codec.TinyIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.TupleCodec; -import com.datastax.oss.driver.internal.core.type.codec.UdtCodec; -import com.datastax.oss.driver.internal.core.type.codec.UuidCodec; -import com.datastax.oss.driver.internal.core.type.codec.VarIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.VectorCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** - * Constants and factory methods to obtain instances of the driver's default type codecs. - * - *

See also {@link ExtraTypeCodecs} for additional codecs that you can register with your session - * to handle different type mappings. - */ -public class TypeCodecs { - - /** The default codec that maps CQL type {@code boolean} to Java's {@code boolean}. */ - public static final PrimitiveBooleanCodec BOOLEAN = new BooleanCodec(); - - /** The default codec that maps CQL type {@code tinyint} to Java's {@code byte}. */ - public static final PrimitiveByteCodec TINYINT = new TinyIntCodec(); - - /** The default codec that maps CQL type {@code double} to Java's {@code double}. */ - public static final PrimitiveDoubleCodec DOUBLE = new DoubleCodec(); - - /** The default codec that maps CQL type {@code counter} to Java's {@code long}. */ - public static final PrimitiveLongCodec COUNTER = new CounterCodec(); - - /** The default codec that maps CQL type {@code float} to Java's {@code float}. */ - public static final PrimitiveFloatCodec FLOAT = new FloatCodec(); - - /** The default codec that maps CQL type {@code int} to Java's {@code int}. */ - public static final PrimitiveIntCodec INT = new IntCodec(); - - /** The default codec that maps CQL type {@code bigint} to Java's {@code long}. */ - public static final PrimitiveLongCodec BIGINT = new BigIntCodec(); - - /** The default codec that maps CQL type {@code smallint} to Java's {@code short}. */ - public static final PrimitiveShortCodec SMALLINT = new SmallIntCodec(); - - /** - * The default codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the - * system's default time zone to parse and format CQL literals. - * - *

This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information when formatting values as CQL literals, or parsing CQL literals - * that do not have any time zone indication. Note that this only applies to the {@link - * TypeCodec#format(Object)} and {@link TypeCodec#parse(String)} methods; regular encoding and - * decoding, like setting a value on a bound statement or reading a column from a row, are not - * affected by the time zone. - * - *

If you need a different time zone, consider other codecs in {@link ExtraTypeCodecs}, or call - * {@link ExtraTypeCodecs#timestampAt(ZoneId)} instead. - * - * @see ExtraTypeCodecs#TIMESTAMP_UTC - * @see ExtraTypeCodecs#timestampAt(ZoneId) - */ - public static final TypeCodec TIMESTAMP = new TimestampCodec(); - - /** The default codec that maps CQL type {@code date} to Java's {@link LocalDate}. */ - public static final TypeCodec DATE = new DateCodec(); - - /** The default codec that maps CQL type {@code time} to Java's {@link LocalTime}. */ - public static final TypeCodec TIME = new TimeCodec(); - - /** - * The default codec that maps CQL type {@code blob} to Java's {@link ByteBuffer}. - * - *

If you are looking for a codec mapping CQL type {@code blob} to the Java type {@code - * byte[]}, you should use {@link ExtraTypeCodecs#BLOB_TO_ARRAY} instead. - * - *

If you are looking for a codec mapping CQL type {@code list} to the Java type - * {@code byte[]}, you should use {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY} instead. - * - * @see ExtraTypeCodecs#BLOB_TO_ARRAY - * @see ExtraTypeCodecs#BYTE_LIST_TO_ARRAY - */ - public static final TypeCodec BLOB = new BlobCodec(); - - /** The default codec that maps CQL type {@code text} to Java's {@link String}. */ - public static final TypeCodec TEXT = new StringCodec(DataTypes.TEXT, Charsets.UTF_8); - /** The default codec that maps CQL type {@code ascii} to Java's {@link String}. */ - public static final TypeCodec ASCII = new StringCodec(DataTypes.ASCII, Charsets.US_ASCII); - /** The default codec that maps CQL type {@code varint} to Java's {@link BigInteger}. */ - public static final TypeCodec VARINT = new VarIntCodec(); - /** The default codec that maps CQL type {@code decimal} to Java's {@link BigDecimal}. */ - public static final TypeCodec DECIMAL = new DecimalCodec(); - /** The default codec that maps CQL type {@code uuid} to Java's {@link UUID}. */ - public static final TypeCodec UUID = new UuidCodec(); - /** The default codec that maps CQL type {@code timeuuid} to Java's {@link UUID}. */ - public static final TypeCodec TIMEUUID = new TimeUuidCodec(); - /** The default codec that maps CQL type {@code inet} to Java's {@link InetAddress}. */ - public static final TypeCodec INET = new InetCodec(); - /** The default codec that maps CQL type {@code duration} to the driver's {@link CqlDuration}. */ - public static final TypeCodec DURATION = new CqlDurationCodec(); - - /** - * Builds a new codec that maps a CQL custom type to Java's {@link ByteBuffer}. - * - * @param cqlType the fully-qualified name of the custom type. 
- */ - @NonNull - public static TypeCodec custom(@NonNull DataType cqlType) { - Preconditions.checkArgument(cqlType instanceof CustomType, "cqlType must be a custom type"); - return new CustomCodec((CustomType) cqlType); - } - - /** - * Builds a new codec that maps a CQL list to a Java list, using the given codec to map each - * element. - */ - @NonNull - public static TypeCodec> listOf(@NonNull TypeCodec elementCodec) { - return new ListCodec<>(DataTypes.listOf(elementCodec.getCqlType()), elementCodec); - } - - /** - * Builds a new codec that maps a CQL set to a Java set, using the given codec to map each - * element. - */ - @NonNull - public static TypeCodec> setOf(@NonNull TypeCodec elementCodec) { - return new SetCodec<>(DataTypes.setOf(elementCodec.getCqlType()), elementCodec); - } - - /** - * Builds a new codec that maps a CQL map to a Java map, using the given codecs to map each key - * and value. - */ - @NonNull - public static TypeCodec> mapOf( - @NonNull TypeCodec keyCodec, @NonNull TypeCodec valueCodec) { - return new MapCodec<>( - DataTypes.mapOf(keyCodec.getCqlType(), valueCodec.getCqlType()), keyCodec, valueCodec); - } - - /** - * Builds a new codec that maps a CQL tuple to the driver's {@link TupleValue}, for the given type - * definition. - * - *

Note that the components of a {@link TupleValue} are stored in their encoded form. They are - * encoded/decoded on the fly when you set or get them, using the codec registry. - */ - @NonNull - public static TypeCodec tupleOf(@NonNull TupleType cqlType) { - return new TupleCodec(cqlType); - } - - public static TypeCodec> vectorOf( - @NonNull VectorType type, @NonNull TypeCodec subtypeCodec) { - return new VectorCodec( - DataTypes.vectorOf(subtypeCodec.getCqlType(), type.getDimensions()), subtypeCodec); - } - - public static TypeCodec> vectorOf( - int dimensions, @NonNull TypeCodec subtypeCodec) { - return new VectorCodec(DataTypes.vectorOf(subtypeCodec.getCqlType(), dimensions), subtypeCodec); - } - - /** - * Builds a new codec that maps a CQL user defined type to the driver's {@link UdtValue}, for the - * given type definition. - * - *

Note that the fields of a {@link UdtValue} are stored in their encoded form. They are - * encoded/decoded on the fly when you set or get them, using the codec registry. - */ - @NonNull - public static TypeCodec udtOf(@NonNull UserDefinedType cqlType) { - return new UdtCodec(cqlType); - } - - /** - * An alias for {@link ExtraTypeCodecs#ZONED_TIMESTAMP_SYSTEM}. - * - *

This exists for historical reasons: the constant was originally defined in this class, but - * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. - */ - public static final TypeCodec ZONED_TIMESTAMP_SYSTEM = - ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; - - /** - * An alias for {@link ExtraTypeCodecs#ZONED_TIMESTAMP_UTC}. - * - *

This exists for historical reasons: the constant was originally defined in this class, but - * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. - */ - public static final TypeCodec ZONED_TIMESTAMP_UTC = - ExtraTypeCodecs.ZONED_TIMESTAMP_UTC; - - /** - * An alias for {@link ExtraTypeCodecs#zonedTimestampAt(ZoneId)}. - * - *

This exists for historical reasons: the method was originally defined in this class, but - * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. - */ - @NonNull - public static TypeCodec zonedTimestampAt(@NonNull ZoneId timeZone) { - return ExtraTypeCodecs.zonedTimestampAt(timeZone); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java deleted file mode 100644 index 36472f34c79..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** - * Provides codecs to convert CQL types to their Java equivalent, and vice-versa. - * - *

Implementations MUST provide a default mapping for all CQL types (primitive types, and - * all the collections, tuples or user-defined types that can recursively be built from them — - * see {@link DataTypes}). - * - *

They may also provide additional mappings to other Java types (for use with methods such as - * {@link Row#get(int, Class)}, {@link TupleValue#set(int, Object, Class)}, etc.) - * - *

The default implementation returned by the driver also implements {@link - * MutableCodecRegistry}, and we strongly recommend that custom implementations do as well. The two - * interfaces are only separate for backward compatibility, because mutability was introduced in - * 4.3.0. - */ -public interface CodecRegistry { - /** - * An immutable instance, that only handles built-in driver types (that is, primitive types, and - * collections, tuples, and user defined types thereof). - * - *

Note that, due to implementation details, this instance is a {@link MutableCodecRegistry}, - * but any attempt to {@linkplain MutableCodecRegistry#register(TypeCodec) register new codecs} - * will throw {@link UnsupportedOperationException}. - */ - CodecRegistry DEFAULT = - new DefaultCodecRegistry("default") { - @Override - public void register(TypeCodec newCodec) { - throw new UnsupportedOperationException("CodecRegistry.DEFAULT is immutable"); - } - }; - - /** - * Returns a codec to handle the conversion between the given types. - * - *

This is used internally by the driver, in cases where both types are known, for example - * {@link GettableByIndex#getString(int) row.getString(0)} (Java type inferred from the method, - * CQL type known from the row metadata). - * - *

The driver's default registry implementation is invariant with regard to the Java - * type: for example, if {@code B extends A} and an {@code A<=>int} codec is registered, {@code - * codecFor(DataTypes.INT, B.class)} will not find that codec. This is because this method - * is used internally both for encoding and decoding, and covariance wouldn't work when decoding. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull GenericType javaType); - - /** - * Shortcut for {@link #codecFor(DataType, GenericType) codecFor(cqlType, - * GenericType.of(javaType))}. - * - *

Implementations may decide to override this method for performance reasons, if they have a - * way to avoid the overhead of wrapping. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - default TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull Class javaType) { - return codecFor(cqlType, GenericType.of(javaType)); - } - - /** - * Returns a codec to convert the given CQL type to the Java type deemed most appropriate to - * represent it. - * - *

This is used internally by the driver, in cases where the Java type is not explicitly - * provided, for example {@link GettableByIndex#getObject(int) row.getObject(0)} (CQL type known - * from the row metadata, Java type unspecified). - * - *

The definition of "most appropriate" is left to the appreciation of the registry - * implementor. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull DataType cqlType); - - /** - * Returns a codec to convert the given Java type to the CQL type deemed most appropriate to - * represent it. - * - *

The driver does not use this method. It is provided as a convenience for third-party usage, - * for example if you were to generate a schema based on a set of Java classes. - * - *

The driver's default registry implementation is invariant with regard to the Java - * type: for example, if {@code B extends A} and an {@code A<=>int} codec is registered, {@code - * codecFor(DataTypes.INT, B.class)} will not find that codec. This is because we don't - * know whether this method will be used for encoding, decoding, or both. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull GenericType javaType); - - /** - * Shortcut for {@link #codecFor(GenericType) codecFor(GenericType.of(javaType))}. - * - *

Implementations may decide to override this method for performance reasons, if they have a - * way to avoid the overhead of wrapping. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - default TypeCodec codecFor(@NonNull Class javaType) { - return codecFor(GenericType.of(javaType)); - } - - /** - * Returns a codec to convert the given Java object to the given CQL type. - * - *

This is used internally by the driver when you bulk-set values in a {@link - * PreparedStatement#bind(Object...) bound statement}, {@link UserDefinedType#newValue(Object...) - * UDT} or {@link TupleType#newValue(Object...) tuple}. - * - *

Unlike other methods, the driver's default registry implementation is covariant - * with regard to the Java type: for example, if {@code B extends A} and an {@code A<=>int} codec - * is registered, {@code codecFor(DataTypes.INT, someB)} will find that codec. This is - * because this method is always used in encoding scenarios; if a bound statement has a value with - * a runtime type of {@code ArrayList}, it should be possible to encode it with a codec - * that accepts a {@code List}. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull DataType cqlType, @NonNull JavaTypeT value); - - /** - * Returns a codec to convert the given Java object to the CQL type deemed most appropriate to - * represent it. - * - *

This is used internally by the driver, in cases where the CQL type is unknown, for example - * for {@linkplain SimpleStatement#setPositionalValues(List) simple statement variables} (simple - * statements don't have access to schema metadata). - * - *

Unlike other methods, the driver's default registry implementation is covariant - * with regard to the Java type: for example, if {@code B extends A} and an {@code A<=>int} codec - * is registered, {@code codecFor(someB)} will find that codec. This is because this method - * is always used in encoding scenarios; if a simple statement has a value with a runtime type of - * {@code ArrayList}, it should be possible to encode it with a codec that accepts a - * {@code List}. - * - *

Note that, if {@code value} is an empty collection, this method may return a codec that - * won't accept {@code JavaTypeT}; but it will encode {@code value} correctly. - * - * @throws CodecNotFoundException if there is no such codec. - */ - @NonNull - TypeCodec codecFor(@NonNull JavaTypeT value); -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java deleted file mode 100644 index 7f5d1fb9813..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; - -/** - * A codec registry that can be extended with new user codecs at runtime. - * - *

This interface only exists to preserve backward compatibility. In practice, the default {@link - * CodecRegistry} implementation returned by the driver implements this interface, so it can safely - * be cast. - * - *

However {@link CodecRegistry#DEFAULT} is immutable. It implements this interface, but {@link - * #register(TypeCodec)} throws an {@link UnsupportedOperationException}. - * - * @since 4.3.0 - */ -public interface MutableCodecRegistry extends CodecRegistry { - - /** - * Adds the given codec to the registry. - * - *

This method will log a warning and ignore the codec if it collides with one already present - * in the registry. Note that the driver's built-in implementation uses internal synchronization - * to guarantee that two threads cannot register colliding codecs concurrently; registration is - * not expected to happen in a very concurrent manner, so this should not pose a performance - * issue. - */ - void register(TypeCodec codec); - - /** Invokes {@link #register(TypeCodec)} for every codec in the given list. */ - default void register(TypeCodec... codecs) { - for (TypeCodec codec : codecs) { - register(codec); - } - } - - /** Invokes {@link #register(TypeCodec)} for every codec in the given list. */ - default void register(Iterable> codecs) { - for (TypeCodec codec : codecs) { - register(codec); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java deleted file mode 100644 index d22b6f1bfaf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.reflect; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.primitives.Primitives; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeParameter; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeResolver; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Type; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.ZonedDateTime; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -/** - * Runtime representation of a generic Java type. - * - *

This is used by type codecs to indicate which Java types they accept ({@link - * TypeCodec#accepts(GenericType)}), and by generic getters and setters (such as {@link - * GettableByIndex#get(int, GenericType)} in the driver's query API. - * - *

There are various ways to build instances of this class: - * - *

By using one of the static factory methods: - * - *

{@code
- * GenericType> stringListType = GenericType.listOf(String.class);
- * }
- * - * By using an anonymous class: - * - *
{@code
- * GenericType> fooBarType = new GenericType>(){};
- * }
- * - * In a generic method, by using {@link #where(GenericTypeParameter, GenericType)} to substitute - * free type variables with runtime types: - * - *
{@code
- *  GenericType> optionalOf(GenericType elementType) {
- *   return new GenericType>() {}
- *     .where(new GenericTypeParameter() {}, elementType);
- * }
- * ...
- * GenericType>> optionalStringListType = optionalOf(GenericType.listOf(String.class));
- * }
- * - *

You are encouraged to store and reuse these instances. - * - *

Note that this class is a thin wrapper around Guava's {@code TypeToken}. The only reason why - * {@code TypeToken} is not used directly is because Guava is not exposed in the driver's public API - * (it's used internally, but shaded). - */ -@Immutable -public class GenericType { - - public static final GenericType BOOLEAN = of(Boolean.class); - public static final GenericType BYTE = of(Byte.class); - public static final GenericType DOUBLE = of(Double.class); - public static final GenericType FLOAT = of(Float.class); - public static final GenericType INTEGER = of(Integer.class); - public static final GenericType LONG = of(Long.class); - public static final GenericType SHORT = of(Short.class); - public static final GenericType INSTANT = of(Instant.class); - public static final GenericType ZONED_DATE_TIME = of(ZonedDateTime.class); - public static final GenericType LOCAL_DATE = of(LocalDate.class); - public static final GenericType LOCAL_TIME = of(LocalTime.class); - public static final GenericType LOCAL_DATE_TIME = of(LocalDateTime.class); - public static final GenericType BYTE_BUFFER = of(ByteBuffer.class); - public static final GenericType STRING = of(String.class); - public static final GenericType BIG_INTEGER = of(BigInteger.class); - public static final GenericType BIG_DECIMAL = of(BigDecimal.class); - public static final GenericType UUID = of(UUID.class); - public static final GenericType INET_ADDRESS = of(InetAddress.class); - public static final GenericType CQL_DURATION = of(CqlDuration.class); - public static final GenericType TUPLE_VALUE = of(TupleValue.class); - public static final GenericType UDT_VALUE = of(UdtValue.class); - public static final GenericType DURATION = of(Duration.class); - - @NonNull - public static GenericType of(@NonNull Class type) { - return new SimpleGenericType<>(type); - } - - @NonNull - public static GenericType of(@NonNull java.lang.reflect.Type type) { - return new GenericType<>(TypeToken.of(type)); - } - - @NonNull - public 
static GenericType> listOf(@NonNull Class elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, TypeToken.of(elementType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> listOf(@NonNull GenericType elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> setOf(@NonNull Class elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, TypeToken.of(elementType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> setOf(@NonNull GenericType elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> vectorOf(@NonNull Class elementType) { - TypeToken> token = - new TypeToken>() {}.where( - new TypeParameter() {}, TypeToken.of(elementType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> vectorOf(@NonNull GenericType elementType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> mapOf( - @NonNull Class keyType, @NonNull Class valueType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, TypeToken.of(keyType)) - .where(new TypeParameter() {}, TypeToken.of(valueType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> mapOf( - @NonNull GenericType keyType, @NonNull GenericType valueType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, keyType.token) - .where(new TypeParameter() {}, valueType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType arrayOf(@NonNull Class componentType) { - TypeToken token = - new TypeToken() 
{}.where(new TypeParameter() {}, TypeToken.of(componentType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType arrayOf(@NonNull GenericType componentType) { - TypeToken token = - new TypeToken() {}.where(new TypeParameter() {}, componentType.token); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> optionalOf(@NonNull Class componentType) { - TypeToken> token = - new TypeToken>() {}.where( - new TypeParameter() {}, TypeToken.of(componentType)); - return new GenericType<>(token); - } - - @NonNull - public static GenericType> optionalOf(@NonNull GenericType componentType) { - TypeToken> token = - new TypeToken>() {}.where(new TypeParameter() {}, componentType.token); - return new GenericType<>(token); - } - - private final TypeToken token; - - private GenericType(TypeToken token) { - this.token = token; - } - - protected GenericType() { - this.token = new TypeToken(getClass()) {}; - } - - /** - * Returns true if this type is a supertype of the given {@code type}. "Supertype" is defined - * according to the rules for type - * arguments introduced with Java generics. - */ - public final boolean isSupertypeOf(@NonNull GenericType type) { - return token.isSupertypeOf(type.token); - } - - /** - * Returns true if this type is a subtype of the given {@code type}. "Subtype" is defined - * according to the rules for type - * arguments introduced with Java generics. - */ - public final boolean isSubtypeOf(@NonNull GenericType type) { - return token.isSubtypeOf(type.token); - } - - /** - * Returns true if this type is known to be an array type, such as {@code int[]}, {@code T[]}, - * {@code []>} etc. - */ - public final boolean isArray() { - return token.isArray(); - } - - /** Returns true if this type is one of the nine primitive types (including {@code void}). 
*/ - public final boolean isPrimitive() { - return token.isPrimitive(); - } - - /** - * Returns the corresponding wrapper type if this is a primitive type; otherwise returns {@code - * this} itself. Idempotent. - */ - @NonNull - public final GenericType wrap() { - if (isPrimitive()) { - return new GenericType<>(token.wrap()); - } - return this; - } - - /** - * Returns the corresponding primitive type if this is a wrapper type; otherwise returns {@code - * this} itself. Idempotent. - */ - @NonNull - public final GenericType unwrap() { - if (Primitives.allWrapperTypes().contains(token.getRawType())) { - return new GenericType<>(token.unwrap()); - } - return this; - } - - /** - * Substitutes a free type variable with an actual type. See {@link GenericType this class's - * javadoc} for an example. - */ - @NonNull - public final GenericType where( - @NonNull GenericTypeParameter freeVariable, @NonNull GenericType actualType) { - TypeResolver resolver = - new TypeResolver().where(freeVariable.getTypeVariable(), actualType.__getToken().getType()); - Type resolvedType = resolver.resolveType(this.token.getType()); - @SuppressWarnings("unchecked") - TypeToken resolvedToken = (TypeToken) TypeToken.of(resolvedType); - return new GenericType<>(resolvedToken); - } - - /** - * Substitutes a free type variable with an actual type. See {@link GenericType this class's - * javadoc} for an example. - */ - @NonNull - public final GenericType where( - @NonNull GenericTypeParameter freeVariable, @NonNull Class actualType) { - return where(freeVariable, GenericType.of(actualType)); - } - - /** - * Returns the array component type if this type represents an array ({@code int[]}, {@code T[]}, - * {@code []>} etc.), or else {@code null} is returned. - */ - @Nullable - @SuppressWarnings("unchecked") - public final GenericType getComponentType() { - TypeToken componentTypeToken = token.getComponentType(); - return (componentTypeToken == null) ? 
null : new GenericType(componentTypeToken); - } - - /** - * Returns the raw type of {@code T}. Formally speaking, if {@code T} is returned by {@link - * java.lang.reflect.Method#getGenericReturnType}, the raw type is what's returned by {@link - * java.lang.reflect.Method#getReturnType} of the same method object. Specifically: - * - *

    - *
  • If {@code T} is a {@code Class} itself, {@code T} itself is returned. - *
  • If {@code T} is a parameterized type, the raw type of the parameterized type is returned. - *
  • If {@code T} is an array type , the returned type is the corresponding array class. For - * example: {@code List[] => List[]}. - *
  • If {@code T} is a type variable or a wildcard type, the raw type of the first upper bound - * is returned. For example: {@code => Foo}. - *
- */ - @NonNull - public Class getRawType() { - return token.getRawType(); - } - - /** - * Returns the generic form of {@code superclass}. For example, if this is {@code - * ArrayList}, {@code Iterable} is returned given the input {@code - * Iterable.class}. - */ - @SuppressWarnings("unchecked") - @NonNull - public final GenericType getSupertype(@NonNull Class superclass) { - return new GenericType(token.getSupertype(superclass)); - } - - /** - * Returns subtype of {@code this} with {@code subclass} as the raw class. For example, if this is - * {@code Iterable} and {@code subclass} is {@code List}, {@code List} is - * returned. - */ - @SuppressWarnings("unchecked") - @NonNull - public final GenericType getSubtype(@NonNull Class subclass) { - return new GenericType(token.getSubtype(subclass)); - } - - /** Returns the represented type. */ - @NonNull - public final Type getType() { - return token.getType(); - } - - /** - * This method is for internal use, DO NOT use it from client code. - * - *

It leaks a shaded type. This should be part of the internal API, but due to internal - * implementation details it has to be exposed here. - * - * @leaks-private-api - */ - @NonNull - public TypeToken __getToken() { - return token; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof GenericType) { - GenericType that = (GenericType) other; - return this.token.equals(that.token); - } else { - return false; - } - } - - @Override - public int hashCode() { - return token.hashCode(); - } - - @Override - public String toString() { - return token.toString(); - } - - private static class SimpleGenericType extends GenericType { - SimpleGenericType(Class type) { - super(TypeToken.of(type)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java deleted file mode 100644 index 3bf0e3537e0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.reflect; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.lang.reflect.TypeVariable; -import net.jcip.annotations.Immutable; - -/** - * Captures a free type variable that can be used in {@link GenericType#where(GenericTypeParameter, - * GenericType)}. - */ -@Immutable -@SuppressWarnings("unused") // for T (unfortunately has to cover the whole class) -public class GenericTypeParameter { - private final TypeVariable typeVariable; - - protected GenericTypeParameter() { - Type superclass = getClass().getGenericSuperclass(); - Preconditions.checkArgument( - superclass instanceof ParameterizedType, "%s isn't parameterized", superclass); - this.typeVariable = - (TypeVariable) ((ParameterizedType) superclass).getActualTypeArguments()[0]; - } - - @NonNull - public TypeVariable getTypeVariable() { - return typeVariable; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java b/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java deleted file mode 100644 index 8dae31f3734..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java +++ /dev/null @@ -1,682 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.uuid; - -import com.datastax.oss.driver.internal.core.os.Native; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.net.SocketException; -import java.net.UnknownHostException; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Date; -import java.util.Enumeration; -import java.util.HashSet; -import java.util.Objects; -import java.util.Properties; -import java.util.Random; -import java.util.Set; -import java.util.SplittableRandom; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility methods to help working with UUIDs, and more specifically, with time-based UUIDs (also - * known as Version 1 UUIDs). - * - *

The algorithm to generate time-based UUIDs roughly follows the description in RFC-4122, but - * with the following adaptations: - * - *

    - *
  1. Since Java does not provide direct access to the host's MAC address, that information is - * replaced with a digest of all IP addresses available on the host; - *
  2. The process ID (PID) isn't easily available to Java either, so it is determined by one of - * the following methods, in the order they are listed below: - *
      - *
    1. If the System property {@value PID_SYSTEM_PROPERTY} is set then the - * value to use as a PID will be read from that property; - *
    2. Otherwise, if a native call to {@code POSIX.getpid()} is possible, then the PID will - * be read from that call; - *
    3. Otherwise, an attempt will be made to read the PID from JMX's {@link - * ManagementFactory#getRuntimeMXBean() RuntimeMXBean}, since most JVMs tend to use the - * JVM's PID as part of that MXBean name (however that behavior is not officially part - * of the specification, so it may not work for all JVMs); - *
    4. If all of the above fail, a random integer will be generated and used as a surrogate - * PID. - *
    - *
- * - * @see JAVA-444 - * @see A Universally Unique IDentifier (UUID) URN - * Namespace (RFC 4122) - */ -public final class Uuids { - - /** The system property to use to force the value of the process ID ({@value}). */ - public static final String PID_SYSTEM_PROPERTY = "com.datastax.oss.driver.PID"; - - /** - * The namespace UUID for URLs, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be a valid {@link - * java.net.URL URL}. - */ - public static final UUID NAMESPACE_URL = UUID.fromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"); - - /** - * The namespace UUID for fully-qualified domain names, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be a valid domain - * name. - */ - public static final UUID NAMESPACE_DNS = UUID.fromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); - - /** - * The namespace UUID for OIDs, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be an ISO OID. - */ - public static final UUID NAMESPACE_OID = UUID.fromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"); - - /** - * The namespace UUID for X.500 domain names, as defined in Appendix C of RFC-4122. When using - * this namespace to create a name-based UUID, it is expected that the name part be a valid X.500 - * domain name, in DER or a text output format. - */ - public static final UUID NAMESPACE_X500 = UUID.fromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"); - - private static final Logger LOG = LoggerFactory.getLogger(Uuids.class); - - private Uuids() {} - - /** - * UUID v1 timestamps must be expressed relatively to October 15th, 1582 – the day when Gregorian - * calendar was introduced. This constant captures that moment in time expressed in milliseconds - * before the Unix epoch. It can be obtained by calling: - * - *
-   *   Instant.parse("1582-10-15T00:00:00Z").toEpochMilli();
-   * 
- */ - private static final long START_EPOCH_MILLIS = -12219292800000L; - - // Lazily initialize clock seq + node value at time of first access. Quarkus will attempt to - // initialize this class at deployment time which prevents us from just setting this value - // directly. The "node" part of the clock seq + node includes the current PID which (for - // GraalVM users) we obtain via the LLVM interop. That infrastructure isn't setup at Quarkus - // deployment time, however, thus we can't just call makeClockSeqAndNode() in an initializer. - // See JAVA-2663 for more detail on this point. - // - // Container impl adapted from Guava's memoized Supplier impl. - private static class ClockSeqAndNodeContainer { - - private volatile boolean initialized = false; - private long val; - - private long get() { - if (!initialized) { - synchronized (ClockSeqAndNodeContainer.class) { - if (!initialized) { - - initialized = true; - val = makeClockSeqAndNode(); - } - } - } - return val; - } - } - - private static final ClockSeqAndNodeContainer CLOCK_SEQ_AND_NODE = new ClockSeqAndNodeContainer(); - - // The min and max possible lsb for a UUID. - // - // This is not 0 and all 1's because Cassandra's TimeUUIDType compares the lsb parts as signed - // byte arrays. So the min value is 8 times -128 and the max is 8 times +127. - // - // We ignore the UUID variant (namely, MIN_CLOCK_SEQ_AND_NODE has variant 2 as it should, but - // MAX_CLOCK_SEQ_AND_NODE has variant 0) because I don't trust all UUID implementations to have - // correctly set those (pycassa doesn't always for instance). 
- private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L; - private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL; - - private static final AtomicLong lastTimestamp = new AtomicLong(0L); - - private static long makeNode() { - - // We don't have access to the MAC address (in pure JAVA at least) but need to generate a node - // part that identifies this host as uniquely as possible. - // The spec says that one option is to take as many sources that identify this node as possible - // and hash them together. That's what we do here by gathering all the IPs of this host as well - // as a few other sources. - try { - - MessageDigest digest = MessageDigest.getInstance("MD5"); - for (String address : getAllLocalAddresses()) update(digest, address); - - Properties props = System.getProperties(); - update(digest, props.getProperty("java.vendor")); - update(digest, props.getProperty("java.vendor.url")); - update(digest, props.getProperty("java.version")); - update(digest, props.getProperty("os.arch")); - update(digest, props.getProperty("os.name")); - update(digest, props.getProperty("os.version")); - update(digest, getProcessPiece()); - - byte[] hash = digest.digest(); - - long node = 0; - for (int i = 0; i < 6; i++) node |= (0x00000000000000ffL & (long) hash[i]) << (i * 8); - // Since we don't use the MAC address, the spec says that the multicast bit (least significant - // bit of the first byte of the node ID) must be 1. 
- return node | 0x0000010000000000L; - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - } - - private static String getProcessPiece() { - Integer pid = null; - String pidProperty = System.getProperty(PID_SYSTEM_PROPERTY); - if (pidProperty != null) { - try { - pid = Integer.parseInt(pidProperty); - LOG.info("PID obtained from System property {}: {}", PID_SYSTEM_PROPERTY, pid); - } catch (NumberFormatException e) { - LOG.warn( - "Incorrect integer specified for PID in System property {}: {}", - PID_SYSTEM_PROPERTY, - pidProperty); - } - } - if (pid == null && Native.isGetProcessIdAvailable()) { - try { - pid = Native.getProcessId(); - LOG.info("PID obtained through native call to getpid(): {}", pid); - } catch (Exception e) { - Loggers.warnWithException(LOG, "Native call to getpid() failed", e); - } - } - if (pid == null) { - try { - @SuppressWarnings("StringSplitter") - String pidJmx = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; - pid = Integer.parseInt(pidJmx); - LOG.info("PID obtained through JMX: {}", pid); - } catch (Exception e) { - Loggers.warnWithException(LOG, "Failed to obtain PID from JMX", e); - } - } - if (pid == null) { - pid = new Random().nextInt(); - LOG.warn("Could not determine PID, falling back to a random integer: {}", pid); - } - ClassLoader loader = Uuids.class.getClassLoader(); - int loaderId = loader != null ? System.identityHashCode(loader) : 0; - return Integer.toHexString(pid) + Integer.toHexString(loaderId); - } - - private static void update(MessageDigest digest, String value) { - if (value != null) { - digest.update(value.getBytes(Charsets.UTF_8)); - } - } - - private static long makeClockSeqAndNode() { - long clock = new Random(System.currentTimeMillis()).nextLong(); - long node = makeNode(); - - long lsb = 0; - lsb |= (clock & 0x0000000000003FFFL) << 48; - lsb |= 0x8000000000000000L; - lsb |= node; - return lsb; - } - - /** - * Creates a new random (version 4) UUID. - * - *

This method has received a new implementation as of driver 4.10. Unlike the JDK's - * {@link UUID#randomUUID()} method, it does not use anymore the cryptographic {@link - * java.security.SecureRandom} number generator. Instead, it uses the non-cryptographic {@link - * Random} class, with a different seed at every invocation. - * - *

Using a non-cryptographic generator has two advantages: - * - *

    - *
  1. UUID generation is much faster than with {@link UUID#randomUUID()}; - *
  2. Contrary to {@link UUID#randomUUID()}, UUID generation with this method does not require - * I/O and is not a blocking call, which makes this method better suited for non-blocking - * applications. - *
- * - * Of course, this method is intended for usage where cryptographic strength is not required, such - * as when generating row identifiers for insertion in the database. If you still need - * cryptographic strength, consider using {@link Uuids#random(Random)} instead, and pass an - * instance of {@link java.security.SecureRandom}. - */ - @NonNull - public static UUID random() { - return random(new Random()); - } - - /** - * Creates a new random (version 4) UUID using the provided {@link Random} instance. - * - *

This method offers more flexibility than {@link #random()} as it allows to customize the - * {@link Random} instance to use, and also offers the possibility to reuse instances across - * successive calls. Reusing Random instances is the norm when using {@link - * java.util.concurrent.ThreadLocalRandom}, for instance; however other Random implementations may - * perform poorly under heavy thread contention. - * - *

Note: some Random implementations, such as {@link java.security.SecureRandom}, may trigger - * I/O activity during random number generation; these instances should not be used in - * non-blocking contexts. - */ - @NonNull - public static UUID random(@NonNull Random random) { - byte[] data = new byte[16]; - random.nextBytes(data); - return buildUuid(data, 4); - } - - /** - * Creates a new random (version 4) UUID using the provided {@link SplittableRandom} instance. - * - *

This method should be preferred to {@link #random()} when UUID generation happens in massive - * parallel computations, such as when using the ForkJoin framework. Note that {@link - * SplittableRandom} instances are not thread-safe. - */ - @NonNull - public static UUID random(@NonNull SplittableRandom random) { - byte[] data = toBytes(random.nextLong(), random.nextLong()); - return buildUuid(data, 4); - } - - /** - * Creates a new name-based (version 3) {@link UUID} from the given namespace UUID and the given - * string representing the name part. - * - *

Note that the given string will be converted to bytes using {@link StandardCharsets#UTF_8}. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name part; cannot be null. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not - * available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull String name) { - Objects.requireNonNull(name, "name cannot be null"); - return nameBased(namespace, name.getBytes(StandardCharsets.UTF_8)); - } - - /** - * Creates a new name-based (version 3) {@link UUID} from the given namespace UUID and the given - * byte array representing the name part. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name part; cannot be null. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not - * available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull byte[] name) { - return nameBased(namespace, name, 3); - } - - /** - * Creates a new name-based (version 3 or version 5) {@link UUID} from the given namespace UUID - * and the given string representing the name part. - * - *

Note that the given string will be converted to bytes using {@link StandardCharsets#UTF_8}. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name part; cannot be null. - * @param version The version to use, must be either 3 or 5; version 3 uses MD5 as its {@link - * MessageDigest} algorithm, while version 5 uses SHA-1. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalArgumentException if version is not 3 nor 5. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is - * not available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull String name, int version) { - Objects.requireNonNull(name, "name cannot be null"); - return nameBased(namespace, name.getBytes(StandardCharsets.UTF_8), version); - } - - /** - * Creates a new name-based (version 3 or version 5) {@link UUID} from the given namespace UUID - * and the given byte array representing the name part. - * - * @param namespace The namespace UUID to use; cannot be null. - * @param name The name to use; cannot be null. - * @param version The version to use, must be either 3 or 5; version 3 uses MD5 as its {@link - * MessageDigest} algorithm, while version 5 uses SHA-1. - * @throws NullPointerException if namespace or name is null. - * @throws IllegalArgumentException if version is not 3 nor 5. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is - * not available on this platform. 
- */ - @NonNull - public static UUID nameBased(@NonNull UUID namespace, @NonNull byte[] name, int version) { - Objects.requireNonNull(namespace, "namespace cannot be null"); - Objects.requireNonNull(name, "name cannot be null"); - MessageDigest md = newMessageDigest(version); - md.update(toBytes(namespace)); - md.update(name); - return buildUuid(md.digest(), version); - } - - /** - * Creates a new name-based (version 3) {@link UUID} from the given byte array containing the - * namespace UUID and the name parts concatenated together. - * - *

The byte array is expected to be at least 16 bytes long. - * - * @param namespaceAndName A byte array containing the concatenated namespace UUID and name; - * cannot be null. - * @throws NullPointerException if namespaceAndName is null. - * @throws IllegalArgumentException if namespaceAndName is not at least 16 bytes - * long. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not - * available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull byte[] namespaceAndName) { - return nameBased(namespaceAndName, 3); - } - - /** - * Creates a new name-based (version 3 or version 5) {@link UUID} from the given byte array - * containing the namespace UUID and the name parts concatenated together. - * - *

The byte array is expected to be at least 16 bytes long. - * - * @param namespaceAndName A byte array containing the concatenated namespace UUID and name; - * cannot be null. - * @param version The version to use, must be either 3 or 5. - * @throws NullPointerException if namespaceAndName is null. - * @throws IllegalArgumentException if namespaceAndName is not at least 16 bytes - * long. - * @throws IllegalArgumentException if version is not 3 nor 5. - * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is - * not available on this platform. - */ - @NonNull - public static UUID nameBased(@NonNull byte[] namespaceAndName, int version) { - Objects.requireNonNull(namespaceAndName, "namespaceAndName cannot be null"); - if (namespaceAndName.length < 16) { - throw new IllegalArgumentException("namespaceAndName must be at least 16 bytes long"); - } - MessageDigest md = newMessageDigest(version); - md.update(namespaceAndName); - return buildUuid(md.digest(), version); - } - - @NonNull - private static MessageDigest newMessageDigest(int version) { - if (version != 3 && version != 5) { - throw new IllegalArgumentException( - "Invalid name-based UUID version, expecting 3 or 5, got: " + version); - } - String algorithm = version == 3 ? 
"MD5" : "SHA-1"; - try { - return MessageDigest.getInstance(algorithm); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException(algorithm + " algorithm not available", e); - } - } - - @NonNull - private static UUID buildUuid(@NonNull byte[] data, int version) { - // clear and set version - data[6] &= (byte) 0x0f; - data[6] |= (byte) (version << 4); - // clear and set variant to IETF - data[8] &= (byte) 0x3f; - data[8] |= (byte) 0x80; - return fromBytes(data); - } - - private static UUID fromBytes(byte[] data) { - // data longer than 16 bytes will be truncated as mandated by the specs - assert data.length >= 16; - long msb = 0; - for (int i = 0; i < 8; i++) { - msb = (msb << 8) | (data[i] & 0xff); - } - long lsb = 0; - for (int i = 8; i < 16; i++) { - lsb = (lsb << 8) | (data[i] & 0xff); - } - return new UUID(msb, lsb); - } - - private static byte[] toBytes(UUID uuid) { - long msb = uuid.getMostSignificantBits(); - long lsb = uuid.getLeastSignificantBits(); - return toBytes(msb, lsb); - } - - private static byte[] toBytes(long msb, long lsb) { - byte[] out = new byte[16]; - for (int i = 0; i < 8; i++) { - out[i] = (byte) (msb >> ((7 - i) * 8)); - } - for (int i = 8; i < 16; i++) { - out[i] = (byte) (lsb >> ((15 - i) * 8)); - } - return out; - } - - /** - * Creates a new time-based (version 1) UUID. - * - *

UUIDs generated by this method are suitable for use with the {@code timeuuid} Cassandra - * type. In particular the generated UUID includes the timestamp of its generation. - * - *

Note that there is no way to provide your own timestamp. This is deliberate, as we feel that - * this does not conform to the UUID specification, and therefore don't want to encourage it - * through the API. If you want to do it anyway, use the following workaround: - * - *

-   * Random random = new Random();
-   * UUID uuid = new UUID(UUIDs.startOf(userProvidedTimestamp).getMostSignificantBits(), random.nextLong());
-   * 
- * - * If you simply need to perform a range query on a {@code timeuuid} column, use the "fake" UUID - * generated by {@link #startOf(long)} and {@link #endOf(long)}. - * - *

Usage with non-blocking threads: beware that this method may block the calling thread on its - * very first invocation, because the node part of time-based UUIDs needs to be computed at that - * moment, and the computation may require the loading of native libraries. If that is a problem, - * consider invoking this method once from a thread that is allowed to block. Subsequent - * invocations are guaranteed not to block. - */ - @NonNull - public static UUID timeBased() { - return new UUID(makeMsb(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE.get()); - } - - /** - * Creates a "fake" time-based UUID that sorts as the smallest possible version 1 UUID generated - * at the provided timestamp. - * - *

Such created UUIDs are useful in queries to select a time range of a {@code timeuuid} - * column. - * - *

The UUIDs created by this method are not unique and as such are not suitable - * for anything else than querying a specific time range. In particular, you should not insert - * such UUIDs. "True" UUIDs from user-provided timestamps are not supported (see {@link - * #timeBased()} for more explanations). - * - *

Also, the timestamp to provide as a parameter must be a Unix timestamp (as returned by - * {@link System#currentTimeMillis} or {@link Date#getTime}), and not a count of - * 100-nanosecond intervals since 00:00:00.00, 15 October 1582 (as required by RFC-4122). - * - *

In other words, given a UUID {@code uuid}, you should never call {@code - * startOf(uuid.timestamp())} but rather {@code startOf(unixTimestamp(uuid))}. - * - *

Lastly, please note that Cassandra's {@code timeuuid} sorting is not compatible with {@link - * UUID#compareTo} and hence the UUIDs created by this method are not necessarily lower bound for - * that latter method. - * - * @param timestamp the Unix timestamp for which the created UUID must be a lower bound. - * @return the smallest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. - */ - @NonNull - public static UUID startOf(long timestamp) { - return new UUID(makeMsb(fromUnixTimestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE); - } - - /** - * Creates a "fake" time-based UUID that sorts as the biggest possible version 1 UUID generated at - * the provided timestamp. - * - *

See {@link #startOf(long)} for explanations about the intended usage of such UUID. - * - * @param timestamp the Unix timestamp for which the created UUID must be an upper bound. - * @return the biggest (for Cassandra {@code timeuuid} sorting) UUID of {@code timestamp}. - */ - @NonNull - public static UUID endOf(long timestamp) { - long uuidTstamp = fromUnixTimestamp(timestamp + 1) - 1; - return new UUID(makeMsb(uuidTstamp), MAX_CLOCK_SEQ_AND_NODE); - } - - /** - * Returns the Unix timestamp contained by the provided time-based UUID. - * - *

This method is not equivalent to {@link UUID#timestamp()}. More precisely, a version 1 UUID - * stores a timestamp that represents the number of 100-nanoseconds intervals since midnight, 15 - * October 1582 and that is what {@link UUID#timestamp()} returns. This method however converts - * that timestamp to the equivalent Unix timestamp in milliseconds, i.e. a timestamp representing - * a number of milliseconds since midnight, January 1, 1970 UTC. In particular, the timestamps - * returned by this method are comparable to the timestamps returned by {@link - * System#currentTimeMillis}, {@link Date#getTime}, etc. - * - * @throws IllegalArgumentException if {@code uuid} is not a version 1 UUID. - */ - public static long unixTimestamp(@NonNull UUID uuid) { - if (uuid.version() != 1) { - throw new IllegalArgumentException( - String.format( - "Can only retrieve the unix timestamp for version 1 uuid (provided version %d)", - uuid.version())); - } - long timestamp = uuid.timestamp(); - return (timestamp / 10000) + START_EPOCH_MILLIS; - } - - // Use {@link System#currentTimeMillis} for a base time in milliseconds, and if we are in the same - // millisecond as the previous generation, increment the number of nanoseconds. - // However, since the precision is 100-nanosecond intervals, we can only generate 10K UUIDs within - // a millisecond safely. If we detect we have already generated that much UUIDs within a - // millisecond (which, while admittedly unlikely in a real application, is very achievable on even - // modest machines), then we stall the generator (busy spin) until the next millisecond as - // required by the RFC. 
- private static long getCurrentTimestamp() { - while (true) { - long now = fromUnixTimestamp(System.currentTimeMillis()); - long last = lastTimestamp.get(); - if (now > last) { - if (lastTimestamp.compareAndSet(last, now)) { - return now; - } - } else { - long lastMillis = millisOf(last); - // If the clock went back in time, bail out - if (millisOf(now) < millisOf(last)) { - return lastTimestamp.incrementAndGet(); - } - long candidate = last + 1; - // If we've generated more than 10k uuid in that millisecond, restart the whole process - // until we get to the next millis. Otherwise, we try use our candidate ... unless we've - // been beaten by another thread in which case we try again. - if (millisOf(candidate) == lastMillis && lastTimestamp.compareAndSet(last, candidate)) { - return candidate; - } - } - } - } - - @VisibleForTesting - static long fromUnixTimestamp(long tstamp) { - return (tstamp - START_EPOCH_MILLIS) * 10000; - } - - private static long millisOf(long timestamp) { - return timestamp / 10000; - } - - @VisibleForTesting - static long makeMsb(long timestamp) { - long msb = 0L; - msb |= (0x00000000ffffffffL & timestamp) << 32; - msb |= (0x0000ffff00000000L & timestamp) >>> 16; - msb |= (0x0fff000000000000L & timestamp) >>> 48; - msb |= 0x0000000000001000L; // sets the version to 1. 
- return msb; - } - - private static Set getAllLocalAddresses() { - Set allIps = new HashSet<>(); - try { - InetAddress localhost = InetAddress.getLocalHost(); - allIps.add(localhost.toString()); - // Also return the hostname if available, it won't hurt (this does a dns lookup, it's only - // done once at startup) - allIps.add(localhost.getCanonicalHostName()); - InetAddress[] allMyIps = InetAddress.getAllByName(localhost.getCanonicalHostName()); - if (allMyIps != null) { - for (InetAddress allMyIp : allMyIps) { - allIps.add(allMyIp.toString()); - } - } - } catch (UnknownHostException e) { - // Ignore, we'll try the network interfaces anyway - } - - try { - Enumeration en = NetworkInterface.getNetworkInterfaces(); - if (en != null) { - while (en.hasMoreElements()) { - Enumeration enumIpAddr = en.nextElement().getInetAddresses(); - while (enumIpAddr.hasMoreElements()) { - allIps.add(enumIpAddr.nextElement().toString()); - } - } - } - } catch (SocketException e) { - // Ignore, if we've really got nothing so far, we'll throw an exception - } - return allIps; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/api/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/package-info.java deleted file mode 100644 index 7b2219647b2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/api/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The driver's public API. - * - *

This package, and all of its subpackages, contains all the types that are intended to be used - * by clients applications. Binary compatibility is guaranteed across minor versions. - */ -package com.datastax.oss.driver.api; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java deleted file mode 100644 index 055ab26909f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.shaded.guava.common.collect.AbstractIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; - -public class AsyncPagingIterableWrapper - implements MappedAsyncPagingIterable { - - private final AsyncPagingIterable source; - private final Function elementMapper; - - private final Iterable currentPage; - - public AsyncPagingIterableWrapper( - AsyncPagingIterable source, - Function elementMapper) { - this.source = source; - this.elementMapper = elementMapper; - - Iterator sourceIterator = source.currentPage().iterator(); - Iterator iterator = - new AbstractIterator() { - @Override - protected TargetT computeNext() { - return sourceIterator.hasNext() - ? 
elementMapper.apply(sourceIterator.next()) - : endOfData(); - } - }; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return source.getColumnDefinitions(); - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return source.getExecutionInfo(); - } - - @Override - public int remaining() { - return source.remaining(); - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public boolean hasMorePages() { - return source.hasMorePages(); - } - - @NonNull - @Override - public CompletionStage> fetchNextPage() - throws IllegalStateException { - return source - .fetchNextPage() - .thenApply( - nextSource -> - new AsyncPagingIterableWrapper(nextSource, elementMapper)); - } - - @Override - public boolean wasApplied() { - return source.wasApplied(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java deleted file mode 100644 index 7b66a61636c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; - -/** - * Extension point to plug custom consistency levels. - * - *

This is overridable through {@link InternalDriverContext}. - */ -public interface ConsistencyLevelRegistry { - - ConsistencyLevel codeToLevel(int code); - - int nameToCode(String name); - - ConsistencyLevel nameToLevel(String name); - - /** @return all the values known to this driver instance. */ - Iterable getValues(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java deleted file mode 100644 index bb65661b72f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.util.AddressUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Utility class to handle the initial contact points passed to the driver. */ -public class ContactPoints { - private static final Logger LOG = LoggerFactory.getLogger(ContactPoints.class); - - public static Set merge( - Set programmaticContactPoints, List configContactPoints, boolean resolve) { - - Set result = Sets.newHashSet(programmaticContactPoints); - for (String spec : configContactPoints) { - - Set addresses = Collections.emptySet(); - try { - addresses = AddressUtils.extract(spec, resolve); - } catch (RuntimeException e) { - LOG.warn("Ignoring invalid contact point {} ({})", spec, e.getMessage(), e); - } - - if (addresses.size() > 1) { - LOG.info( - "Contact point {} resolves to multiple addresses, will use them all ({})", - spec, - addresses); - } - - for (InetSocketAddress address : addresses) { - DefaultEndPoint endPoint = new DefaultEndPoint(address); - boolean wasNew = result.add(endPoint); - if (!wasNew) { - LOG.warn("Duplicate contact point {}", address); - } - } - } - return ImmutableSet.copyOf(result); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java b/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java deleted file mode 100644 index a00da0e4b1a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.function.Function; - -public class CqlIdentifiers { - - @NonNull - private static List wrap( - @NonNull Iterable in, @NonNull Function fn) { - - Objects.requireNonNull(in, "Input Iterable must not be null"); - Objects.requireNonNull(fn, "CqlIdentifier conversion function must not be null"); - ImmutableList.Builder builder = ImmutableList.builder(); - for (String name : in) { - builder.add(fn.apply(name)); - } - return builder.build(); - } - - @NonNull - public static List wrap(@NonNull Iterable in) { - return wrap(in, CqlIdentifier::fromCql); - } - - @NonNull - public static List wrapInternal(@NonNull Iterable in) { - return wrap(in, CqlIdentifier::fromInternal); - } - - @NonNull - private static Map wrapKeys( - @NonNull Map in, @NonNull Function fn) { - Objects.requireNonNull(in, "Input Map must not be null"); - 
Objects.requireNonNull(fn, "CqlIdentifier conversion function must not be null"); - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : in.entrySet()) { - builder.put(fn.apply(entry.getKey()), entry.getValue()); - } - return builder.build(); - } - - @NonNull - public static Map wrapKeys(@NonNull Map in) { - return wrapKeys(in, CqlIdentifier::fromCql); - } - - @NonNull - public static Map wrapKeysInternal(@NonNull Map in) { - return wrapKeys(in, CqlIdentifier::fromInternal); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java deleted file mode 100644 index b563ad5facc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultConsistencyLevelRegistry implements ConsistencyLevelRegistry { - - private static final ImmutableList VALUES = - ImmutableList.builder().add(DefaultConsistencyLevel.values()).build(); - private static final ImmutableMap NAME_TO_CODE; - - static { - ImmutableMap.Builder nameToCodeBuilder = ImmutableMap.builder(); - for (DefaultConsistencyLevel consistencyLevel : DefaultConsistencyLevel.values()) { - nameToCodeBuilder.put(consistencyLevel.name(), consistencyLevel.getProtocolCode()); - } - NAME_TO_CODE = nameToCodeBuilder.build(); - } - - @Override - public ConsistencyLevel codeToLevel(int code) { - return DefaultConsistencyLevel.fromCode(code); - } - - @Override - public int nameToCode(String name) { - return NAME_TO_CODE.get(name); - } - - @Override - public ConsistencyLevel nameToLevel(String name) { - return DefaultConsistencyLevel.valueOf(name); - } - - @Override - public Iterable getValues() { - return VALUES; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java deleted file mode 100644 index 8280ae8fec5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.MavenCoordinates; -import com.datastax.oss.driver.api.core.Version; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.UncheckedIOException; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.Properties; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DefaultMavenCoordinates implements MavenCoordinates { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultMavenCoordinates.class); - - public static MavenCoordinates buildFromResourceAndPrint(URL resource) { - MavenCoordinates info = buildFromResource(resource); - LOG.info("{}", info); - return info; - } - - public static DefaultMavenCoordinates buildFromResource(URL resource) { - // The resource is assumed to be a properties file, but - // encoded in UTF-8, not ISO-8859-1 as required by the Java specs, - // since our build tool (Maven) produces UTF-8-encoded resources. 
- try (InputStreamReader reader = - new InputStreamReader(resource.openStream(), StandardCharsets.UTF_8)) { - Properties props = new Properties(); - props.load(reader); - String name = props.getProperty("driver.name"); - String groupId = props.getProperty("driver.groupId"); - String artifactId = props.getProperty("driver.artifactId"); - String version = props.getProperty("driver.version"); - if (ByteBuf.class.getPackage().getName().contains("com.datastax.oss.driver.shaded")) { - artifactId += "-shaded"; - } - return new DefaultMavenCoordinates(name, groupId, artifactId, Version.parse(version)); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - private final String name; - private final String groupId; - private final String artifactId; - private final Version version; - - public DefaultMavenCoordinates(String name, String groupId, String artifactId, Version version) { - this.name = name; - this.groupId = groupId; - this.artifactId = artifactId; - this.version = version; - } - - @NonNull - @Override - public String getName() { - return name; - } - - @NonNull - @Override - public String getGroupId() { - return groupId; - } - - @NonNull - @Override - public String getArtifactId() { - return artifactId; - } - - @NonNull - @Override - public Version getVersion() { - return version; - } - - @Override - public String toString() { - return String.format("%s (%s:%s) version %s", name, groupId, artifactId, version); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java deleted file mode 100644 index 5d79f4ed0a5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -/** - * Features that are commonly supported by most Apache Cassandra protocol versions. - * - * @see com.datastax.oss.driver.api.core.DefaultProtocolVersion - */ -public enum DefaultProtocolFeature implements ProtocolFeature { - - /** - * The ability to leave variables unset in prepared statements. - * - * @see CASSANDRA-7304 - */ - UNSET_BOUND_VALUES, - - /** - * The ability to override the keyspace on a per-request basis. - * - * @see CASSANDRA-10145 - */ - PER_REQUEST_KEYSPACE, - - /** - * Support for smallint and tinyint types. - * - * @see CASSANDRA-8951 - */ - SMALLINT_AND_TINYINT_TYPES, - - /** - * Support for the date type. - * - * @see CASSANDRA-7523 - */ - DATE_TYPE, - - /** - * The ability to set a custom "now" time on statements (for testing purposes). - * - * @see CASSANDRA-14664 - */ - NOW_IN_SECONDS, - - /** - * The new protocol framing format introduced in Cassandra 4: wrapping multiple frames into a - * single "segment" to checksum (and possibly compress) them together. 
- * - * @see CASSANDRA-15299 - */ - MODERN_FRAMING, - ; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java deleted file mode 100644 index 80850e8e95a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collection; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Built-in implementation of the protocol version registry, supports all Cassandra and DSE - * versions. 
- */ -@ThreadSafe -public class DefaultProtocolVersionRegistry implements ProtocolVersionRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultProtocolVersionRegistry.class); - private static final List allVersions = - ImmutableList.builder() - .add(DefaultProtocolVersion.values()) - .add(DseProtocolVersion.values()) - .build(); - - @VisibleForTesting - static final Version DSE_4_7_0 = Objects.requireNonNull(Version.parse("4.7.0")); - - @VisibleForTesting - static final Version DSE_5_0_0 = Objects.requireNonNull(Version.parse("5.0.0")); - - @VisibleForTesting - static final Version DSE_5_1_0 = Objects.requireNonNull(Version.parse("5.1.0")); - - @VisibleForTesting - static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); - - @VisibleForTesting - static final Version DSE_7_0_0 = Objects.requireNonNull(Version.parse("7.0.0")); - - private final String logPrefix; - - public DefaultProtocolVersionRegistry(String logPrefix) { - this.logPrefix = logPrefix; - } - - @Override - public ProtocolVersion fromName(String name) { - try { - return DefaultProtocolVersion.valueOf(name); - } catch (IllegalArgumentException noOssVersion) { - try { - return DseProtocolVersion.valueOf(name); - } catch (IllegalArgumentException noDseVersion) { - throw new IllegalArgumentException("Unknown protocol version name: " + name); - } - } - } - - @Override - public ProtocolVersion highestNonBeta() { - ProtocolVersion highest = allVersions.get(allVersions.size() - 1); - if (!highest.isBeta()) { - return highest; - } else { - return downgrade(highest) - .orElseThrow(() -> new AssertionError("There should be at least one non-beta version")); - } - } - - @Override - public Optional downgrade(ProtocolVersion version) { - int index = allVersions.indexOf(version); - if (index < 0) { - // This method is called with a value obtained from fromName, so this should never happen - throw new AssertionError(version + " is not a known version"); - } else if 
(index == 0) { - return Optional.empty(); - } else { - ProtocolVersion previousVersion = allVersions.get(index - 1); - // Beta versions are skipped during negotiation - return previousVersion.isBeta() ? downgrade(previousVersion) : Optional.of(previousVersion); - } - } - - @Override - public ProtocolVersion highestCommon(Collection nodes) { - if (nodes == null || nodes.isEmpty()) { - throw new IllegalArgumentException("Expected at least one node"); - } - - // Start with all non-beta versions (beta versions are always forced, and we don't call this - // method if the version was forced). - Set candidates = new LinkedHashSet<>(); - for (ProtocolVersion version : allVersions) { - if (!version.isBeta()) { - candidates.add(version); - } - } - // Keep an unfiltered copy in case we need to throw an exception below - ImmutableList initialCandidates = ImmutableList.copyOf(candidates); - - // For each node, remove the versions it doesn't support - for (Node node : nodes) { - - // We can't trust the Cassandra version reported by DSE to infer the maximum OSS protocol - // supported. For example DSE 6 reports release_version 4.0-SNAPSHOT, but only supports OSS - // protocol v4 (while Cassandra 4 will support v5). So we treat DSE separately. 
- Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion != null) { - LOG.debug("[{}] Node {} reports DSE version {}", logPrefix, node.getEndPoint(), dseVersion); - dseVersion = dseVersion.nextStable(); - if (dseVersion.compareTo(DSE_4_7_0) < 0) { - throw new UnsupportedProtocolVersionException( - node.getEndPoint(), - String.format( - "Node %s reports DSE version %s, " - + "but the driver only supports 4.7.0 and above", - node.getEndPoint(), dseVersion), - initialCandidates); - } else if (dseVersion.compareTo(DSE_5_0_0) < 0) { - // DSE 4.7.x, 4.8.x - removeHigherThan(DefaultProtocolVersion.V3, null, candidates); - } else if (dseVersion.compareTo(DSE_5_1_0) < 0) { - // DSE 5.0 - removeHigherThan(DefaultProtocolVersion.V4, null, candidates); - } else if (dseVersion.compareTo(DSE_6_0_0) < 0) { - // DSE 5.1 - removeHigherThan(DefaultProtocolVersion.V4, DseProtocolVersion.DSE_V1, candidates); - } else if (dseVersion.compareTo(DSE_7_0_0) < 0) { - // DSE 6 - removeHigherThan(DefaultProtocolVersion.V4, DseProtocolVersion.DSE_V2, candidates); - } else { - // DSE 7.0 - removeHigherThan(DefaultProtocolVersion.V5, DseProtocolVersion.DSE_V2, candidates); - } - } else { // not DSE - Version cassandraVersion = node.getCassandraVersion(); - if (cassandraVersion == null) { - LOG.warn( - "[{}] Node {} reports neither DSE version nor Cassandra version, " - + "ignoring it from optimal protocol version computation", - logPrefix, - node.getEndPoint()); - continue; - } - cassandraVersion = cassandraVersion.nextStable(); - LOG.debug( - "[{}] Node {} reports Cassandra version {}", - logPrefix, - node.getEndPoint(), - cassandraVersion); - if (cassandraVersion.compareTo(Version.V2_1_0) < 0) { - throw new UnsupportedProtocolVersionException( - node.getEndPoint(), - String.format( - "Node %s reports Cassandra version %s, " - + "but the driver only supports 2.1.0 and above", - node.getEndPoint(), cassandraVersion), - 
ImmutableList.of(DefaultProtocolVersion.V3, DefaultProtocolVersion.V4)); - } else if (cassandraVersion.compareTo(Version.V2_2_0) < 0) { - // 2.1.0 - removeHigherThan(DefaultProtocolVersion.V3, null, candidates); - } else if (cassandraVersion.compareTo(Version.V4_0_0) < 0) { - // 2.2, 3.x - removeHigherThan(DefaultProtocolVersion.V4, null, candidates); - } else { - // 4.0 - removeHigherThan(DefaultProtocolVersion.V5, null, candidates); - } - } - } - - // If we have versions left, return the highest one - ProtocolVersion max = null; - for (ProtocolVersion candidate : candidates) { - if (max == null || max.getCode() < candidate.getCode()) { - max = candidate; - } - } - if (max == null) { // Note: with the current algorithm, this never happens - throw new UnsupportedProtocolVersionException( - null, - String.format( - "Could not determine a common protocol version, " - + "enable DEBUG logs for '%s' for more details", - LOG.getName()), - initialCandidates); - } else { - return max; - } - } - - // Removes all versions strictly higher than the given versions from candidates. A null - // maxDseVersion means "remove all DSE versions". 
- private void removeHigherThan( - DefaultProtocolVersion maxOssVersion, - DseProtocolVersion maxDseVersion, - Set candidates) { - for (DefaultProtocolVersion ossVersion : DefaultProtocolVersion.values()) { - if (ossVersion.compareTo(maxOssVersion) > 0 && candidates.remove(ossVersion)) { - LOG.debug("[{}] Excluding protocol {}", logPrefix, ossVersion); - } - } - for (DseProtocolVersion dseVersion : DseProtocolVersion.values()) { - if ((maxDseVersion == null || dseVersion.compareTo(maxDseVersion) > 0) - && candidates.remove(dseVersion)) { - LOG.debug("[{}] Excluding protocol {}", logPrefix, dseVersion); - } - } - } - - @Override - public boolean supports(ProtocolVersion version, ProtocolFeature feature) { - int code = version.getCode(); - if (DefaultProtocolFeature.SMALLINT_AND_TINYINT_TYPES.equals(feature) - || DefaultProtocolFeature.DATE_TYPE.equals(feature) - || DefaultProtocolFeature.UNSET_BOUND_VALUES.equals(feature)) { - // All DSE versions and all OSS V4+ - return DefaultProtocolVersion.V4.getCode() <= code; - } else if (DefaultProtocolFeature.PER_REQUEST_KEYSPACE.equals(feature)) { - // Only DSE_V2+ and OSS V5+ - return (DefaultProtocolVersion.V5.getCode() <= code - && code < DseProtocolVersion.DSE_V1.getCode()) - || DseProtocolVersion.DSE_V2.getCode() <= code; - } else if (DefaultProtocolFeature.NOW_IN_SECONDS.equals(feature) - || DefaultProtocolFeature.MODERN_FRAMING.equals(feature)) { - // OSS only, V5+ - return DefaultProtocolVersion.V5.getCode() <= code - && code < DseProtocolVersion.DSE_V1.getCode(); - } else if (DseProtocolFeature.CONTINUOUS_PAGING.equals(feature)) { - // All DSE versions - return DseProtocolVersion.DSE_V1.getCode() <= code; - } else { - throw new IllegalArgumentException("Unhandled protocol feature: " + feature); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java deleted file mode 100644 index 
1f79f673d02..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.cql.PagingIterableSpliterator; -import com.datastax.oss.driver.shaded.guava.common.collect.AbstractIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Function; - -public class PagingIterableWrapper implements PagingIterable { - - private final PagingIterable source; - private final boolean sized; - private final Iterator iterator; - - /** - * Creates a {@link PagingIterableWrapper} for the given source, with unknown size. Spliterators - * for this iterable will never report {@link Spliterator#SIZED}. - * - * @param source The source to wrap. - * @param elementMapper The element mapper. 
- */ - public PagingIterableWrapper( - @NonNull PagingIterable source, - @NonNull Function elementMapper) { - this(source, elementMapper, false); - } - - /** - * Creates a {@link PagingIterableWrapper} for the given source. If {@code sized} is {@code true}, - * spliterators for this iterable will report {@link Spliterator#SIZED} and {@link - * Spliterator#SUBSIZED} and their estimated size will be {@link #getAvailableWithoutFetching()}. - * - * @param source The source to wrap. - * @param elementMapper The element mapper. - * @param sized Whether this iterable has a known size or not. - */ - public PagingIterableWrapper( - @NonNull PagingIterable source, - @NonNull Function elementMapper, - boolean sized) { - this.source = source; - this.sized = sized; - Iterator sourceIterator = source.iterator(); - this.iterator = - new AbstractIterator() { - @Override - protected TargetT computeNext() { - return sourceIterator.hasNext() - ? elementMapper.apply(sourceIterator.next()) - : endOfData(); - } - }; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return source.getColumnDefinitions(); - } - - @NonNull - @Override - public List getExecutionInfos() { - return source.getExecutionInfos(); - } - - @Override - public boolean isFullyFetched() { - return source.isFullyFetched(); - } - - @Override - public int getAvailableWithoutFetching() { - return source.getAvailableWithoutFetching(); - } - - @Override - public boolean wasApplied() { - return source.wasApplied(); - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @NonNull - @Override - public Spliterator spliterator() { - PagingIterableSpliterator.Builder builder = PagingIterableSpliterator.builder(this); - if (sized) { - builder.withEstimatedSize(getAvailableWithoutFetching()); - } - return builder.build(); - } - - @NonNull - @Override - public PagingIterable map( - Function elementMapper) { - return new PagingIterableWrapper<>(this, elementMapper, sized); 
- } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java deleted file mode 100644 index bf73f7bbb16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ProtocolVersion; - -/** - * A marker interface for features of the native protocol that are only supported by specific - * {@linkplain ProtocolVersion versions}. - * - *

The only reason to model this as an interface (as opposed to an enum type) is to accommodate - * for custom protocol extensions. If you're connecting to a standard Apache Cassandra cluster, all - * {@code ProtocolFeature}s are {@link DefaultProtocolFeature} instances. - * - * @see ProtocolVersionRegistry#supports(ProtocolVersion, ProtocolFeature) - * @see DefaultProtocolFeature - */ -public interface ProtocolFeature {} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java deleted file mode 100644 index eff1d099905..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import java.util.Collection; -import java.util.Optional; - -/** Defines which native protocol versions are supported by a driver instance. */ -public interface ProtocolVersionRegistry { - - /** - * Look up a version by its {@link ProtocolVersion#name() name}. This is used when a version was - * forced in the configuration. - * - * @throws IllegalArgumentException if there is no known version with this name. - * @see DefaultDriverOption#PROTOCOL_VERSION - */ - ProtocolVersion fromName(String name); - - /** - * The highest, non-beta version supported by the driver. This is used as the starting point for - * the negotiation process for the initial connection (if the version wasn't forced). - */ - ProtocolVersion highestNonBeta(); - - /** - * Downgrade to a lower version if the current version is not supported by the server. This is - * used during the negotiation process for the initial connection (if the version wasn't forced). - * - * @return empty if there is no version to downgrade to. - */ - Optional downgrade(ProtocolVersion version); - - /** - * Computes the highest common version supported by the given nodes. This is called after the - * initial {@link TopologyMonitor#refreshNodeList()} node refresh} (provided that the version was - * not forced), to ensure that we proceed with a version that will work with all the nodes. - * - * @throws UnsupportedProtocolVersionException if no such version exists (the nodes support - * non-intersecting ranges), or if there was an error during the computation. This will cause - * the driver initialization to fail. 
- */ - ProtocolVersion highestCommon(Collection nodes); - - /** Whether a given version supports a given feature. */ - boolean supports(ProtocolVersion version, ProtocolFeature feature); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java deleted file mode 100644 index 88e6cdb3bb2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Enumeration; -import java.util.Hashtable; -import javax.naming.Context; -import javax.naming.NamingEnumeration; -import javax.naming.NamingException; -import javax.naming.directory.Attribute; -import javax.naming.directory.Attributes; -import javax.naming.directory.DirContext; -import javax.naming.directory.InitialDirContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * {@link AddressTranslator} implementation for a multi-region EC2 deployment where clients are - * also deployed in EC2. - * - *

Its distinctive feature is that it translates addresses according to the location of the - * Cassandra host: - * - *

    - *
  • addresses in different EC2 regions (than the client) are unchanged; - *
  • addresses in the same EC2 region are translated to private IPs. - *
- * - * This optimizes network costs, because Amazon charges more for communication over public IPs. - * - *

Implementation note: this class performs a reverse DNS lookup of the origin address, to find - * the domain name of the target instance. Then it performs a forward DNS lookup of the domain name; - * the EC2 DNS does the private/public switch automatically based on location. - */ -public class Ec2MultiRegionAddressTranslator implements AddressTranslator { - - private static final Logger LOG = LoggerFactory.getLogger(Ec2MultiRegionAddressTranslator.class); - - private final DirContext ctx; - private final String logPrefix; - - public Ec2MultiRegionAddressTranslator( - @SuppressWarnings("unused") @NonNull DriverContext context) { - this.logPrefix = context.getSessionName(); - @SuppressWarnings("JdkObsolete") - Hashtable env = new Hashtable<>(); - env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); - try { - ctx = new InitialDirContext(env); - } catch (NamingException e) { - throw new RuntimeException("Could not create translator", e); - } - } - - @VisibleForTesting - Ec2MultiRegionAddressTranslator(@NonNull DirContext ctx) { - this.logPrefix = "test"; - this.ctx = ctx; - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress socketAddress) { - InetAddress address = socketAddress.getAddress(); - try { - // InetAddress#getHostName() is supposed to perform a reverse DNS lookup, but for some reason - // it doesn't work within the same EC2 region (it returns the IP address itself). 
- // We use an alternate implementation: - String domainName = lookupPtrRecord(reverse(address)); - if (domainName == null) { - LOG.warn("[{}] Found no domain name for {}, returning it as-is", logPrefix, address); - return socketAddress; - } - - InetAddress translatedAddress = InetAddress.getByName(domainName); - LOG.debug("[{}] Resolved {} to {}", logPrefix, address, translatedAddress); - return new InetSocketAddress(translatedAddress, socketAddress.getPort()); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "[{}] Error resolving {}, returning it as-is", logPrefix, address, e); - return socketAddress; - } - } - - private String lookupPtrRecord(String reversedDomain) throws Exception { - Attributes attrs = ctx.getAttributes(reversedDomain, new String[] {"PTR"}); - for (NamingEnumeration ae = attrs.getAll(); ae.hasMoreElements(); ) { - Attribute attr = (Attribute) ae.next(); - Enumeration vals = attr.getAll(); - if (vals.hasMoreElements()) { - return vals.nextElement().toString(); - } - } - return null; - } - - @Override - public void close() { - try { - ctx.close(); - } catch (NamingException e) { - Loggers.warnWithException(LOG, "Error closing translator", e); - } - } - - // Builds the "reversed" domain name in the ARPA domain to perform the reverse lookup - @VisibleForTesting - static String reverse(InetAddress address) { - byte[] bytes = address.getAddress(); - if (bytes.length == 4) return reverseIpv4(bytes); - else return reverseIpv6(bytes); - } - - private static String reverseIpv4(byte[] bytes) { - StringBuilder builder = new StringBuilder(); - for (int i = bytes.length - 1; i >= 0; i--) { - builder.append(bytes[i] & 0xFF).append('.'); - } - builder.append("in-addr.arpa"); - return builder.toString(); - } - - private static String reverseIpv6(byte[] bytes) { - StringBuilder builder = new StringBuilder(); - for (int i = bytes.length - 1; i >= 0; i--) { - byte b = bytes[i]; - int lowNibble = b & 0x0F; - int highNibble = b >> 4 & 0x0F; - builder - 
.append(Integer.toHexString(lowNibble)) - .append('.') - .append(Integer.toHexString(highNibble)) - .append('.'); - } - builder.append("ip6.arpa"); - return builder.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java deleted file mode 100644 index 5cc6c2518fb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This translator always returns same hostname, no matter what IP address a node has but still - * using its native transport port. - * - *

The translator can be used for scenarios when all nodes are behind some kind of proxy, and it - * is not tailored for one concrete use case. One can use this, for example, for AWS PrivateLink as - * all nodes would be exposed to consumer - behind one hostname pointing to AWS Endpoint. - */ -public class FixedHostNameAddressTranslator implements AddressTranslator { - - private static final Logger LOG = LoggerFactory.getLogger(FixedHostNameAddressTranslator.class); - - private final String advertisedHostname; - private final String logPrefix; - - public FixedHostNameAddressTranslator(@NonNull DriverContext context) { - logPrefix = context.getSessionName(); - advertisedHostname = - context.getConfig().getDefaultProfile().getString(ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME); - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress address) { - final int port = address.getPort(); - LOG.debug("[{}] Resolved {}:{} to {}:{}", logPrefix, address, port, advertisedHostname, port); - return new InetSocketAddress(advertisedHostname, port); - } - - @Override - public void close() {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java deleted file mode 100644 index 0922821be8c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import net.jcip.annotations.ThreadSafe; - -/** - * An address translator that always returns the same address unchanged. - * - *

To activate this translator, modify the {@code advanced.address-translator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.address-translator {
- *     class = PassThroughAddressTranslator
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class PassThroughAddressTranslator implements AddressTranslator { - - public PassThroughAddressTranslator(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress address) { - return address; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Subnet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Subnet.java deleted file mode 100644 index 7c25e94e2f9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Subnet.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.List; - -class Subnet { - private final byte[] subnet; - private final byte[] networkMask; - private final byte[] upper; - private final byte[] lower; - - private Subnet(byte[] subnet, byte[] networkMask) { - this.subnet = subnet; - this.networkMask = networkMask; - - byte[] upper = new byte[subnet.length]; - byte[] lower = new byte[subnet.length]; - for (int i = 0; i < subnet.length; i++) { - upper[i] = (byte) (subnet[i] | ~networkMask[i]); - lower[i] = (byte) (subnet[i] & networkMask[i]); - } - this.upper = upper; - this.lower = lower; - } - - static Subnet parse(String subnetCIDR) throws UnknownHostException { - List parts = Splitter.on("/").splitToList(subnetCIDR); - if (parts.size() != 2) { - throw new IllegalArgumentException("Invalid subnet: " + subnetCIDR); - } - - boolean isIPv6 = parts.get(0).contains(":"); - byte[] subnet = InetAddress.getByName(parts.get(0)).getAddress(); - if (isIPv4(subnet) && isIPv6) { - subnet = toIPv6(subnet); - } - int prefixLength = Integer.parseInt(parts.get(1)); - validatePrefixLength(subnet, prefixLength); - - byte[] networkMask = toNetworkMask(subnet, prefixLength); - validateSubnetIsPrefixBlock(subnet, networkMask, subnetCIDR); - return new Subnet(subnet, networkMask); - } - - private static byte[] toNetworkMask(byte[] subnet, int prefixLength) { - int fullBytes = prefixLength / 8; - int remainingBits = prefixLength % 8; - byte[] mask = new byte[subnet.length]; - Arrays.fill(mask, 0, fullBytes, (byte) 0xFF); - if (remainingBits > 0) { - mask[fullBytes] = (byte) (0xFF << (8 - remainingBits)); - } - return mask; - } - - private static void validatePrefixLength(byte[] subnet, int prefixLength) { - 
int max_prefix_length = subnet.length * 8; - if (prefixLength < 0 || max_prefix_length < prefixLength) { - throw new IllegalArgumentException( - String.format( - "Prefix length %s must be within [0; %s]", prefixLength, max_prefix_length)); - } - } - - private static void validateSubnetIsPrefixBlock( - byte[] subnet, byte[] networkMask, String subnetCIDR) { - byte[] prefixBlock = toPrefixBlock(subnet, networkMask); - if (!Arrays.equals(subnet, prefixBlock)) { - throw new IllegalArgumentException( - String.format("Subnet %s must be represented as a network prefix block", subnetCIDR)); - } - } - - private static byte[] toPrefixBlock(byte[] subnet, byte[] networkMask) { - byte[] prefixBlock = new byte[subnet.length]; - for (int i = 0; i < subnet.length; i++) { - prefixBlock[i] = (byte) (subnet[i] & networkMask[i]); - } - return prefixBlock; - } - - @VisibleForTesting - byte[] getSubnet() { - return Arrays.copyOf(subnet, subnet.length); - } - - @VisibleForTesting - byte[] getNetworkMask() { - return Arrays.copyOf(networkMask, networkMask.length); - } - - byte[] getUpper() { - return Arrays.copyOf(upper, upper.length); - } - - byte[] getLower() { - return Arrays.copyOf(lower, lower.length); - } - - boolean isIPv4() { - return isIPv4(subnet); - } - - boolean isIPv6() { - return isIPv6(subnet); - } - - boolean contains(byte[] ip) { - if (isIPv4() && !isIPv4(ip)) { - return false; - } - if (isIPv6() && isIPv4(ip)) { - ip = toIPv6(ip); - } - if (subnet.length != ip.length) { - throw new IllegalArgumentException( - "IP version is unknown: " + Arrays.toString(toZeroBasedByteArray(ip))); - } - for (int i = 0; i < subnet.length; i++) { - if (subnet[i] != (byte) (ip[i] & networkMask[i])) { - return false; - } - } - return true; - } - - private static boolean isIPv4(byte[] ip) { - return ip.length == 4; - } - - private static boolean isIPv6(byte[] ip) { - return ip.length == 16; - } - - private static byte[] toIPv6(byte[] ipv4) { - byte[] ipv6 = new byte[16]; - ipv6[10] = (byte) 
0xFF; - ipv6[11] = (byte) 0xFF; - System.arraycopy(ipv4, 0, ipv6, 12, 4); - return ipv6; - } - - @Override - public String toString() { - return Arrays.toString(toZeroBasedByteArray(subnet)); - } - - private static int[] toZeroBasedByteArray(byte[] bytes) { - int[] res = new int[bytes.length]; - for (int i = 0; i < bytes.length; i++) { - res[i] = bytes[i] & 0xFF; - } - return res; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java deleted file mode 100644 index 105e776a507..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import java.net.InetSocketAddress; -import java.net.UnknownHostException; - -class SubnetAddress { - private final Subnet subnet; - private final InetSocketAddress address; - - SubnetAddress(String subnetCIDR, InetSocketAddress address) { - try { - this.subnet = Subnet.parse(subnetCIDR); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - this.address = address; - } - - InetSocketAddress getAddress() { - return this.address; - } - - boolean isOverlapping(SubnetAddress other) { - Subnet thisSubnet = this.subnet; - Subnet otherSubnet = other.subnet; - return thisSubnet.contains(otherSubnet.getLower()) - || thisSubnet.contains(otherSubnet.getUpper()) - || otherSubnet.contains(thisSubnet.getLower()) - || otherSubnet.contains(thisSubnet.getUpper()); - } - - boolean contains(InetSocketAddress address) { - return subnet.contains(address.getAddress().getAddress()); - } - - boolean isIPv4() { - return subnet.isIPv4(); - } - - boolean isIPv6() { - return subnet.isIPv6(); - } - - @Override - public String toString() { - return "SubnetAddress[subnet=" + subnet + ", address=" + address + "]"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java deleted file mode 100644 index 85f29e3fadd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES; - -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.AddressUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This translator returns the proxy address of the private subnet containing the Cassandra node IP, - * or default address if no matching subnets, or passes through the original node address if no - * default configured. - * - *

The translator can be used for scenarios when all nodes are behind some kind of proxy, and - * that proxy is different for nodes located in different subnets (eg. when Cassandra is deployed in - * multiple datacenters/regions). One can use this, for example, for Cassandra on Kubernetes with - * different Cassandra datacenters deployed to different Kubernetes clusters. - */ -public class SubnetAddressTranslator implements AddressTranslator { - private static final Logger LOG = LoggerFactory.getLogger(SubnetAddressTranslator.class); - - private final List subnetAddresses; - - @SuppressWarnings("OptionalUsedAsFieldOrParameterType") - private final Optional defaultAddress; - - private final String logPrefix; - - public SubnetAddressTranslator(@NonNull DriverContext context) { - logPrefix = context.getSessionName(); - boolean resolveAddresses = - context - .getConfig() - .getDefaultProfile() - .getBoolean(ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES, false); - this.subnetAddresses = - context.getConfig().getDefaultProfile().getStringMap(ADDRESS_TRANSLATOR_SUBNET_ADDRESSES) - .entrySet().stream() - .map( - e -> { - // Quoted and/or containing forward slashes map keys in reference.conf are read to - // strings with additional quotes, eg. 
100.64.0.0/15 -> '100.64.0."0/15"' or - // "100.64.0.0/15" -> '"100.64.0.0/15"' - String subnetCIDR = e.getKey().replaceAll("\"", ""); - String address = e.getValue(); - return new SubnetAddress(subnetCIDR, parseAddress(address, resolveAddresses)); - }) - .collect(Collectors.toList()); - this.defaultAddress = - Optional.ofNullable( - context - .getConfig() - .getDefaultProfile() - .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) - .map(address -> parseAddress(address, resolveAddresses)); - - validateSubnetsAreOfSameProtocol(this.subnetAddresses); - validateSubnetsAreNotOverlapping(this.subnetAddresses); - } - - private static void validateSubnetsAreOfSameProtocol(List subnets) { - for (int i = 0; i < subnets.size() - 1; i++) { - for (int j = i + 1; j < subnets.size(); j++) { - SubnetAddress subnet1 = subnets.get(i); - SubnetAddress subnet2 = subnets.get(j); - if (subnet1.isIPv4() != subnet2.isIPv4() && subnet1.isIPv6() != subnet2.isIPv6()) { - throw new IllegalArgumentException( - String.format( - "Configured subnets are of the different protocols: %s, %s", subnet1, subnet2)); - } - } - } - } - - private static void validateSubnetsAreNotOverlapping(List subnets) { - for (int i = 0; i < subnets.size() - 1; i++) { - for (int j = i + 1; j < subnets.size(); j++) { - SubnetAddress subnet1 = subnets.get(i); - SubnetAddress subnet2 = subnets.get(j); - if (subnet1.isOverlapping(subnet2)) { - throw new IllegalArgumentException( - String.format("Configured subnets are overlapping: %s, %s", subnet1, subnet2)); - } - } - } - } - - @NonNull - @Override - public InetSocketAddress translate(@NonNull InetSocketAddress address) { - InetSocketAddress translatedAddress = null; - for (SubnetAddress subnetAddress : subnetAddresses) { - if (subnetAddress.contains(address)) { - translatedAddress = subnetAddress.getAddress(); - } - } - if (translatedAddress == null && defaultAddress.isPresent()) { - translatedAddress = defaultAddress.get(); - } - if (translatedAddress == null) { - 
translatedAddress = address; - } - LOG.debug("[{}] Translated {} to {}", logPrefix, address, translatedAddress); - return translatedAddress; - } - - @Override - public void close() {} - - @Nullable - private InetSocketAddress parseAddress(String address, boolean resolve) { - try { - InetSocketAddress parsedAddress = AddressUtils.extract(address, resolve).iterator().next(); - LOG.debug("[{}] Parsed {} to {}", logPrefix, address, parsedAddress); - return parsedAddress; - } catch (RuntimeException e) { - throw new IllegalArgumentException( - String.format("Invalid address %s (%s)", address, e.getMessage()), e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java deleted file mode 100644 index 5078428c21a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Handles the lifecycle of an admin request (such as a node refresh or schema refresh query). 
*/ -@ThreadSafe -public class AdminRequestHandler implements ResponseCallback { - private static final Logger LOG = LoggerFactory.getLogger(AdminRequestHandler.class); - - public static AdminRequestHandler call( - DriverChannel channel, Query query, Duration timeout, String logPrefix) { - return new AdminRequestHandler<>( - channel, - true, - query, - Frame.NO_PAYLOAD, - timeout, - logPrefix, - "call '" + query.query + "'", - com.datastax.oss.protocol.internal.response.result.Void.class); - } - - public static AdminRequestHandler query( - DriverChannel channel, - String query, - Map parameters, - Duration timeout, - int pageSize, - String logPrefix) { - Query message = - new Query( - query, - buildQueryOptions(pageSize, serialize(parameters, channel.protocolVersion()), null)); - String debugString = "query '" + message.query + "'"; - if (!parameters.isEmpty()) { - debugString += " with parameters " + parameters; - } - return new AdminRequestHandler<>( - channel, true, message, Frame.NO_PAYLOAD, timeout, logPrefix, debugString, Rows.class); - } - - public static AdminRequestHandler query( - DriverChannel channel, String query, Duration timeout, int pageSize, String logPrefix) { - return query(channel, query, Collections.emptyMap(), timeout, pageSize, logPrefix); - } - - private final DriverChannel channel; - private final boolean shouldPreAcquireId; - private final Message message; - private final Map customPayload; - private final Duration timeout; - private final String logPrefix; - private final String debugString; - private final Class expectedResponseType; - protected final CompletableFuture result = new CompletableFuture<>(); - - // This is only ever accessed on the channel's event loop, so it doesn't need to be volatile - private ScheduledFuture timeoutFuture; - - protected AdminRequestHandler( - DriverChannel channel, - boolean shouldPreAcquireId, - Message message, - Map customPayload, - Duration timeout, - String logPrefix, - String debugString, - Class 
expectedResponseType) { - this.channel = channel; - this.shouldPreAcquireId = shouldPreAcquireId; - this.message = message; - this.customPayload = customPayload; - this.timeout = timeout; - this.logPrefix = logPrefix; - this.debugString = debugString; - this.expectedResponseType = expectedResponseType; - } - - public CompletionStage start() { - LOG.debug("[{}] Executing {}", logPrefix, this); - if (shouldPreAcquireId && !channel.preAcquireId()) { - setFinalError( - new BusyConnectionException( - String.format( - "%s has reached its maximum number of simultaneous requests", channel))); - } else { - channel.write(message, false, customPayload, this).addListener(this::onWriteComplete); - } - return result; - } - - private void onWriteComplete(Future future) { - if (future.isSuccess()) { - LOG.debug("[{}] Successfully wrote {}, waiting for response", logPrefix, this); - if (timeout.toNanos() > 0) { - timeoutFuture = - channel - .eventLoop() - .schedule(this::fireTimeout, timeout.toNanos(), TimeUnit.NANOSECONDS); - timeoutFuture.addListener(UncaughtExceptions::log); - } - } else { - setFinalError(future.cause()); - } - } - - private void fireTimeout() { - setFinalError( - new DriverTimeoutException(String.format("%s timed out after %s", debugString, timeout))); - if (!channel.closeFuture().isDone()) { - channel.cancel(this); - } - } - - @Override - public void onFailure(Throwable error) { - if (timeoutFuture != null) { - timeoutFuture.cancel(true); - } - setFinalError(error); - } - - @Override - public void onResponse(Frame responseFrame) { - if (timeoutFuture != null) { - timeoutFuture.cancel(true); - } - Message message = responseFrame.message; - LOG.debug("[{}] Got response {}", logPrefix, responseFrame.message); - if (!expectedResponseType.isInstance(message)) { - // Note that this also covers error responses, no need to get too fancy here - setFinalError(new UnexpectedResponseException(debugString, message)); - } else if (expectedResponseType == Rows.class) { - 
Rows rows = (Rows) message; - ByteBuffer pagingState = rows.getMetadata().pagingState; - AdminRequestHandler nextHandler = (pagingState == null) ? null : this.copy(pagingState); - // The public factory methods guarantee that expectedResponseType and ResultT always match: - @SuppressWarnings("unchecked") - ResultT result = (ResultT) new AdminResult(rows, nextHandler, channel.protocolVersion()); - setFinalResult(result); - } else if (expectedResponseType == Prepared.class) { - Prepared prepared = (Prepared) message; - @SuppressWarnings("unchecked") - ResultT result = (ResultT) ByteBuffer.wrap(prepared.preparedQueryId); - setFinalResult(result); - } else if (expectedResponseType - == com.datastax.oss.protocol.internal.response.result.Void.class) { - setFinalResult(null); - } else { - setFinalError(new AssertionError("Unhandled response type" + expectedResponseType)); - } - } - - protected boolean setFinalResult(ResultT result) { - return this.result.complete(result); - } - - protected boolean setFinalError(Throwable error) { - return result.completeExceptionally(error); - } - - private AdminRequestHandler copy(ByteBuffer pagingState) { - assert message instanceof Query; - Query current = (Query) this.message; - QueryOptions currentOptions = current.options; - QueryOptions newOptions = - buildQueryOptions(currentOptions.pageSize, currentOptions.namedValues, pagingState); - return new AdminRequestHandler<>( - channel, - // This is called for next page queries, so we always need to reacquire an id: - true, - new Query(current.query, newOptions), - customPayload, - timeout, - logPrefix, - debugString, - expectedResponseType); - } - - private static QueryOptions buildQueryOptions( - int pageSize, Map serialize, ByteBuffer pagingState) { - return new QueryOptions( - ProtocolConstants.ConsistencyLevel.ONE, - Collections.emptyList(), - serialize, - false, - pageSize, - pagingState, - ProtocolConstants.ConsistencyLevel.SERIAL, - Statement.NO_DEFAULT_TIMESTAMP, - null, - 
Statement.NO_NOW_IN_SECONDS); - } - - private static Map serialize( - Map parameters, ProtocolVersion protocolVersion) { - Map result = Maps.newHashMapWithExpectedSize(parameters.size()); - for (Map.Entry entry : parameters.entrySet()) { - result.put(entry.getKey(), serialize(entry.getValue(), protocolVersion)); - } - return result; - } - - private static ByteBuffer serialize(Object parameter, ProtocolVersion protocolVersion) { - if (parameter instanceof String) { - return TypeCodecs.TEXT.encode((String) parameter, protocolVersion); - } else if (parameter instanceof InetAddress) { - return TypeCodecs.INET.encode((InetAddress) parameter, protocolVersion); - } else if (parameter instanceof List && ((List) parameter).get(0) instanceof String) { - @SuppressWarnings("unchecked") - List l = (List) parameter; - return AdminRow.LIST_OF_TEXT.encode(l, protocolVersion); - } else if (parameter instanceof Integer) { - return TypeCodecs.INT.encode((Integer) parameter, protocolVersion); - } else { - throw new IllegalArgumentException( - "Unsupported variable type for admin query: " + parameter.getClass()); - } - } - - @Override - public String toString() { - return debugString; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java deleted file mode 100644 index 686cc05c6b0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.AbstractIterator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.Rows; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe // wraps a mutable queue -public class AdminResult implements Iterable { - - private final Queue> data; - private final Map columnSpecs; - private final AdminRequestHandler nextHandler; - private final ProtocolVersion protocolVersion; - - public AdminResult( - Rows rows, AdminRequestHandler nextHandler, ProtocolVersion protocolVersion) { - this.data = rows.getData(); - - ImmutableMap.Builder columnSpecsBuilder = ImmutableMap.builder(); - for (ColumnSpec spec : rows.getMetadata().columnSpecs) { - columnSpecsBuilder.put(spec.name, spec); - } - // Admin queries are simple selects only, so there are no duplicate names (if that ever - // changes, build() will fail and we'll have to do things differently) - this.columnSpecs = columnSpecsBuilder.build(); - - this.nextHandler = nextHandler; - 
this.protocolVersion = protocolVersion; - } - - /** This consumes the result's data and can be called only once. */ - @NonNull - @Override - public Iterator iterator() { - return new AbstractIterator() { - @Override - protected AdminRow computeNext() { - List rowData = data.poll(); - return (rowData == null) - ? endOfData() - : new AdminRow(columnSpecs, rowData, protocolVersion); - } - }; - } - - public boolean hasNextPage() { - return nextHandler != null; - } - - public CompletionStage nextPage() { - return (nextHandler == null) - ? CompletableFutures.failedFuture( - new AssertionError("No next page, use hasNextPage() before you call this method")) - : nextHandler.start(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java deleted file mode 100644 index 6e32ea845fb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class AdminRow { - - @VisibleForTesting - static final TypeCodec> LIST_OF_TEXT = TypeCodecs.listOf(TypeCodecs.TEXT); - - private static final TypeCodec> SET_OF_TEXT = TypeCodecs.setOf(TypeCodecs.TEXT); - private static final TypeCodec> MAP_OF_STRING_TO_STRING = - TypeCodecs.mapOf(TypeCodecs.TEXT, TypeCodecs.TEXT); - - private final Map columnSpecs; - private final List data; - private final ProtocolVersion protocolVersion; - - public AdminRow( - Map columnSpecs, List data, ProtocolVersion protocolVersion) { - this.columnSpecs = columnSpecs; - this.data = data; - this.protocolVersion = protocolVersion; - } - - @Nullable - public Boolean getBoolean(String columnName) { - return get(columnName, TypeCodecs.BOOLEAN); - } - - @Nullable - public Integer getInteger(String columnName) { - return get(columnName, TypeCodecs.INT); - } - - public boolean isString(String columnName) { - return columnSpecs.get(columnName).type.id == ProtocolConstants.DataType.VARCHAR; - } - - @Nullable - public String getString(String columnName) { - return get(columnName, TypeCodecs.TEXT); - } - - @Nullable - public UUID getUuid(String columnName) { - return get(columnName, TypeCodecs.UUID); - } - - @Nullable - public ByteBuffer getByteBuffer(String columnName) { - return 
get(columnName, TypeCodecs.BLOB); - } - - @Nullable - public InetAddress getInetAddress(String columnName) { - return get(columnName, TypeCodecs.INET); - } - - @Nullable - public List getListOfString(String columnName) { - return get(columnName, LIST_OF_TEXT); - } - - @Nullable - public Set getSetOfString(String columnName) { - return get(columnName, SET_OF_TEXT); - } - - @Nullable - public Map getMapOfStringToString(String columnName) { - return get(columnName, MAP_OF_STRING_TO_STRING); - } - - public boolean isNull(String columnName) { - if (!contains(columnName)) { - return true; - } else { - int index = columnSpecs.get(columnName).index; - return data.get(index) == null; - } - } - - public boolean contains(String columnName) { - return columnSpecs.containsKey(columnName); - } - - @Nullable - public T get(String columnName, TypeCodec codec) { - // Minimal checks here: this is for internal use, so the caller should know what they're - // doing - if (!contains(columnName)) { - return null; - } else { - int index = columnSpecs.get(columnName).index; - return codec.decode(data.get(index), protocolVersion); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java deleted file mode 100644 index 40ab21b759a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ThrottledAdminRequestHandler extends AdminRequestHandler - implements Throttled { - - /** - * @param shouldPreAcquireId whether to call {@link DriverChannel#preAcquireId()} before sending - * the request. 
This must be false if you obtained the connection from a pool ({@link - * ChannelPool#next()}, or {@link DefaultSession#getChannel(Node, String)}). It must be - * true if you are using a standalone channel (e.g. in {@link ControlConnection} or one of - * its auxiliary components). - */ - public static ThrottledAdminRequestHandler query( - DriverChannel channel, - boolean shouldPreAcquireId, - Message message, - Map customPayload, - Duration timeout, - RequestThrottler throttler, - SessionMetricUpdater metricUpdater, - String logPrefix, - String debugString) { - return new ThrottledAdminRequestHandler<>( - channel, - shouldPreAcquireId, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - debugString, - Rows.class); - } - - /** - * @param shouldPreAcquireId whether to call {@link DriverChannel#preAcquireId()} before sending - * the request. See {@link #query(DriverChannel, boolean, Message, Map, Duration, - * RequestThrottler, SessionMetricUpdater, String, String)} for more explanations. 
- */ - public static ThrottledAdminRequestHandler prepare( - DriverChannel channel, - boolean shouldPreAcquireId, - Message message, - Map customPayload, - Duration timeout, - RequestThrottler throttler, - SessionMetricUpdater metricUpdater, - String logPrefix) { - return new ThrottledAdminRequestHandler<>( - channel, - shouldPreAcquireId, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - message.toString(), - Prepared.class); - } - - private final long startTimeNanos; - private final RequestThrottler throttler; - private final SessionMetricUpdater metricUpdater; - - protected ThrottledAdminRequestHandler( - DriverChannel channel, - boolean preAcquireId, - Message message, - Map customPayload, - Duration timeout, - RequestThrottler throttler, - SessionMetricUpdater metricUpdater, - String logPrefix, - String debugString, - Class expectedResponseType) { - super( - channel, - preAcquireId, - message, - customPayload, - timeout, - logPrefix, - debugString, - expectedResponseType); - this.startTimeNanos = System.nanoTime(); - this.throttler = throttler; - this.metricUpdater = metricUpdater; - } - - @Override - public CompletionStage start() { - // Don't write request yet, wait for green light from throttler - throttler.register(this); - return result; - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - if (wasDelayed) { - metricUpdater.updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - null, - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - super.start(); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - metricUpdater.incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, null); - setFinalError(error); - } - - @Override - protected boolean setFinalResult(ResultT result) { - boolean wasSet = super.setFinalResult(result); - if (wasSet) { - throttler.signalSuccess(this); - } - return wasSet; - } - - @Override - protected boolean 
setFinalError(Throwable error) { - boolean wasSet = super.setFinalError(error); - if (wasSet) { - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - return wasSet; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java deleted file mode 100644 index c842b655411..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.adminrequest; - -import com.datastax.oss.protocol.internal.Message; - -public class UnexpectedResponseException extends Exception { - - public final Message message; - - public UnexpectedResponseException(String requestName, Message message) { - super(String.format("%s got unexpected response %s", requestName, message)); - this.message = message; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java deleted file mode 100644 index 55ab14c8981..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Infrastructure to execute internal requests in the driver, for example control connection - * queries, or automatic statement preparation. - * - *

This is a stripped-down version of the public API, with the bare minimum for our needs: - * - *

    - *
  • async mode only. - *
  • execution on a given channel, without retries. - *
  • {@code QUERY} and {@code PREPARE} messages only. - *
  • paging is possible, but only on the same channel. If the channel gets closed between pages, - * the query fails. - *
  • values can only be bound by name, and it is assumed that the target type can always be - * inferred unambiguously (i.e. the only integer type is {@code int}, etc). - *
  • limited result API: getters by internal name only, no custom codecs. - *
  • codecs are only implemented for the types we actually need for admin queries. - *
- */ -package com.datastax.oss.driver.internal.core.adminrequest; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java b/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java deleted file mode 100644 index f2dfdf14171..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.auth; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.auth.AuthUtils; -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** - * A simple authentication provider that supports SASL authentication using the PLAIN mechanism for - * version 3 (or above) of the CQL native protocol. - * - *

To activate this provider, add an {@code advanced.auth-provider} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.auth-provider {
- *     class = com.datastax.driver.api.core.auth.PlainTextAuthProvider
- *     username = cassandra
- *     password = cassandra
- *
- *     // If connecting to DataStax Enterprise, this additional option allows proxy authentication
- *     // (login as another user or role)
- *     authorization-id = userOrRole
- *   }
- * }
- * 
- * - * The authentication provider cannot be changed at runtime; however, the credentials can be changed - * at runtime: the new ones will be used for new connection attempts once the configuration gets - * {@linkplain com.datastax.oss.driver.api.core.config.DriverConfigLoader#reload() reloaded}. - * - *

See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class PlainTextAuthProvider extends PlainTextAuthProviderBase { - - private final DriverExecutionProfile config; - - public PlainTextAuthProvider(DriverContext context) { - super(context.getSessionName()); - this.config = context.getConfig().getDefaultProfile(); - } - - @NonNull - @Override - protected Credentials getCredentials( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - // It's not valid to use the PlainTextAuthProvider without a username or password, error out - // early here - AuthUtils.validateConfigPresent( - config, - PlainTextAuthProvider.class.getName(), - endPoint, - DefaultDriverOption.AUTH_PROVIDER_USER_NAME, - DefaultDriverOption.AUTH_PROVIDER_PASSWORD); - - String authorizationId = config.getString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, ""); - assert authorizationId != null; // per the default above - return new Credentials( - config.getString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME).toCharArray(), - config.getString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD).toCharArray(), - authorizationId.toCharArray()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java deleted file mode 100644 index 970ea061ec7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** Events relating to driver channels. */ -@Immutable -public class ChannelEvent { - public enum Type { - OPENED, - CLOSED, - RECONNECTION_STARTED, - RECONNECTION_STOPPED, - CONTROL_CONNECTION_FAILED - } - - public static ChannelEvent channelOpened(Node node) { - return new ChannelEvent(Type.OPENED, node); - } - - public static ChannelEvent channelClosed(Node node) { - return new ChannelEvent(Type.CLOSED, node); - } - - public static ChannelEvent reconnectionStarted(Node node) { - return new ChannelEvent(Type.RECONNECTION_STARTED, node); - } - - public static ChannelEvent reconnectionStopped(Node node) { - return new ChannelEvent(Type.RECONNECTION_STOPPED, node); - } - - /** The control connection tried to use this node, but failed to open a channel. 
*/ - public static ChannelEvent controlConnectionFailed(Node node) { - return new ChannelEvent(Type.CONTROL_CONNECTION_FAILED, node); - } - - public final Type type; - public final Node node; - - public ChannelEvent(Type type, Node node) { - this.type = type; - this.node = node; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ChannelEvent) { - ChannelEvent that = (ChannelEvent) other; - return this.type == that.type && Objects.equals(this.node, that.node); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(type, node); - } - - @Override - public String toString() { - return "ChannelEvent(" + type + ", " + node + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java deleted file mode 100644 index 66a5c4edc0e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverConfig; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.protocol.FrameDecoder; -import com.datastax.oss.driver.internal.core.protocol.FrameEncoder; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import 
java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Builds {@link DriverChannel} objects for an instance of the driver. */ -@ThreadSafe -public class ChannelFactory { - - private static final Logger LOG = LoggerFactory.getLogger(ChannelFactory.class); - - /** - * A value for {@link #productType} that indicates that we are connected to DataStax Cloud. This - * value matches the one defined at DSE DB server side at {@code ProductType.java}. - */ - private static final String DATASTAX_CLOUD_PRODUCT_TYPE = "DATASTAX_APOLLO"; - - private static final AtomicBoolean LOGGED_ORPHAN_WARNING = new AtomicBoolean(); - - /** - * A value for {@link #productType} that indicates that the server does not report any product - * type. - */ - private static final String UNKNOWN_PRODUCT_TYPE = "UNKNOWN"; - - // The names of the handlers on the pipeline: - public static final String SSL_HANDLER_NAME = "ssl"; - public static final String INBOUND_TRAFFIC_METER_NAME = "inboundTrafficMeter"; - public static final String OUTBOUND_TRAFFIC_METER_NAME = "outboundTrafficMeter"; - public static final String FRAME_TO_BYTES_ENCODER_NAME = "frameToBytesEncoder"; - public static final String FRAME_TO_SEGMENT_ENCODER_NAME = "frameToSegmentEncoder"; - public static final String SEGMENT_TO_BYTES_ENCODER_NAME = "segmentToBytesEncoder"; - public static final String BYTES_TO_FRAME_DECODER_NAME = "bytesToFrameDecoder"; - public static final String BYTES_TO_SEGMENT_DECODER_NAME = "bytesToSegmentDecoder"; - public static final String SEGMENT_TO_FRAME_DECODER_NAME = "segmentToFrameDecoder"; - public static final String HEARTBEAT_HANDLER_NAME = "heartbeat"; - public static final String INFLIGHT_HANDLER_NAME = "inflight"; - public static final String INIT_HANDLER_NAME = "init"; - - private final String logPrefix; - 
protected final InternalDriverContext context; - - /** either set from the configuration, or null and will be negotiated */ - @VisibleForTesting volatile ProtocolVersion protocolVersion; - - private volatile String clusterName; - - /** - * The value of the {@code PRODUCT_TYPE} option reported by the first channel we opened, in - * response to a {@code SUPPORTED} request. - * - *

If the server does not return that option, the value will be {@link #UNKNOWN_PRODUCT_TYPE}. - */ - @VisibleForTesting volatile String productType; - - public ChannelFactory(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.context = context; - - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - if (defaultConfig.isDefined(DefaultDriverOption.PROTOCOL_VERSION)) { - String versionName = defaultConfig.getString(DefaultDriverOption.PROTOCOL_VERSION); - this.protocolVersion = context.getProtocolVersionRegistry().fromName(versionName); - } // else it will be negotiated with the first opened connection - } - - public ProtocolVersion getProtocolVersion() { - ProtocolVersion result = this.protocolVersion; - Preconditions.checkState( - result != null, "Protocol version not known yet, this should only be called after init"); - return result; - } - - /** - * WARNING: this is only used at the very beginning of the init process (when we just refreshed - * the list of nodes for the first time, and found out that one of them requires a lower version - * than was negotiated with the first contact point); it's safe at this time because we are in a - * controlled state (only the control connection is open, it's not executing queries and we're - * going to reconnect immediately after). Calling this method at any other time will likely wreak - * havoc. 
- */ - public void setProtocolVersion(ProtocolVersion newVersion) { - this.protocolVersion = newVersion; - } - - public String getClusterName() { - return clusterName; - } - - public CompletionStage connect(Node node, DriverChannelOptions options) { - NodeMetricUpdater nodeMetricUpdater; - if (node instanceof DefaultNode) { - nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - } else { - nodeMetricUpdater = NoopNodeMetricUpdater.INSTANCE; - } - return connect(node.getEndPoint(), options, nodeMetricUpdater); - } - - @VisibleForTesting - CompletionStage connect( - EndPoint endPoint, DriverChannelOptions options, NodeMetricUpdater nodeMetricUpdater) { - CompletableFuture resultFuture = new CompletableFuture<>(); - - ProtocolVersion currentVersion; - boolean isNegotiating; - List attemptedVersions = new CopyOnWriteArrayList<>(); - if (this.protocolVersion != null) { - currentVersion = protocolVersion; - isNegotiating = false; - } else { - currentVersion = context.getProtocolVersionRegistry().highestNonBeta(); - isNegotiating = true; - } - - connect( - endPoint, - options, - nodeMetricUpdater, - currentVersion, - isNegotiating, - attemptedVersions, - resultFuture); - return resultFuture; - } - - private void connect( - EndPoint endPoint, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - ProtocolVersion currentVersion, - boolean isNegotiating, - List attemptedVersions, - CompletableFuture resultFuture) { - - NettyOptions nettyOptions = context.getNettyOptions(); - - Bootstrap bootstrap = - new Bootstrap() - .group(nettyOptions.ioEventLoopGroup()) - .channel(nettyOptions.channelClass()) - .option(ChannelOption.ALLOCATOR, nettyOptions.allocator()) - .handler( - initializer(endPoint, currentVersion, options, nodeMetricUpdater, resultFuture)); - - nettyOptions.afterBootstrapInitialized(bootstrap); - - ChannelFuture connectFuture = bootstrap.connect(endPoint.resolve()); - - connectFuture.addListener( - cf -> { - if (connectFuture.isSuccess()) 
{ - Channel channel = connectFuture.channel(); - DriverChannel driverChannel = - new DriverChannel(endPoint, channel, context.getWriteCoalescer(), currentVersion); - // If this is the first successful connection, remember the protocol version and - // cluster name for future connections. - if (isNegotiating) { - ChannelFactory.this.protocolVersion = currentVersion; - } - if (ChannelFactory.this.clusterName == null) { - ChannelFactory.this.clusterName = driverChannel.getClusterName(); - } - Map> supportedOptions = driverChannel.getOptions(); - if (ChannelFactory.this.productType == null && supportedOptions != null) { - List productTypes = supportedOptions.get("PRODUCT_TYPE"); - String productType = - productTypes != null && !productTypes.isEmpty() - ? productTypes.get(0) - : UNKNOWN_PRODUCT_TYPE; - ChannelFactory.this.productType = productType; - DriverConfig driverConfig = context.getConfig(); - if (driverConfig instanceof TypesafeDriverConfig - && productType.equals(DATASTAX_CLOUD_PRODUCT_TYPE)) { - ((TypesafeDriverConfig) driverConfig) - .overrideDefaults( - ImmutableMap.of( - DefaultDriverOption.REQUEST_CONSISTENCY, - ConsistencyLevel.LOCAL_QUORUM.name())); - } - } - resultFuture.complete(driverChannel); - } else { - Throwable error = connectFuture.cause(); - if (error instanceof UnsupportedProtocolVersionException && isNegotiating) { - attemptedVersions.add(currentVersion); - Optional downgraded = - context.getProtocolVersionRegistry().downgrade(currentVersion); - if (downgraded.isPresent()) { - LOG.debug( - "[{}] Failed to connect with protocol {}, retrying with {}", - logPrefix, - currentVersion, - downgraded.get()); - connect( - endPoint, - options, - nodeMetricUpdater, - downgraded.get(), - true, - attemptedVersions, - resultFuture); - } else { - resultFuture.completeExceptionally( - UnsupportedProtocolVersionException.forNegotiation( - endPoint, attemptedVersions)); - } - } else { - // Note: might be completed already if the failure happened in 
initializer(), this is - // fine - resultFuture.completeExceptionally(error); - } - } - }); - } - - @VisibleForTesting - ChannelInitializer initializer( - EndPoint endPoint, - ProtocolVersion protocolVersion, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - CompletableFuture resultFuture) { - return new ChannelFactoryInitializer( - endPoint, protocolVersion, options, nodeMetricUpdater, resultFuture); - }; - - class ChannelFactoryInitializer extends ChannelInitializer { - - private final EndPoint endPoint; - private final ProtocolVersion protocolVersion; - private final DriverChannelOptions options; - private final NodeMetricUpdater nodeMetricUpdater; - private final CompletableFuture resultFuture; - - ChannelFactoryInitializer( - EndPoint endPoint, - ProtocolVersion protocolVersion, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - CompletableFuture resultFuture) { - - this.endPoint = endPoint; - this.protocolVersion = protocolVersion; - this.options = options; - this.nodeMetricUpdater = nodeMetricUpdater; - this.resultFuture = resultFuture; - } - - @Override - protected void initChannel(Channel channel) { - try { - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - - long setKeyspaceTimeoutMillis = - defaultConfig - .getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT) - .toMillis(); - int maxFrameLength = - (int) defaultConfig.getBytes(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH); - int maxRequestsPerConnection = - defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); - int maxOrphanRequests = - defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS); - if (maxOrphanRequests >= maxRequestsPerConnection) { - if (LOGGED_ORPHAN_WARNING.compareAndSet(false, true)) { - LOG.warn( - "[{}] Invalid value for {}: {}. It must be lower than {}. 
" - + "Defaulting to {} (1/4 of max-requests) instead.", - logPrefix, - DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS.getPath(), - maxOrphanRequests, - DefaultDriverOption.CONNECTION_MAX_REQUESTS.getPath(), - maxRequestsPerConnection / 4); - } - maxOrphanRequests = maxRequestsPerConnection / 4; - } - - InFlightHandler inFlightHandler = - new InFlightHandler( - protocolVersion, - new StreamIdGenerator(maxRequestsPerConnection), - maxOrphanRequests, - setKeyspaceTimeoutMillis, - channel.newPromise(), - options.eventCallback, - options.ownerLogPrefix); - HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultConfig); - ProtocolInitHandler initHandler = - new ProtocolInitHandler( - context, - protocolVersion, - clusterName, - endPoint, - options, - heartbeatHandler, - productType == null); - - ChannelPipeline pipeline = channel.pipeline(); - context - .getSslHandlerFactory() - .map(f -> f.newSslHandler(channel, endPoint)) - .map(h -> pipeline.addLast(SSL_HANDLER_NAME, h)); - - // Only add meter handlers on the pipeline if metrics are enabled. 
- SessionMetricUpdater sessionMetricUpdater = context.getMetricsFactory().getSessionUpdater(); - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_RECEIVED, null) - || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_RECEIVED, null)) { - pipeline.addLast( - INBOUND_TRAFFIC_METER_NAME, - new InboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); - } - - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_SENT, null) - || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_SENT, null)) { - pipeline.addLast( - OUTBOUND_TRAFFIC_METER_NAME, - new OutboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); - } - - pipeline - .addLast( - FRAME_TO_BYTES_ENCODER_NAME, - new FrameEncoder(context.getFrameCodec(), maxFrameLength)) - .addLast( - BYTES_TO_FRAME_DECODER_NAME, - new FrameDecoder(context.getFrameCodec(), maxFrameLength)) - // Note: HeartbeatHandler is inserted here once init completes - .addLast(INFLIGHT_HANDLER_NAME, inFlightHandler) - .addLast(INIT_HANDLER_NAME, initHandler); - - context.getNettyOptions().afterChannelInitialized(channel); - } catch (Throwable t) { - // If the init handler throws an exception, Netty swallows it and closes the channel. We - // want to propagate it instead, so fail the outer future (the result of connect()). - resultFuture.completeExceptionally(t); - throw t; - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java deleted file mode 100644 index 3ba3d70eb8d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.internal.core.util.ProtocolUtils; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.response.Error; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.NotThreadSafe; - -/** Common infrastructure to send a native protocol request from a channel handler. 
*/ -@NotThreadSafe // must be confined to the channel's event loop -abstract class ChannelHandlerRequest implements ResponseCallback { - - final Channel channel; - final ChannelHandlerContext ctx; - final InFlightHandler inFlightHandler; - private final long timeoutMillis; - - private ScheduledFuture timeoutFuture; - - ChannelHandlerRequest(ChannelHandlerContext ctx, long timeoutMillis) { - this.ctx = ctx; - this.channel = ctx.channel(); - this.inFlightHandler = ctx.pipeline().get(InFlightHandler.class); - assert inFlightHandler != null; - this.timeoutMillis = timeoutMillis; - } - - abstract String describe(); - - abstract Message getRequest(); - - abstract void onResponse(Message response); - - /** either message or cause can be null */ - abstract void fail(String message, Throwable cause); - - void fail(Throwable cause) { - fail(null, cause); - } - - void send() { - assert channel.eventLoop().inEventLoop(); - if (!inFlightHandler.preAcquireId()) { - fail( - new BusyConnectionException( - String.format( - "%s has reached its maximum number of simultaneous requests", channel))); - } else { - DriverChannel.RequestMessage message = - new DriverChannel.RequestMessage(getRequest(), false, Frame.NO_PAYLOAD, this); - ChannelFuture writeFuture = channel.writeAndFlush(message); - writeFuture.addListener(this::writeListener); - } - } - - private void writeListener(Future writeFuture) { - if (writeFuture.isSuccess()) { - timeoutFuture = - channel.eventLoop().schedule(this::onTimeout, timeoutMillis, TimeUnit.MILLISECONDS); - } else { - String message = - String.format("%s: failed to send request (%s)", describe(), writeFuture.cause()); - fail(message, writeFuture.cause()); - } - } - - @Override - public final void onResponse(Frame responseFrame) { - timeoutFuture.cancel(true); - onResponse(responseFrame.message); - } - - @Override - public final void onFailure(Throwable error) { - // timeoutFuture may not have been assigned if write failed. 
- if (timeoutFuture != null) { - timeoutFuture.cancel(true); - } - String message = String.format("%s: unexpected failure (%s)", describe(), error); - fail(message, error); - } - - private void onTimeout() { - fail(new DriverTimeoutException(describe() + ": timed out after " + timeoutMillis + " ms")); - if (!channel.closeFuture().isDone()) { - // Cancel the response callback - channel.writeAndFlush(this).addListener(UncaughtExceptions::log); - } - } - - void failOnUnexpected(Message response) { - if (response instanceof Error) { - Error error = (Error) response; - fail( - new IllegalArgumentException( - String.format( - "%s: server replied with unexpected error code [%s]: %s", - describe(), ProtocolUtils.errorCodeString(error.code), error.message))); - } else { - fail( - new IllegalArgumentException( - String.format( - "%s: server replied with unexpected response type (opcode=%s)", - describe(), ProtocolUtils.opcodeString(response.opcode)))); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java deleted file mode 100644 index 8e47db3fb1b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; - -/** - * Indicates that we've attempted to connect to a node with a cluster name that doesn't match that - * of the other nodes known to the driver. - * - *

The driver runs the following query on each newly established connection: - * - *

- *     select cluster_name from system.local
- * 
- * - * The first connection sets the cluster name for this driver instance, all subsequent connections - * must match it or they will get rejected. This is intended to filter out errors in the discovery - * process (for example, stale entries in {@code system.peers}). - * - *

This error is never returned directly to the client. If we detect a mismatch, it will always - * be after the driver has connected successfully; the error will be logged and the offending node - * forced down. - */ -public class ClusterNameMismatchException extends RuntimeException { - - private static final long serialVersionUID = 0; - - public final EndPoint endPoint; - public final String expectedClusterName; - public final String actualClusterName; - - public ClusterNameMismatchException( - EndPoint endPoint, String actualClusterName, String expectedClusterName) { - super( - String.format( - "Node %s reports cluster name '%s' that doesn't match our cluster name '%s'. " - + "It will be forced down.", - endPoint, actualClusterName, expectedClusterName)); - this.endPoint = endPoint; - this.expectedClusterName = expectedClusterName; - this.actualClusterName = actualClusterName; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java deleted file mode 100644 index 789981b4832..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.internal.core.util.concurrent.PromiseCombiner; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import java.net.SocketAddress; -import net.jcip.annotations.NotThreadSafe; - -/** - * A handler that delays the promise returned by {@code bootstrap.connect()}, in order to run a - * custom initialization process before making the channel available to clients. - * - *

This handler is not shareable. It must be installed by the channel initializer, as the last - * channel in the pipeline. - * - *

It will be notified via {@link #onRealConnect(ChannelHandlerContext)} when the real underlying - * connection is established. It can then start sending messages on the connection, while external - * clients are still waiting on their promise. Once the custom initialization is finished, the - * clients' promise can be completed with {@link #setConnectSuccess()} or {@link - * #setConnectFailure(Throwable)}. - */ -@NotThreadSafe -public abstract class ConnectInitHandler extends ChannelDuplexHandler { - // the completion of the custom initialization process - private ChannelPromise initPromise; - private ChannelHandlerContext ctx; - - @Override - public void connect( - ChannelHandlerContext ctx, - SocketAddress remoteAddress, - SocketAddress localAddress, - ChannelPromise callerPromise) - throws Exception { - this.ctx = ctx; - initPromise = ctx.channel().newPromise(); - - // the completion of the real underlying connection: - ChannelPromise realConnectPromise = ctx.channel().newPromise(); - super.connect(ctx, remoteAddress, localAddress, realConnectPromise); - realConnectPromise.addListener(future -> onRealConnect(ctx)); - - // Make the caller's promise wait on the other two: - PromiseCombiner.combine(callerPromise, realConnectPromise, initPromise); - } - - protected abstract void onRealConnect(ChannelHandlerContext ctx); - - protected boolean setConnectSuccess() { - boolean result = initPromise.trySuccess(); - if (result) { - ctx.pipeline().remove(this); - } - return result; - } - - protected void setConnectFailure(Throwable cause) { - if (initPromise.tryFailure(cause)) { - ctx.channel().close(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java deleted file mode 100644 index 232fa83be44..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java +++ /dev/null 
@@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import io.netty.channel.EventLoop; -import java.util.HashSet; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; - -/** - * Default write coalescing strategy. - * - *

It maintains a queue per event loop, with the writes targeting the channels that run on this - * loop. As soon as a write gets enqueued, it triggers a task that will flush the queue (other - * writes may get enqueued before or while the task runs). - * - *

Note that Netty provides a similar mechanism out of the box ({@link - * io.netty.handler.flush.FlushConsolidationHandler}), but in our experience our approach allows - * more performance gains, because it allows consolidating not only the flushes, but also the write - * tasks themselves (a single consolidated write task is scheduled on the event loop, instead of - * multiple individual tasks, so there is less context switching). - */ -@ThreadSafe -public class DefaultWriteCoalescer implements WriteCoalescer { - private final long rescheduleIntervalNanos; - private final ConcurrentMap flushers = new ConcurrentHashMap<>(); - - public DefaultWriteCoalescer(DriverContext context) { - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - rescheduleIntervalNanos = config.getDuration(DefaultDriverOption.COALESCER_INTERVAL).toNanos(); - } - - @Override - public ChannelFuture writeAndFlush(Channel channel, Object message) { - ChannelPromise writePromise = channel.newPromise(); - Write write = new Write(channel, message, writePromise); - enqueue(write, channel.eventLoop()); - return writePromise; - } - - private void enqueue(Write write, EventLoop eventLoop) { - Flusher flusher = flushers.computeIfAbsent(eventLoop, Flusher::new); - flusher.enqueue(write); - } - - private class Flusher { - private final EventLoop eventLoop; - - // These variables are accessed both from client threads and the event loop - private final Queue writes = new ConcurrentLinkedQueue<>(); - private final AtomicBoolean running = new AtomicBoolean(); - - // This variable is accessed only from runOnEventLoop, it doesn't need to be thread-safe - private final Set channels = new HashSet<>(); - - private Flusher(EventLoop eventLoop) { - this.eventLoop = eventLoop; - } - - private void enqueue(Write write) { - boolean added = writes.offer(write); - assert added; // always true (see MpscLinkedAtomicQueue implementation) - if (running.compareAndSet(false, true)) { - 
eventLoop.execute(this::runOnEventLoop); - } - } - - private void runOnEventLoop() { - assert eventLoop.inEventLoop(); - - Write write; - while ((write = writes.poll()) != null) { - Channel channel = write.channel; - channels.add(channel); - channel.write(write.message, write.writePromise); - } - - for (Channel channel : channels) { - channel.flush(); - } - channels.clear(); - - // Prepare to stop - running.set(false); - - // enqueue() can be called concurrently with this method. There is a race condition if it: - // - added an element in the queue after we were done draining it - // - but observed running==true before we flipped it, and therefore didn't schedule another - // run - - // If nothing was added in the queue, there were no concurrent calls, we can stop safely now - if (writes.isEmpty()) { - return; - } - - // Otherwise, check if one of those calls scheduled a run. If so, they flipped the bit back - // on. If not, we need to do it ourselves. - boolean shouldRestartMyself = running.compareAndSet(false, true); - - if (shouldRestartMyself && !eventLoop.isShuttingDown()) { - eventLoop.schedule(this::runOnEventLoop, rescheduleIntervalNanos, TimeUnit.NANOSECONDS); - } - } - } - - private static class Write { - private final Channel channel; - private final Object message; - private final ChannelPromise writePromise; - - private Write(Channel channel, Object message, ChannelPromise writePromise) { - this.channel = channel; - this.message = message; - this.writePromise = writePromise; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java deleted file mode 100644 index e40aa6f3097..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.protocol.internal.Message; -import io.netty.channel.Channel; -import io.netty.channel.ChannelConfig; -import io.netty.channel.ChannelFuture; -import io.netty.channel.EventLoop; -import io.netty.util.AttributeKey; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.Promise; -import java.net.SocketAddress; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; - -/** - * A thin 
wrapper around a Netty {@link Channel}, to send requests to a Cassandra node and receive - * responses. - */ -@ThreadSafe -public class DriverChannel { - - static final AttributeKey CLUSTER_NAME_KEY = AttributeKey.valueOf("cluster_name"); - static final AttributeKey>> OPTIONS_KEY = - AttributeKey.valueOf("options"); - - @SuppressWarnings("RedundantStringConstructorCall") - static final Object GRACEFUL_CLOSE_MESSAGE = new String("GRACEFUL_CLOSE_MESSAGE"); - - @SuppressWarnings("RedundantStringConstructorCall") - static final Object FORCEFUL_CLOSE_MESSAGE = new String("FORCEFUL_CLOSE_MESSAGE"); - - private final EndPoint endPoint; - private final Channel channel; - private final InFlightHandler inFlightHandler; - private final WriteCoalescer writeCoalescer; - private final ProtocolVersion protocolVersion; - private final AtomicBoolean closing = new AtomicBoolean(); - private final AtomicBoolean forceClosing = new AtomicBoolean(); - - DriverChannel( - EndPoint endPoint, - Channel channel, - WriteCoalescer writeCoalescer, - ProtocolVersion protocolVersion) { - this.endPoint = endPoint; - this.channel = channel; - this.inFlightHandler = channel.pipeline().get(InFlightHandler.class); - this.writeCoalescer = writeCoalescer; - this.protocolVersion = protocolVersion; - } - - /** - * @return a future that succeeds when the request frame was successfully written on the channel. - * Beyond that, the caller will be notified through the {@code responseCallback}. - */ - public Future write( - Message request, - boolean tracing, - Map customPayload, - ResponseCallback responseCallback) { - if (closing.get()) { - return channel.newFailedFuture(new IllegalStateException("Driver channel is closing")); - } - RequestMessage message = new RequestMessage(request, tracing, customPayload, responseCallback); - return writeCoalescer.writeAndFlush(channel, message); - } - - /** - * Cancels a callback, indicating that the client that wrote it is no longer interested in the - * answer. - * - *

Note that this does not cancel the request server-side (but might in the future if Cassandra - * supports it). - */ - public void cancel(ResponseCallback responseCallback) { - // To avoid creating an extra message, we adopt the convention that writing the callback - // directly means cancellation - writeCoalescer.writeAndFlush(channel, responseCallback).addListener(UncaughtExceptions::log); - } - - /** - * Switches the underlying Cassandra connection to a new keyspace (as if a {@code USE ...} - * statement was issued). - * - *

The future will complete once the change is effective. Only one change may run at a given - * time, concurrent attempts will fail. - * - *

Changing the keyspace is inherently thread-unsafe: if other queries are running at the same - * time, the keyspace they will use is unpredictable. - */ - public Future setKeyspace(CqlIdentifier newKeyspace) { - Promise promise = channel.eventLoop().newPromise(); - channel.pipeline().fireUserEventTriggered(new SetKeyspaceEvent(newKeyspace, promise)); - return promise; - } - - /** - * @return the name of the Cassandra cluster as returned by {@code system.local.cluster_name} on - * this connection. - */ - public String getClusterName() { - return channel.attr(CLUSTER_NAME_KEY).get(); - } - - public Map> getOptions() { - return channel.attr(OPTIONS_KEY).get(); - } - - /** - * @return the number of available stream ids on the channel; more precisely, this is the number - * of {@link #preAcquireId()} calls for which the id has not been released yet. This is used - * to weigh channels in pools that have a size bigger than 1, in the load balancing policy, - * and for monitoring purposes. - */ - public int getAvailableIds() { - return inFlightHandler.getAvailableIds(); - } - - /** - * Indicates the intention to send a request using this channel. - * - *

There must be exactly one invocation of this method before each call to {@link - * #write(Message, boolean, Map, ResponseCallback)}. If this method returns true, the client - * must proceed with the write. If it returns false, it must not proceed. - * - *

This method is used together with {@link #getAvailableIds()} to track how many requests are - * currently executing on the channel, and avoid submitting a request that would result in a - * {@link BusyConnectionException}. The two methods follow atomic semantics: {@link - * #getAvailableIds()} returns the exact count of clients that have called {@link #preAcquireId()} - * and not yet released their stream id at this point in time. - * - *

Most of the time, the driver code calls this method automatically: - * - *

    - *
  • if you obtained the channel from a pool ({@link ChannelPool#next()} or {@link - * DefaultSession#getChannel(Node, String)}), do not call this method: it has already - * been done as part of selecting the channel. - *
  • if you use {@link ChannelHandlerRequest} or {@link AdminRequestHandler} for internal - * queries, do not call this method, those classes already do it. - *
  • however, if you use {@link ThrottledAdminRequestHandler}, you must specify a {@code - * shouldPreAcquireId} argument to indicate whether to call this method or not. This is - * because those requests are sometimes used with a channel that comes from a pool - * (requiring {@code shouldPreAcquireId = false}), or sometimes with a standalone channel - * like in the control connection (requiring {@code shouldPreAcquireId = true}). - *
- */ - public boolean preAcquireId() { - return inFlightHandler.preAcquireId(); - } - - /** - * @return the number of requests currently executing on this channel (including {@link - * #getOrphanedIds() orphaned ids}). - */ - public int getInFlight() { - return inFlightHandler.getInFlight(); - } - - /** - * @return the number of stream ids for requests that have either timed out or been cancelled, but - * for which we can't release the stream id because a request might still come from the - * server. - */ - public int getOrphanedIds() { - return inFlightHandler.getOrphanIds(); - } - - public EventLoop eventLoop() { - return channel.eventLoop(); - } - - public ProtocolVersion protocolVersion() { - return protocolVersion; - } - - /** The endpoint that was used to establish the connection. */ - public EndPoint getEndPoint() { - return endPoint; - } - - public SocketAddress localAddress() { - return channel.localAddress(); - } - - /** @return The {@link ChannelConfig configuration} of this channel. */ - public ChannelConfig config() { - return channel.config(); - } - - /** - * Initiates a graceful shutdown: no new requests will be accepted, but all pending requests will - * be allowed to complete before the underlying channel is closed. - */ - public Future close() { - if (closing.compareAndSet(false, true) && channel.isOpen()) { - // go through the coalescer: this guarantees that we won't reject writes that were submitted - // before, but had not been coalesced yet. - writeCoalescer - .writeAndFlush(channel, GRACEFUL_CLOSE_MESSAGE) - .addListener(UncaughtExceptions::log); - } - return channel.closeFuture(); - } - - /** - * Initiates a forced shutdown: any pending request will be aborted and the underlying channel - * will be closed. 
- */ - public Future forceClose() { - this.close(); - if (forceClosing.compareAndSet(false, true) && channel.isOpen()) { - writeCoalescer - .writeAndFlush(channel, FORCEFUL_CLOSE_MESSAGE) - .addListener(UncaughtExceptions::log); - } - return channel.closeFuture(); - } - - /** - * Returns a future that will complete when a graceful close has started, but not yet completed. - * - *

In other words, the channel has stopped accepting new requests, but is still waiting for - * pending requests to finish. Once the last response has been received, the channel will really - * close and {@link #closeFuture()} will be completed. - * - *

If there were no pending requests when the graceful shutdown was initiated, or if {@link - * #forceClose()} is called first, this future will never complete. - */ - public ChannelFuture closeStartedFuture() { - return this.inFlightHandler.closeStartedFuture; - } - - /** - * Does not close the channel, but returns a future that will complete when it is completely - * closed. - */ - public ChannelFuture closeFuture() { - return channel.closeFuture(); - } - - @Override - public String toString() { - return channel.toString(); - } - - // This is essentially a stripped-down Frame. We can't materialize the frame before writing, - // because we need the stream id, which is assigned from within the event loop. - static class RequestMessage { - final Message request; - final boolean tracing; - final Map customPayload; - final ResponseCallback responseCallback; - - RequestMessage( - Message message, - boolean tracing, - Map customPayload, - ResponseCallback responseCallback) { - this.request = message; - this.tracing = tracing; - this.customPayload = customPayload; - this.responseCallback = responseCallback; - } - } - - static class SetKeyspaceEvent { - final CqlIdentifier keyspaceName; - final Promise promise; - - public SetKeyspaceEvent(CqlIdentifier keyspaceName, Promise promise) { - this.keyspaceName = keyspaceName; - this.promise = promise; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java deleted file mode 100644 index 208cf52ac22..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Collections; -import java.util.List; -import net.jcip.annotations.Immutable; - -/** Options for the creation of a driver channel. */ -@Immutable -public class DriverChannelOptions { - - /** No keyspace, no events, don't report available stream ids. */ - public static DriverChannelOptions DEFAULT = builder().build(); - - public static Builder builder() { - return new Builder(); - } - - public final CqlIdentifier keyspace; - - /** - * What kind of protocol events to listen for. 
- * - * @see com.datastax.oss.protocol.internal.ProtocolConstants.EventType - */ - public final List eventTypes; - - public final EventCallback eventCallback; - - public final String ownerLogPrefix; - - private DriverChannelOptions( - CqlIdentifier keyspace, - List eventTypes, - EventCallback eventCallback, - String ownerLogPrefix) { - this.keyspace = keyspace; - this.eventTypes = eventTypes; - this.eventCallback = eventCallback; - this.ownerLogPrefix = ownerLogPrefix; - } - - public static class Builder { - private CqlIdentifier keyspace = null; - private List eventTypes = Collections.emptyList(); - private EventCallback eventCallback = null; - private String ownerLogPrefix = null; - - public Builder withKeyspace(CqlIdentifier keyspace) { - this.keyspace = keyspace; - return this; - } - - public Builder withEvents(List eventTypes, EventCallback eventCallback) { - Preconditions.checkArgument(eventTypes != null && !eventTypes.isEmpty()); - Preconditions.checkNotNull(eventCallback); - this.eventTypes = eventTypes; - this.eventCallback = eventCallback; - return this; - } - - public Builder withOwnerLogPrefix(String ownerLogPrefix) { - this.ownerLogPrefix = ownerLogPrefix; - return this; - } - - public DriverChannelOptions build() { - return new DriverChannelOptions(keyspace, eventTypes, eventCallback, ownerLogPrefix); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java deleted file mode 100644 index 0ac71233fdd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.protocol.internal.Message; - -public interface EventCallback { - /** Invoked when a protocol event is received. */ - void onEvent(Message event); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java deleted file mode 100644 index 3dac60f5216..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.response.Supported; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.timeout.IdleState; -import io.netty.handler.timeout.IdleStateEvent; -import io.netty.handler.timeout.IdleStateHandler; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe -class HeartbeatHandler extends IdleStateHandler { - - private static final Logger LOG = LoggerFactory.getLogger(HeartbeatHandler.class); - - private final DriverExecutionProfile config; - - private HeartbeatRequest request; - - HeartbeatHandler(DriverExecutionProfile config) { - super((int) config.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL).getSeconds(), 0, 0); - this.config = config; - } - - @Override - protected void channelIdle(ChannelHandlerContext ctx, IdleStateEvent evt) throws Exception { - if (evt.state() == IdleState.READER_IDLE) { - if (this.request != null) { - LOG.warn( - "Not sending heartbeat because a previous one is still in progress. 
" - + "Check that {} is not lower than {}.", - DefaultDriverOption.HEARTBEAT_INTERVAL.getPath(), - DefaultDriverOption.HEARTBEAT_TIMEOUT.getPath()); - } else { - LOG.debug( - "Connection was inactive for {} seconds, sending heartbeat", - config.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL).getSeconds()); - long timeoutMillis = config.getDuration(DefaultDriverOption.HEARTBEAT_TIMEOUT).toMillis(); - this.request = new HeartbeatRequest(ctx, timeoutMillis); - this.request.send(); - } - } - } - - private class HeartbeatRequest extends ChannelHandlerRequest { - - HeartbeatRequest(ChannelHandlerContext ctx, long timeoutMillis) { - super(ctx, timeoutMillis); - } - - @Override - String describe() { - return "Heartbeat request"; - } - - @Override - Message getRequest() { - return Options.INSTANCE; - } - - @Override - void onResponse(Message response) { - if (response instanceof Supported) { - LOG.debug("{} Heartbeat query succeeded", ctx.channel()); - HeartbeatHandler.this.request = null; - } else { - failOnUnexpected(response); - } - } - - @Override - void fail(String message, Throwable cause) { - if (cause instanceof HeartbeatException) { - // Ignore: this happens when the heartbeat query times out and the inflight handler aborts - // all queries (including the heartbeat query itself) - return; - } - - HeartbeatHandler.this.request = null; - if (message != null) { - LOG.debug("{} Heartbeat query failed: {}", ctx.channel(), message, cause); - } else { - LOG.debug("{} Heartbeat query failed", ctx.channel(), cause); - } - - // Notify InFlightHandler. 
- ctx.fireExceptionCaught( - new HeartbeatException(ctx.channel().remoteAddress(), message, cause)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java deleted file mode 100644 index 90b02f358cd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel.RequestMessage; -import com.datastax.oss.driver.internal.core.channel.DriverChannel.SetKeyspaceEvent; -import com.datastax.oss.driver.internal.core.protocol.FrameDecodingException; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.BiMap; -import com.datastax.oss.driver.shaded.guava.common.collect.HashBiMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Promise; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Manages requests that are currently executing on a channel. 
*/ -@NotThreadSafe -public class InFlightHandler extends ChannelDuplexHandler { - private static final Logger LOG = LoggerFactory.getLogger(InFlightHandler.class); - - private final ProtocolVersion protocolVersion; - private final StreamIdGenerator streamIds; - final ChannelPromise closeStartedFuture; - private final String ownerLogPrefix; - private final BiMap inFlight; - private final Map orphaned; - private volatile int orphanedSize; // thread-safe view for metrics - private final long setKeyspaceTimeoutMillis; - private final EventCallback eventCallback; - private final int maxOrphanStreamIds; - private boolean closingGracefully; - private SetKeyspaceRequest setKeyspaceRequest; - private String logPrefix; - - InFlightHandler( - ProtocolVersion protocolVersion, - StreamIdGenerator streamIds, - int maxOrphanStreamIds, - long setKeyspaceTimeoutMillis, - ChannelPromise closeStartedFuture, - EventCallback eventCallback, - String ownerLogPrefix) { - this.protocolVersion = protocolVersion; - this.streamIds = streamIds; - this.maxOrphanStreamIds = maxOrphanStreamIds; - this.closeStartedFuture = closeStartedFuture; - this.ownerLogPrefix = ownerLogPrefix; - this.logPrefix = ownerLogPrefix + "|connecting..."; - this.inFlight = HashBiMap.create(streamIds.getMaxAvailableIds()); - this.orphaned = new HashMap<>(maxOrphanStreamIds); - this.setKeyspaceTimeoutMillis = setKeyspaceTimeoutMillis; - this.eventCallback = eventCallback; - } - - @Override - public void channelActive(ChannelHandlerContext ctx) throws Exception { - super.channelActive(ctx); - String channelId = ctx.channel().toString(); - this.logPrefix = ownerLogPrefix + "|" + channelId.substring(1, channelId.length() - 1); - } - - @Override - public void write(ChannelHandlerContext ctx, Object in, ChannelPromise promise) throws Exception { - if (in == DriverChannel.GRACEFUL_CLOSE_MESSAGE) { - LOG.debug("[{}] Received graceful close request", logPrefix); - startGracefulShutdown(ctx); - } else if (in == 
DriverChannel.FORCEFUL_CLOSE_MESSAGE) { - LOG.debug("[{}] Received forceful close request, aborting pending queries", logPrefix); - abortAllInFlight(new ClosedConnectionException("Channel was force-closed")); - ctx.channel().close(); - } else if (in instanceof HeartbeatException) { - abortAllInFlight( - new ClosedConnectionException("Heartbeat query failed", ((HeartbeatException) in))); - ctx.close(); - } else if (in instanceof RequestMessage) { - write(ctx, (RequestMessage) in, promise); - } else if (in instanceof ResponseCallback) { - cancel(ctx, (ResponseCallback) in, promise); - } else { - promise.setFailure( - new IllegalArgumentException("Unsupported message type " + in.getClass().getName())); - } - } - - private void write(ChannelHandlerContext ctx, RequestMessage message, ChannelPromise promise) { - if (closingGracefully) { - promise.setFailure(new IllegalStateException("Channel is closing")); - streamIds.cancelPreAcquire(); - return; - } - int streamId = streamIds.acquire(); - if (streamId < 0) { - // Should not happen with the preAcquire mechanism, but handle gracefully - promise.setFailure( - new BusyConnectionException( - String.format( - "Couldn't acquire a stream id from InFlightHandler on %s", ctx.channel()))); - streamIds.cancelPreAcquire(); - return; - } - - if (inFlight.containsKey(streamId)) { - promise.setFailure( - new IllegalStateException("Found pending callback for stream id " + streamId)); - streamIds.cancelPreAcquire(); - return; - } - - LOG.trace("[{}] Writing {} on stream id {}", logPrefix, message.responseCallback, streamId); - Frame frame = - Frame.forRequest( - protocolVersion.getCode(), - streamId, - message.tracing, - message.customPayload, - message.request); - - inFlight.put(streamId, message.responseCallback); - ChannelFuture writeFuture = ctx.write(frame, promise); - writeFuture.addListener( - future -> { - if (future.isSuccess()) { - message.responseCallback.onStreamIdAssigned(streamId); - } else { - release(streamId, ctx); - } 
- }); - } - - private void cancel( - ChannelHandlerContext ctx, ResponseCallback responseCallback, ChannelPromise promise) { - Integer streamId = inFlight.inverse().remove(responseCallback); - if (streamId == null) { - LOG.trace( - "[{}] Received cancellation for unknown or already cancelled callback {}, skipping", - logPrefix, - responseCallback); - } else { - LOG.trace( - "[{}] Cancelled callback {} for stream id {}", logPrefix, responseCallback, streamId); - if (closingGracefully && inFlight.isEmpty()) { - LOG.debug("[{}] Last pending query was cancelled, closing channel", logPrefix); - ctx.channel().close(); - } else { - // We can't release the stream id, because a response might still come back from the server. - // Keep track of those "orphaned" ids, to release them later if we get a response and the - // callback says it's the last one. - orphaned.put(streamId, responseCallback); - if (orphaned.size() > maxOrphanStreamIds) { - LOG.debug( - "[{}] Orphan stream ids exceeded the configured threshold ({}), closing gracefully", - logPrefix, - maxOrphanStreamIds); - startGracefulShutdown(ctx); - } else { - orphanedSize = orphaned.size(); - } - } - } - promise.setSuccess(); - } - - private void startGracefulShutdown(ChannelHandlerContext ctx) { - if (inFlight.isEmpty()) { - LOG.debug("[{}] No pending queries, completing graceful shutdown now", logPrefix); - ctx.channel().close(); - } else { - // Remove heartbeat handler from pipeline if present. 
- ChannelHandler heartbeatHandler = ctx.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME); - if (heartbeatHandler != null) { - ctx.pipeline().remove(heartbeatHandler); - } - LOG.debug("[{}] There are pending queries, delaying graceful shutdown", logPrefix); - closingGracefully = true; - closeStartedFuture.trySuccess(); - } - } - - @Override - @SuppressWarnings("NonAtomicVolatileUpdate") - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - Frame responseFrame = (Frame) msg; - int streamId = responseFrame.streamId; - - if (streamId < 0) { - Message event = responseFrame.message; - if (eventCallback == null) { - LOG.debug("[{}] Received event {} but no callback was registered", logPrefix, event); - } else { - LOG.debug("[{}] Received event {}, notifying callback", logPrefix, event); - try { - eventCallback.onEvent(event); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, "[{}] Unexpected error while invoking event handler", logPrefix, t); - } - } - } else { - boolean wasInFlight = true; - ResponseCallback callback = inFlight.get(streamId); - if (callback == null) { - wasInFlight = false; - callback = orphaned.get(streamId); - if (callback == null) { - LOG.trace("[{}] Got response on unknown stream id {}, skipping", logPrefix, streamId); - return; - } - } - try { - if (callback.isLastResponse(responseFrame)) { - LOG.debug( - "[{}] Got last response on {} stream id {}, completing and releasing", - logPrefix, - wasInFlight ? "in-flight" : "orphaned", - streamId); - release(streamId, ctx); - } else { - LOG.trace( - "[{}] Got non-last response on {} stream id {}, still holding", - logPrefix, - wasInFlight ? 
"in-flight" : "orphaned", - streamId); - } - if (wasInFlight) { - callback.onResponse(responseFrame); - } - } catch (Throwable t) { - if (wasInFlight) { - fail( - callback, - new IllegalArgumentException("Unexpected error while invoking response handler", t)); - } else { - // Assume the callback is already completed, so it's better to log - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while invoking response handler on stream id {}", - logPrefix, - t, - streamId); - } - } - } - } - - /** Called if an exception was thrown while processing an inbound event (i.e. a response). */ - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable exception) throws Exception { - if (exception instanceof FrameDecodingException) { - int streamId = ((FrameDecodingException) exception).streamId; - LOG.debug("[{}] Error while decoding response on stream id {}", logPrefix, streamId); - if (streamId >= 0) { - // We know which request matches the failing response, fail that one only - ResponseCallback responseCallback = inFlight.get(streamId); - if (responseCallback != null) { - fail(responseCallback, exception.getCause()); - } - release(streamId, ctx); - } else { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while decoding incoming event frame", - logPrefix, - exception.getCause()); - } - } else { - // Otherwise fail all pending requests - abortAllInFlight( - (exception instanceof HeartbeatException) - ? 
(HeartbeatException) exception - : new ClosedConnectionException("Unexpected error on channel", exception)); - ctx.close(); - } - } - - @Override - public void userEventTriggered(ChannelHandlerContext ctx, Object event) throws Exception { - if (event instanceof SetKeyspaceEvent) { - SetKeyspaceEvent setKeyspaceEvent = (SetKeyspaceEvent) event; - if (this.setKeyspaceRequest != null) { - setKeyspaceEvent.promise.setFailure( - new IllegalStateException( - "Can't call setKeyspace while a keyspace switch is already in progress")); - } else { - LOG.debug( - "[{}] Switching to keyspace {}", logPrefix, setKeyspaceEvent.keyspaceName.asInternal()); - this.setKeyspaceRequest = new SetKeyspaceRequest(ctx, setKeyspaceEvent); - this.setKeyspaceRequest.send(); - } - } else { - super.userEventTriggered(ctx, event); - } - } - - @Override - public void channelInactive(ChannelHandlerContext ctx) throws Exception { - // If the channel was closed normally (normal or forced shutdown), inFlight is already empty by - // the time we get here. So if it's not, it means the channel closed unexpectedly (e.g. the - // connection was dropped). 
- abortAllInFlight(new ClosedConnectionException("Lost connection to remote peer")); - super.channelInactive(ctx); - } - - private void release(int streamId, ChannelHandlerContext ctx) { - LOG.trace("[{}] Releasing stream id {}", logPrefix, streamId); - if (inFlight.remove(streamId) != null) { - // If we're in the middle of an orderly close and this was the last request, actually close - // the channel now - if (closingGracefully && inFlight.isEmpty()) { - LOG.debug("[{}] Done handling the last pending query, closing channel", logPrefix); - ctx.channel().close(); - } - } else if (orphaned.remove(streamId) != null) { - orphanedSize = orphaned.size(); - } - // Note: it's possible that the callback is in neither map, if we get here after a call to - // abortAllInFlight that already cleared the map (see JAVA-2000) - streamIds.release(streamId); - } - - private void abortAllInFlight(DriverException cause) { - abortAllInFlight(cause, null); - } - - /** - * @param ignore the ResponseCallback that called this method, if applicable (avoids a recursive - * loop) - */ - private void abortAllInFlight(DriverException cause, ResponseCallback ignore) { - if (!inFlight.isEmpty()) { - - // Create a local copy and clear the map immediately. This prevents - // ConcurrentModificationException if aborting one of the handlers recurses back into this - // method. - Set responseCallbacks = ImmutableSet.copyOf(inFlight.values()); - inFlight.clear(); - - for (ResponseCallback responseCallback : responseCallbacks) { - if (responseCallback != ignore) { - fail(responseCallback, cause); - } - } - // It's not necessary to release the stream ids, since we always call this method right before - // closing the channel - } - } - - private void fail(ResponseCallback callback, Throwable failure) { - try { - callback.onFailure(failure); - } catch (Throwable throwable) { - // Protect against unexpected errors. 
We don't have anywhere to report the error (since - // onFailure failed), so log as a last resort. - LOG.error("[{}] Unexpected error while failing {}", logPrefix, callback, throwable); - } - } - - int getAvailableIds() { - return streamIds.getAvailableIds(); - } - - boolean preAcquireId() { - return streamIds.preAcquire(); - } - - int getInFlight() { - return streamIds.getMaxAvailableIds() - streamIds.getAvailableIds(); - } - - int getOrphanIds() { - return orphanedSize; - } - - private class SetKeyspaceRequest extends ChannelHandlerRequest { - - private final CqlIdentifier keyspaceName; - private final Promise promise; - - SetKeyspaceRequest(ChannelHandlerContext ctx, SetKeyspaceEvent setKeyspaceEvent) { - super(ctx, setKeyspaceTimeoutMillis); - this.keyspaceName = setKeyspaceEvent.keyspaceName; - this.promise = setKeyspaceEvent.promise; - } - - @Override - String describe() { - return "[" + logPrefix + "] Set keyspace request (USE " + keyspaceName.asCql(true) + ")"; - } - - @Override - Message getRequest() { - return new Query("USE " + keyspaceName.asCql(false)); - } - - @Override - void onResponse(Message response) { - if (response instanceof SetKeyspace) { - if (promise.trySuccess(null)) { - InFlightHandler.this.setKeyspaceRequest = null; - } - } else { - failOnUnexpected(response); - } - } - - @Override - void fail(String message, Throwable cause) { - ClosedConnectionException setKeyspaceException = - new ClosedConnectionException(message, cause); - if (promise.tryFailure(setKeyspaceException)) { - InFlightHandler.this.setKeyspaceRequest = null; - // setKeyspace queries are not triggered directly by the user, but only as a response to a - // successful "USE... query", so the keyspace name should generally be valid. If the - // keyspace switch fails, this could be due to a schema disagreement or a more serious - // error. Rescheduling the switch is impractical, we can't do much better than closing the - // channel and letting it reconnect. 
- Loggers.warnWithException( - LOG, "[{}] Unexpected error while switching keyspace", logPrefix, setKeyspaceException); - abortAllInFlight(setKeyspaceException, this); - ctx.channel().close(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java deleted file mode 100644 index 518f398a808..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; - -public class InboundTrafficMeter extends ChannelInboundHandlerAdapter { - - private final NodeMetricUpdater nodeMetricUpdater; - private final SessionMetricUpdater sessionMetricUpdater; - - InboundTrafficMeter( - NodeMetricUpdater nodeMetricUpdater, SessionMetricUpdater sessionMetricUpdater) { - this.nodeMetricUpdater = nodeMetricUpdater; - this.sessionMetricUpdater = sessionMetricUpdater; - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof ByteBuf) { - int bytes = ((ByteBuf) msg).readableBytes(); - nodeMetricUpdater.markMeter(DefaultNodeMetric.BYTES_RECEIVED, null, bytes); - sessionMetricUpdater.markMeter(DefaultSessionMetric.BYTES_RECEIVED, null, bytes); - } - super.channelRead(ctx, msg); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java deleted file mode 100644 index 768eb047b9d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; - -public class OutboundTrafficMeter extends ChannelOutboundHandlerAdapter { - - private final NodeMetricUpdater nodeMetricUpdater; - private final SessionMetricUpdater sessionMetricUpdater; - - OutboundTrafficMeter( - NodeMetricUpdater nodeMetricUpdater, SessionMetricUpdater sessionMetricUpdater) { - this.nodeMetricUpdater = nodeMetricUpdater; - this.sessionMetricUpdater = sessionMetricUpdater; - } - - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { - if (msg instanceof ByteBuf) { - int bytes = ((ByteBuf) msg).readableBytes(); - nodeMetricUpdater.markMeter(DefaultNodeMetric.BYTES_SENT, null, bytes); - sessionMetricUpdater.markMeter(DefaultSessionMetric.BYTES_SENT, null, bytes); - } - super.write(ctx, msg, promise); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java deleted file mode 100644 index 4e3f7d61f66..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import net.jcip.annotations.ThreadSafe; - -/** No-op implementation of the write coalescer: each write is flushed immediately. 
*/ -@ThreadSafe -public class PassThroughWriteCoalescer implements WriteCoalescer { - - public PassThroughWriteCoalescer(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @Override - public ChannelFuture writeAndFlush(Channel channel, Object message) { - return channel.writeAndFlush(message); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java deleted file mode 100644 index 8a426f7b368..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.auth.Authenticator; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ConnectionInitException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.protocol.BytesToSegmentDecoder; -import com.datastax.oss.driver.internal.core.protocol.FrameToSegmentEncoder; -import com.datastax.oss.driver.internal.core.protocol.SegmentToBytesEncoder; -import com.datastax.oss.driver.internal.core.protocol.SegmentToFrameDecoder; -import com.datastax.oss.driver.internal.core.util.ProtocolUtils; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.ProtocolConstants.ErrorCode; -import com.datastax.oss.protocol.internal.request.AuthResponse; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.Register; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.response.AuthChallenge; -import 
com.datastax.oss.protocol.internal.response.AuthSuccess; -import com.datastax.oss.protocol.internal.response.Authenticate; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Ready; -import com.datastax.oss.protocol.internal.response.Supported; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPipeline; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles the sequence of internal requests that we send on a channel before it's ready to accept - * user requests. - */ -@NotThreadSafe -class ProtocolInitHandler extends ConnectInitHandler { - private static final Logger LOG = LoggerFactory.getLogger(ProtocolInitHandler.class); - private static final Query CLUSTER_NAME_QUERY = - new Query("SELECT cluster_name FROM system.local"); - - private final InternalDriverContext context; - private final long timeoutMillis; - private final ProtocolVersion initialProtocolVersion; - private final DriverChannelOptions options; - // might be null if this is the first channel to this cluster - private final String expectedClusterName; - private final EndPoint endPoint; - private final HeartbeatHandler heartbeatHandler; - private String logPrefix; - private ChannelHandlerContext ctx; - private final boolean querySupportedOptions; - - /** - * @param querySupportedOptions whether to send OPTIONS as the first message, to request which - * protocol options the channel supports. If this is true, the options will be stored as a - * channel attribute, and exposed via {@link DriverChannel#getOptions()}. 
- */ - ProtocolInitHandler( - InternalDriverContext context, - ProtocolVersion protocolVersion, - String expectedClusterName, - EndPoint endPoint, - DriverChannelOptions options, - HeartbeatHandler heartbeatHandler, - boolean querySupportedOptions) { - - this.context = context; - this.endPoint = endPoint; - - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - - this.timeoutMillis = - defaultConfig.getDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT).toMillis(); - this.initialProtocolVersion = protocolVersion; - this.expectedClusterName = expectedClusterName; - this.options = options; - this.heartbeatHandler = heartbeatHandler; - this.querySupportedOptions = querySupportedOptions; - this.logPrefix = options.ownerLogPrefix + "|connecting..."; - } - - @Override - public void channelActive(ChannelHandlerContext ctx) throws Exception { - super.channelActive(ctx); - String channelId = ctx.channel().toString(); - this.logPrefix = options.ownerLogPrefix + "|" + channelId.substring(1, channelId.length() - 1); - } - - @Override - protected void onRealConnect(ChannelHandlerContext ctx) { - LOG.debug("[{}] Starting channel initialization", logPrefix); - this.ctx = ctx; - new InitRequest(ctx).send(); - } - - @Override - protected boolean setConnectSuccess() { - boolean result = super.setConnectSuccess(); - if (result) { - // add heartbeat to pipeline now that protocol is initialized. - ctx.pipeline() - .addBefore( - ChannelFactory.INFLIGHT_HANDLER_NAME, - ChannelFactory.HEARTBEAT_HANDLER_NAME, - heartbeatHandler); - } - return result; - } - - private enum Step { - OPTIONS, - STARTUP, - GET_CLUSTER_NAME, - SET_KEYSPACE, - AUTH_RESPONSE, - REGISTER, - } - - private class InitRequest extends ChannelHandlerRequest { - // This class is a finite-state automaton, that sends a different query depending on the step - // in the initialization sequence. 
- private Step step; - private int stepNumber = 0; - private Message request; - private Authenticator authenticator; - private ByteBuffer authResponseToken; - - InitRequest(ChannelHandlerContext ctx) { - super(ctx, timeoutMillis); - this.step = querySupportedOptions ? Step.OPTIONS : Step.STARTUP; - } - - @Override - String describe() { - return String.format( - "[%s] Protocol initialization request, step %d (%s)", logPrefix, stepNumber, request); - } - - @Override - Message getRequest() { - switch (step) { - case OPTIONS: - return request = Options.INSTANCE; - case STARTUP: - return request = new Startup(context.getStartupOptions()); - case GET_CLUSTER_NAME: - return request = CLUSTER_NAME_QUERY; - case SET_KEYSPACE: - return request = new Query("USE " + options.keyspace.asCql(false)); - case AUTH_RESPONSE: - return request = new AuthResponse(authResponseToken); - case REGISTER: - return request = new Register(options.eventTypes); - default: - throw new AssertionError("unhandled step: " + step); - } - } - - @Override - void send() { - stepNumber++; - super.send(); - } - - @Override - void onResponse(Message response) { - LOG.debug( - "[{}] step {} received response opcode={}", - logPrefix, - step, - ProtocolUtils.opcodeString(response.opcode)); - try { - if (step == Step.OPTIONS && response instanceof Supported) { - channel.attr(DriverChannel.OPTIONS_KEY).set(((Supported) response).options); - step = Step.STARTUP; - send(); - } else if (step == Step.STARTUP && response instanceof Ready) { - maybeSwitchToModernFraming(); - context.getAuthProvider().ifPresent(provider -> provider.onMissingChallenge(endPoint)); - step = Step.GET_CLUSTER_NAME; - send(); - } else if (step == Step.STARTUP && response instanceof Authenticate) { - maybeSwitchToModernFraming(); - Authenticate authenticate = (Authenticate) response; - authenticator = buildAuthenticator(endPoint, authenticate.authenticator); - authenticator - .initialResponse() - .whenCompleteAsync( - (token, error) -> { - if 
(error != null) { - fail( - new AuthenticationException( - endPoint, - String.format( - "Authenticator.initialResponse(): stage completed exceptionally (%s)", - error), - error)); - } else { - step = Step.AUTH_RESPONSE; - authResponseToken = token; - send(); - } - }, - channel.eventLoop()) - .exceptionally(UncaughtExceptions::log); - } else if (step == Step.AUTH_RESPONSE && response instanceof AuthChallenge) { - ByteBuffer challenge = ((AuthChallenge) response).token; - authenticator - .evaluateChallenge(challenge) - .whenCompleteAsync( - (token, error) -> { - if (error != null) { - fail( - new AuthenticationException( - endPoint, - String.format( - "Authenticator.evaluateChallenge(): stage completed exceptionally (%s)", - error), - error)); - } else { - step = Step.AUTH_RESPONSE; - authResponseToken = token; - send(); - } - }, - channel.eventLoop()) - .exceptionally(UncaughtExceptions::log); - } else if (step == Step.AUTH_RESPONSE && response instanceof AuthSuccess) { - ByteBuffer token = ((AuthSuccess) response).token; - authenticator - .onAuthenticationSuccess(token) - .whenCompleteAsync( - (ignored, error) -> { - if (error != null) { - fail( - new AuthenticationException( - endPoint, - String.format( - "Authenticator.onAuthenticationSuccess(): stage completed exceptionally (%s)", - error), - error)); - } else { - step = Step.GET_CLUSTER_NAME; - send(); - } - }, - channel.eventLoop()) - .exceptionally(UncaughtExceptions::log); - } else if (step == Step.AUTH_RESPONSE - && response instanceof Error - && ((Error) response).code == ProtocolConstants.ErrorCode.AUTH_ERROR) { - fail( - new AuthenticationException( - endPoint, - String.format( - "server replied with '%s' to AuthResponse request", - ((Error) response).message))); - } else if (step == Step.GET_CLUSTER_NAME && response instanceof Rows) { - Rows rows = (Rows) response; - List row = Objects.requireNonNull(rows.getData().poll()); - String actualClusterName = getString(row, 0); - if (expectedClusterName != 
null && !expectedClusterName.equals(actualClusterName)) { - fail( - new ClusterNameMismatchException(endPoint, actualClusterName, expectedClusterName)); - } else { - if (expectedClusterName == null) { - // Store the actual name so that it can be retrieved from the factory - channel.attr(DriverChannel.CLUSTER_NAME_KEY).set(actualClusterName); - } - if (options.keyspace != null) { - step = Step.SET_KEYSPACE; - send(); - } else if (!options.eventTypes.isEmpty()) { - step = Step.REGISTER; - send(); - } else { - setConnectSuccess(); - } - } - } else if (step == Step.SET_KEYSPACE && response instanceof SetKeyspace) { - if (!options.eventTypes.isEmpty()) { - step = Step.REGISTER; - send(); - } else { - setConnectSuccess(); - } - } else if (step == Step.REGISTER && response instanceof Ready) { - setConnectSuccess(); - } else if (response instanceof Error) { - Error error = (Error) response; - // Testing for a specific string is a tad fragile but Cassandra doesn't give us a more - // precise error code. - // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451. 
- boolean firstRequest = - (step == Step.OPTIONS && querySupportedOptions) || step == Step.STARTUP; - boolean serverOrProtocolError = - error.code == ErrorCode.PROTOCOL_ERROR || error.code == ErrorCode.SERVER_ERROR; - boolean badProtocolVersionMessage = - error.message.contains("Invalid or unsupported protocol version") - // JAVA-2925: server is behind driver and considers the proposed version as beta - || error.message.contains("Beta version of the protocol used"); - if (firstRequest && serverOrProtocolError && badProtocolVersionMessage) { - fail( - UnsupportedProtocolVersionException.forSingleAttempt( - endPoint, initialProtocolVersion)); - } else if (step == Step.SET_KEYSPACE - && error.code == ProtocolConstants.ErrorCode.INVALID) { - fail(new InvalidKeyspaceException(error.message)); - } else { - failOnUnexpected(error); - } - } else { - failOnUnexpected(response); - } - } catch (AuthenticationException e) { - fail(e); - } catch (Throwable t) { - fail(String.format("%s: unexpected exception (%s)", describe(), t), t); - } - } - - @Override - void fail(String message, Throwable cause) { - Throwable finalException = - (message == null) ? cause : new ConnectionInitException(message, cause); - setConnectFailure(finalException); - } - - private Authenticator buildAuthenticator(EndPoint endPoint, String authenticator) { - return context - .getAuthProvider() - .map(p -> p.newAuthenticator(endPoint, authenticator)) - .orElseThrow( - () -> - new AuthenticationException( - endPoint, - String.format( - "Node %s requires authentication (%s), but no authenticator configured", - endPoint, authenticator))); - } - - @Override - public String toString() { - return "init query " + step; - } - } - - /** - * Rearranges the pipeline to deal with the new framing structure in protocol v5 and above. The - * first messages still use the legacy format, we only do this after a successful response to the - * first STARTUP message. 
- */ - private void maybeSwitchToModernFraming() { - if (context - .getProtocolVersionRegistry() - .supports(initialProtocolVersion, DefaultProtocolFeature.MODERN_FRAMING)) { - - ChannelPipeline pipeline = ctx.pipeline(); - - // We basically add one conversion step in the middle: frames <-> *segments* <-> bytes - // Outbound: - pipeline.replace( - ChannelFactory.FRAME_TO_BYTES_ENCODER_NAME, - ChannelFactory.FRAME_TO_SEGMENT_ENCODER_NAME, - new FrameToSegmentEncoder( - context.getPrimitiveCodec(), context.getFrameCodec(), logPrefix)); - pipeline.addBefore( - ChannelFactory.FRAME_TO_SEGMENT_ENCODER_NAME, - ChannelFactory.SEGMENT_TO_BYTES_ENCODER_NAME, - new SegmentToBytesEncoder(context.getSegmentCodec())); - - // Inbound: - pipeline.replace( - ChannelFactory.BYTES_TO_FRAME_DECODER_NAME, - ChannelFactory.BYTES_TO_SEGMENT_DECODER_NAME, - new BytesToSegmentDecoder(context.getSegmentCodec())); - pipeline.addAfter( - ChannelFactory.BYTES_TO_SEGMENT_DECODER_NAME, - ChannelFactory.SEGMENT_TO_FRAME_DECODER_NAME, - new SegmentToFrameDecoder(context.getFrameCodec(), logPrefix)); - } - } - - private String getString(List row, int i) { - return TypeCodecs.TEXT.decode(row.get(i), DefaultProtocolVersion.DEFAULT); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java deleted file mode 100644 index 5a0e9e5eb86..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.protocol.internal.Frame; - -/** - * The outcome of a request sent to a Cassandra node. - * - *

This comes into play after the request has been successfully written to the channel. - * - *

Due to internal implementation constraints, different instances of this type must not be equal - * to each other (they are stored in a {@code BiMap} in {@link InFlightHandler}); reference equality - * should be appropriate in all cases. - */ -public interface ResponseCallback { - - /** - * Invoked when the server replies (note that the response frame might contain an error message). - */ - void onResponse(Frame responseFrame); - - /** - * Invoked if we couldn't get the response. - * - *

This can be triggered in two cases: - * - *

    - *
  • the connection was closed (for example, because of a heartbeat failure) before the - * response was received; - *
  • the response was received but there was an error while decoding it. - *
- */ - void onFailure(Throwable error); - - /** - * Reports the stream id used for the request on the current connection. - * - *

This is called every time the request is written successfully to a connection (and therefore - * might multiple times in case of retries). It is guaranteed to be invoked before any response to - * the request on that connection is processed. - * - *

The default implementation does nothing. This only needs to be overridden for specialized - * requests that hold the stream id across multiple responses. - * - * @see #isLastResponse(Frame) - */ - default void onStreamIdAssigned(int streamId) { - // nothing to do - } - - /** - * Whether the given frame is the last response to this request. - * - *

This is invoked for each response received by this callback; if it returns {@code true}, the - * driver assumes that the server is no longer using this stream id, and that it can be safely - * reused to send another request. - * - *

The default implementation always returns {@code true}: regular CQL requests only have one - * response, and we can reuse the stream id as soon as we've received it. This only needs to be - * overridden for specialized requests that hold the stream id across multiple responses. - */ - default boolean isLastResponse(Frame responseFrame) { - return true; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java deleted file mode 100644 index 3384bc57c94..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import java.util.BitSet; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.NotThreadSafe; - -/** - * Manages the set of identifiers used to distinguish multiplexed requests on a channel. - * - *

{@link #preAcquire()} / {@link #getAvailableIds()} follow atomic semantics. See {@link - * DriverChannel#preAcquireId()} for more explanations. - * - *

Other methods are not synchronized, they are only called by {@link InFlightHandler} on the I/O - * thread. - */ -@NotThreadSafe -class StreamIdGenerator { - - private final int maxAvailableIds; - // unset = available, set = borrowed (note that this is the opposite of the 3.x implementation) - private final BitSet ids; - private final AtomicInteger availableIds; - - StreamIdGenerator(int maxAvailableIds) { - this.maxAvailableIds = maxAvailableIds; - this.ids = new BitSet(this.maxAvailableIds); - this.availableIds = new AtomicInteger(this.maxAvailableIds); - } - - boolean preAcquire() { - while (true) { - int current = availableIds.get(); - assert current >= 0; - if (current == 0) { - return false; - } else if (availableIds.compareAndSet(current, current - 1)) { - return true; - } - } - } - - void cancelPreAcquire() { - int available = availableIds.incrementAndGet(); - assert available <= maxAvailableIds; - } - - int acquire() { - assert availableIds.get() < maxAvailableIds; - int id = ids.nextClearBit(0); - if (id >= maxAvailableIds) { - return -1; - } - ids.set(id); - return id; - } - - void release(int id) { - if (!ids.get(id)) { - throw new IllegalStateException("Tried to release id that hadn't been borrowed: " + id); - } - ids.clear(id); - int available = availableIds.incrementAndGet(); - assert available <= maxAvailableIds; - } - - int getAvailableIds() { - return availableIds.get(); - } - - int getMaxAvailableIds() { - return maxAvailableIds; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java deleted file mode 100644 index 03391c57809..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; - -/** - * Optimizes the write operations on Netty channels. - * - *

Flush operations are generally speaking expensive as these may trigger a syscall on the - * transport level. Thus it is in most cases (where write latency can be traded with throughput) a - * good idea to try to minimize flush operations as much as possible. This component allows writes - * to be accumulated and flushed together for better performance. - */ -public interface WriteCoalescer { - /** - * Writes and flushes the message to the channel, possibly at a later time, but the order of - * messages must be preserved. - */ - ChannelFuture writeAndFlush(Channel channel, Object message); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java deleted file mode 100644 index d8514bdb88c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Handling of a single connection to a Cassandra node. 
*/ -package com.datastax.oss.driver.internal.core.channel; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java deleted file mode 100644 index d2898d39925..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config; - -/** An event triggered when the configuration was changed. */ -public enum ConfigChangeEvent { - // Implementation note: to find where this event is consumed, look for references to the class - // itself, not INSTANCE (EventBus.register takes a class not an object). 
- INSTANCE -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java deleted file mode 100644 index 39c37d78c10..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; -import java.util.function.BiFunction; - -public class DerivedExecutionProfile implements DriverExecutionProfile { - - private static final Object NO_VALUE = new Object(); - - public static DerivedExecutionProfile with( - DriverExecutionProfile baseProfile, DriverOption option, Object value) { - if (baseProfile instanceof DerivedExecutionProfile) { - // Don't nest derived profiles, use same base and add to overrides - DerivedExecutionProfile previousDerived = (DerivedExecutionProfile) baseProfile; - ImmutableMap.Builder newOverrides = ImmutableMap.builder(); - for (Map.Entry override : previousDerived.overrides.entrySet()) { - if (!override.getKey().equals(option)) { - newOverrides.put(override.getKey(), override.getValue()); - } - } - newOverrides.put(option, value); - return new DerivedExecutionProfile(previousDerived.baseProfile, newOverrides.build()); - } else { - return new DerivedExecutionProfile(baseProfile, ImmutableMap.of(option, value)); - } - } - - public static DerivedExecutionProfile without( - DriverExecutionProfile baseProfile, DriverOption option) { - return with(baseProfile, option, NO_VALUE); - } - - private final DriverExecutionProfile baseProfile; - private final Map overrides; - - public DerivedExecutionProfile( - DriverExecutionProfile baseProfile, Map overrides) { - this.baseProfile = baseProfile; - this.overrides = overrides; - } - - @NonNull - @Override - public String getName() { - return baseProfile.getName(); - } 
- - @Override - public boolean isDefined(@NonNull DriverOption option) { - if (overrides.containsKey(option)) { - return overrides.get(option) != NO_VALUE; - } else { - return baseProfile.isDefined(option); - } - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBoolean); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBooleanList); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getInt); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getIntList); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLong); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLongList); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDouble); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDoubleList); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getString); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringList); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringMap); - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBytes); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return get(option, 
DriverExecutionProfile::getBytesList); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDuration); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDurationList); - } - - @NonNull - @SuppressWarnings("unchecked") - private ValueT get( - @NonNull DriverOption option, - BiFunction getter) { - Object value = overrides.get(option); - if (value == null) { - value = getter.apply(baseProfile, option); - } - if (value == null || value == NO_VALUE) { - throw new IllegalArgumentException("Missing configuration option " + option.getPath()); - } - return (ValueT) value; - } - - @NonNull - @Override - public SortedSet> entrySet() { - ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); - // builder.add() has no effect if the element already exists, so process the overrides first - // since they have higher precedence - for (Map.Entry entry : overrides.entrySet()) { - if (entry.getValue() != NO_VALUE) { - builder.add(new AbstractMap.SimpleEntry<>(entry.getKey().getPath(), entry.getValue())); - } - } - builder.addAll(baseProfile.entrySet()); - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java deleted file mode 100644 index 5775fcbe507..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.List; -import java.util.Map; - -/** @see com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoaderBuilder */ -@Deprecated -public interface DriverOptionConfigBuilder { - - @NonNull - @CheckReturnValue - default SelfT withBoolean(@NonNull DriverOption option, boolean value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withBooleanList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withInt(@NonNull DriverOption option, int value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withIntList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withLong(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withLongList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - 
default SelfT withDouble(@NonNull DriverOption option, double value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withDoubleList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withString(@NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withStringList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @SuppressWarnings("unchecked") - @NonNull - @CheckReturnValue - default SelfT withStringMap(@NonNull DriverOption option, @NonNull Map value) { - SelfT v = (SelfT) this; - for (String key : value.keySet()) { - v = (SelfT) v.with(option.getPath() + "." + key, value.get(key)); - } - return v; - } - - /** - * Specifies a size in bytes. This is separate from {@link #withLong(DriverOption, long)}, in case - * implementations want to allow users to provide sizes in a more human-readable way, for example - * "256 MB". 
- */ - @NonNull - @CheckReturnValue - default SelfT withBytes(@NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withBytes(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withBytesList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withDuration(@NonNull DriverOption option, @NonNull Duration value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withDurationList(@NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @CheckReturnValue - default SelfT withClass(@NonNull DriverOption option, @NonNull Class value) { - return with(option, value.getName()); - } - - /** Unsets an option. */ - @NonNull - @CheckReturnValue - default SelfT without(@NonNull DriverOption option) { - return with(option, null); - } - - @NonNull - @CheckReturnValue - default SelfT with(@NonNull DriverOption option, @Nullable Object value) { - return with(option.getPath(), value); - } - - /** - * Provides a simple path to value mapping, all default methods invoke this method directly. It is - * not recommended that it is used directly other than by these defaults. - */ - @NonNull - @CheckReturnValue - SelfT with(@NonNull String path, @Nullable Object value); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java deleted file mode 100644 index 1a1076e9d78..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.cloud; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CloudConfig { - - private final InetSocketAddress proxyAddress; - private final List endPoints; - private final String localDatacenter; - private final SslEngineFactory sslEngineFactory; - - CloudConfig( - @NonNull InetSocketAddress proxyAddress, - @NonNull List endPoints, - @NonNull String localDatacenter, - @NonNull SslEngineFactory sslEngineFactory) { - this.proxyAddress = proxyAddress; - this.endPoints = ImmutableList.copyOf(endPoints); - this.localDatacenter = localDatacenter; - this.sslEngineFactory = sslEngineFactory; - } - - @NonNull - public InetSocketAddress getProxyAddress() { - return proxyAddress; - } - - @NonNull - public List getEndPoints() { - return endPoints; - } - - @NonNull - public String getLocalDatacenter() { - return localDatacenter; - } - - @NonNull - public SslEngineFactory getSslEngineFactory() { - return 
sslEngineFactory; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java deleted file mode 100644 index 817b3263d25..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.cloud; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.metadata.SniEndPoint; -import com.datastax.oss.driver.internal.core.ssl.SniSslEngineFactory; -import com.datastax.oss.driver.shaded.guava.common.io.ByteStreams; -import com.datastax.oss.driver.shaded.guava.common.net.HostAndPort; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.BufferedReader; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.UnknownHostException; -import java.nio.charset.StandardCharsets; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class CloudConfigFactory { - private static final Logger LOG = LoggerFactory.getLogger(CloudConfigFactory.class); - /** - * Creates a {@link CloudConfig} with information fetched from the specified Cloud configuration - * URL. - * - *

The target URL must point to a valid secure connect bundle archive in ZIP format. - * - * @param cloudConfigUrl the URL to fetch the Cloud configuration from; cannot be null. - * @throws IOException If the Cloud configuration cannot be read. - * @throws GeneralSecurityException If the Cloud SSL context cannot be created. - */ - @NonNull - public CloudConfig createCloudConfig(@NonNull URL cloudConfigUrl) - throws IOException, GeneralSecurityException { - Objects.requireNonNull(cloudConfigUrl, "cloudConfigUrl cannot be null"); - return createCloudConfig(cloudConfigUrl.openStream()); - } - - /** - * Creates a {@link CloudConfig} with information fetched from the specified {@link InputStream}. - * - *

The stream must contain a valid secure connect bundle archive in ZIP format. Note that the - * stream will be closed after a call to that method and cannot be used anymore. - * - * @param cloudConfig the stream to read the Cloud configuration from; cannot be null. - * @throws IOException If the Cloud configuration cannot be read. - * @throws GeneralSecurityException If the Cloud SSL context cannot be created. - */ - @NonNull - public CloudConfig createCloudConfig(@NonNull InputStream cloudConfig) - throws IOException, GeneralSecurityException { - Objects.requireNonNull(cloudConfig, "cloudConfig cannot be null"); - JsonNode configJson = null; - ByteArrayOutputStream keyStoreOutputStream = null; - ByteArrayOutputStream trustStoreOutputStream = null; - ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false); - try (ZipInputStream zipInputStream = new ZipInputStream(cloudConfig)) { - ZipEntry entry; - while ((entry = zipInputStream.getNextEntry()) != null) { - String fileName = entry.getName(); - switch (fileName) { - case "config.json": - configJson = mapper.readTree(zipInputStream); - break; - case "identity.jks": - keyStoreOutputStream = new ByteArrayOutputStream(); - ByteStreams.copy(zipInputStream, keyStoreOutputStream); - break; - case "trustStore.jks": - trustStoreOutputStream = new ByteArrayOutputStream(); - ByteStreams.copy(zipInputStream, trustStoreOutputStream); - break; - } - } - } - if (configJson == null) { - throw new IllegalStateException("Invalid bundle: missing file config.json"); - } - if (keyStoreOutputStream == null) { - throw new IllegalStateException("Invalid bundle: missing file identity.jks"); - } - if (trustStoreOutputStream == null) { - throw new IllegalStateException("Invalid bundle: missing file trustStore.jks"); - } - char[] keyStorePassword = getKeyStorePassword(configJson); - char[] trustStorePassword = getTrustStorePassword(configJson); - ByteArrayInputStream keyStoreInputStream = - new 
ByteArrayInputStream(keyStoreOutputStream.toByteArray()); - ByteArrayInputStream trustStoreInputStream = - new ByteArrayInputStream(trustStoreOutputStream.toByteArray()); - SSLContext sslContext = - createSslContext( - keyStoreInputStream, keyStorePassword, trustStoreInputStream, trustStorePassword); - URL metadataServiceUrl = getMetadataServiceUrl(configJson); - JsonNode proxyMetadataJson; - try (BufferedReader proxyMetadata = fetchProxyMetadata(metadataServiceUrl, sslContext)) { - proxyMetadataJson = mapper.readTree(proxyMetadata); - } - InetSocketAddress sniProxyAddress = getSniProxyAddress(proxyMetadataJson); - List endPoints = getEndPoints(proxyMetadataJson, sniProxyAddress); - String localDatacenter = getLocalDatacenter(proxyMetadataJson); - SniSslEngineFactory sslEngineFactory = new SniSslEngineFactory(sslContext); - validateIfBundleContainsUsernamePassword(configJson); - return new CloudConfig(sniProxyAddress, endPoints, localDatacenter, sslEngineFactory); - } - - @NonNull - protected char[] getKeyStorePassword(JsonNode configFile) { - if (configFile.has("keyStorePassword")) { - return configFile.get("keyStorePassword").asText().toCharArray(); - } else { - throw new IllegalStateException("Invalid config.json: missing field keyStorePassword"); - } - } - - @NonNull - protected char[] getTrustStorePassword(JsonNode configFile) { - if (configFile.has("trustStorePassword")) { - return configFile.get("trustStorePassword").asText().toCharArray(); - } else { - throw new IllegalStateException("Invalid config.json: missing field trustStorePassword"); - } - } - - @NonNull - protected URL getMetadataServiceUrl(JsonNode configFile) throws MalformedURLException { - if (configFile.has("host")) { - String metadataServiceHost = configFile.get("host").asText(); - if (configFile.has("port")) { - int metadataServicePort = configFile.get("port").asInt(); - return new URL("https", metadataServiceHost, metadataServicePort, "/metadata"); - } else { - throw new 
IllegalStateException("Invalid config.json: missing field port"); - } - } else { - throw new IllegalStateException("Invalid config.json: missing field host"); - } - } - - protected void validateIfBundleContainsUsernamePassword(JsonNode configFile) { - if (configFile.has("username") || configFile.has("password")) { - LOG.info( - "The bundle contains config.json with username and/or password. Providing it in the bundle is deprecated and ignored."); - } - } - - @NonNull - protected SSLContext createSslContext( - @NonNull ByteArrayInputStream keyStoreInputStream, - @NonNull char[] keyStorePassword, - @NonNull ByteArrayInputStream trustStoreInputStream, - @NonNull char[] trustStorePassword) - throws IOException, GeneralSecurityException { - KeyManagerFactory kmf = createKeyManagerFactory(keyStoreInputStream, keyStorePassword); - TrustManagerFactory tmf = createTrustManagerFactory(trustStoreInputStream, trustStorePassword); - SSLContext sslContext = SSLContext.getInstance("SSL"); - sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); - return sslContext; - } - - @NonNull - protected KeyManagerFactory createKeyManagerFactory( - @NonNull InputStream keyStoreInputStream, @NonNull char[] keyStorePassword) - throws IOException, GeneralSecurityException { - KeyStore ks = KeyStore.getInstance("JKS"); - ks.load(keyStoreInputStream, keyStorePassword); - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, keyStorePassword); - Arrays.fill(keyStorePassword, (char) 0); - return kmf; - } - - @NonNull - protected TrustManagerFactory createTrustManagerFactory( - @NonNull InputStream trustStoreInputStream, @NonNull char[] trustStorePassword) - throws IOException, GeneralSecurityException { - KeyStore ts = KeyStore.getInstance("JKS"); - ts.load(trustStoreInputStream, trustStorePassword); - TrustManagerFactory tmf = - TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - 
tmf.init(ts); - Arrays.fill(trustStorePassword, (char) 0); - return tmf; - } - - @NonNull - protected BufferedReader fetchProxyMetadata( - @NonNull URL metadataServiceUrl, @NonNull SSLContext sslContext) throws IOException { - try { - HttpsURLConnection connection = (HttpsURLConnection) metadataServiceUrl.openConnection(); - connection.setSSLSocketFactory(sslContext.getSocketFactory()); - connection.setRequestMethod("GET"); - return new BufferedReader( - new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8)); - } catch (ConnectException e) { - throw new IllegalStateException( - "Unable to connect to cloud metadata service. Please make sure your cluster is not parked or terminated", - e); - } catch (UnknownHostException e) { - throw new IllegalStateException( - "Unable to resolve host for cloud metadata service. Please make sure your cluster is not terminated", - e); - } - } - - @NonNull - protected String getLocalDatacenter(@NonNull JsonNode proxyMetadata) { - JsonNode contactInfo = getContactInfo(proxyMetadata); - if (contactInfo.has("local_dc")) { - return contactInfo.get("local_dc").asText(); - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field local_dc"); - } - } - - @NonNull - protected InetSocketAddress getSniProxyAddress(@NonNull JsonNode proxyMetadata) { - JsonNode contactInfo = getContactInfo(proxyMetadata); - if (contactInfo.has("sni_proxy_address")) { - HostAndPort sniProxyHostAndPort = - HostAndPort.fromString(contactInfo.get("sni_proxy_address").asText()); - if (!sniProxyHostAndPort.hasPort()) { - throw new IllegalStateException( - "Invalid proxy metadata: missing port from field sni_proxy_address"); - } - return InetSocketAddress.createUnresolved( - sniProxyHostAndPort.getHost(), sniProxyHostAndPort.getPort()); - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field sni_proxy_address"); - } - } - - @NonNull - protected List getEndPoints( - @NonNull JsonNode proxyMetadata, 
@NonNull InetSocketAddress sniProxyAddress) { - JsonNode contactInfo = getContactInfo(proxyMetadata); - if (contactInfo.has("contact_points")) { - List endPoints = new ArrayList<>(); - JsonNode hostIdsJson = contactInfo.get("contact_points"); - for (int i = 0; i < hostIdsJson.size(); i++) { - endPoints.add(new SniEndPoint(sniProxyAddress, hostIdsJson.get(i).asText())); - } - return endPoints; - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field contact_points"); - } - } - - @NonNull - protected JsonNode getContactInfo(@NonNull JsonNode proxyMetadata) { - if (proxyMetadata.has("contact_info")) { - return proxyMetadata.get("contact_info"); - } else { - throw new IllegalStateException("Invalid proxy metadata: missing field contact_info"); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java deleted file mode 100644 index 9a74d00df4f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.composite; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -public class CompositeDriverConfig implements DriverConfig { - - private final DriverConfig primaryConfig; - private final DriverConfig fallbackConfig; - private final Map profiles = new ConcurrentHashMap<>(); - - public CompositeDriverConfig( - @NonNull DriverConfig primaryConfig, @NonNull DriverConfig fallbackConfig) { - this.primaryConfig = Objects.requireNonNull(primaryConfig); - this.fallbackConfig = Objects.requireNonNull(fallbackConfig); - } - - @NonNull - @Override - public DriverExecutionProfile getProfile(@NonNull String profileName) { - return profiles.compute( - profileName, - (k, v) -> - (v == null) - ? 
new CompositeDriverExecutionProfile(primaryConfig, fallbackConfig, profileName) - : v.refresh()); - } - - @NonNull - @Override - public Map getProfiles() { - // The map is updated lazily, if we want all the profiles we need to fetch them explicitly - for (String name : - Sets.union(primaryConfig.getProfiles().keySet(), fallbackConfig.getProfiles().keySet())) { - getProfile(name); - } - return Collections.unmodifiableMap(profiles); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java deleted file mode 100644 index 23baf458c85..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.composite; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public class CompositeDriverConfigLoader implements DriverConfigLoader { - - private final DriverConfigLoader primaryConfigLoader; - private final DriverConfigLoader fallbackConfigLoader; - - public CompositeDriverConfigLoader( - @NonNull DriverConfigLoader primaryConfigLoader, - @NonNull DriverConfigLoader fallbackConfigLoader) { - this.primaryConfigLoader = Objects.requireNonNull(primaryConfigLoader); - this.fallbackConfigLoader = Objects.requireNonNull(fallbackConfigLoader); - } - - @NonNull - @Override - public DriverConfig getInitialConfig() { - DriverConfig primaryConfig = primaryConfigLoader.getInitialConfig(); - DriverConfig fallbackConfig = fallbackConfigLoader.getInitialConfig(); - return new CompositeDriverConfig(primaryConfig, fallbackConfig); - } - - @Override - public void onDriverInit(@NonNull DriverContext context) { - fallbackConfigLoader.onDriverInit(context); - primaryConfigLoader.onDriverInit(context); - } - - @NonNull - @Override - public CompletionStage reload() { - if (!primaryConfigLoader.supportsReloading() && !fallbackConfigLoader.supportsReloading()) { - return CompletableFutures.failedFuture( - new UnsupportedOperationException( - "Reloading is not supported (this is a composite config, " - + "and neither the primary nor the fallback are reloadable)")); - } else if (!primaryConfigLoader.supportsReloading()) { - return fallbackConfigLoader.reload(); - } else if (!fallbackConfigLoader.supportsReloading()) { - return 
primaryConfigLoader.reload(); - } else { - CompletionStage primaryFuture = primaryConfigLoader.reload(); - CompletionStage fallbackFuture = fallbackConfigLoader.reload(); - CompletableFuture compositeFuture = new CompletableFuture<>(); - primaryFuture.whenComplete( - (primaryChanged, primaryError) -> - fallbackFuture.whenComplete( - (fallbackChanged, fallbackError) -> { - if (primaryError == null && fallbackError == null) { - compositeFuture.complete(primaryChanged || fallbackChanged); - } else if (fallbackError == null) { - compositeFuture.completeExceptionally(primaryError); - } else if (primaryError == null) { - compositeFuture.completeExceptionally(fallbackError); - } else { - primaryError.addSuppressed(fallbackError); - compositeFuture.completeExceptionally(primaryError); - } - })); - return compositeFuture; - } - } - - @Override - public boolean supportsReloading() { - return primaryConfigLoader.supportsReloading() || fallbackConfigLoader.supportsReloading(); - } - - @Override - public void close() { - primaryConfigLoader.close(); - fallbackConfigLoader.close(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java deleted file mode 100644 index 147d9e0bdb4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.composite; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.function.BiFunction; - -public class CompositeDriverExecutionProfile implements DriverExecutionProfile { - - private final DriverConfig primaryConfig; - private final DriverConfig fallbackConfig; - private final String profileName; - - @Nullable private volatile DriverExecutionProfile primaryProfile; - @Nullable private volatile DriverExecutionProfile fallbackProfile; - - public CompositeDriverExecutionProfile( - @NonNull DriverConfig primaryConfig, - @NonNull DriverConfig fallbackConfig, - @NonNull String profileName) { - this.primaryConfig = Objects.requireNonNull(primaryConfig); - this.fallbackConfig = Objects.requireNonNull(fallbackConfig); - this.profileName = Objects.requireNonNull(profileName); - refreshInternal(); - } - - /** - * Fetches the underlying profiles again from the two backing configs. 
This is because some config - * implementations support adding/removing profiles at runtime. - * - *

For efficiency reasons this is only done when the user fetches the profile again from the - * main config, not every time an option is fetched from the profile. - */ - public CompositeDriverExecutionProfile refresh() { - return refreshInternal(); - } - - // This method only exists to avoid calling its public, overridable variant from the constructor - private CompositeDriverExecutionProfile refreshInternal() { - // There's no `hasProfile()` in the public API because it didn't make sense until now. So - // unfortunately we have to catch the exception. - try { - primaryProfile = primaryConfig.getProfile(profileName); - } catch (IllegalArgumentException e) { - primaryProfile = null; - } - try { - fallbackProfile = fallbackConfig.getProfile(profileName); - } catch (IllegalArgumentException e) { - fallbackProfile = null; - } - - Preconditions.checkArgument( - primaryProfile != null || fallbackProfile != null, - "Unknown profile '%s'. Check your configuration.", - profileName); - return this; - } - - @NonNull - @Override - public String getName() { - return profileName; - } - - @Override - public boolean isDefined(@NonNull DriverOption option) { - DriverExecutionProfile primaryProfile = this.primaryProfile; - if (primaryProfile != null && primaryProfile.isDefined(option)) { - return true; - } else { - DriverExecutionProfile fallbackProfile = this.fallbackProfile; - return fallbackProfile != null && fallbackProfile.isDefined(option); - } - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBoolean); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBooleanList); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getInt); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return get(option, 
DriverExecutionProfile::getIntList); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLong); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getLongList); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDouble); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDoubleList); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getString); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringList); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getStringMap); - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getBytes); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return get(option, DriverExecutionProfile::getBytesList); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDuration); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return get(option, DriverExecutionProfile::getDurationList); - } - - private ValueT get( - @NonNull DriverOption option, - BiFunction getter) { - DriverExecutionProfile primaryProfile = this.primaryProfile; - if (primaryProfile != null && primaryProfile.isDefined(option)) { - return getter.apply(primaryProfile, option); - } else { - DriverExecutionProfile fallbackProfile = this.fallbackProfile; - if (fallbackProfile != null && 
fallbackProfile.isDefined(option)) { - return getter.apply(fallbackProfile, option); - } else { - throw new IllegalArgumentException("Unknown option: " + option); - } - } - } - - @NonNull - @Override - public SortedSet> entrySet() { - DriverExecutionProfile primaryProfile = this.primaryProfile; - DriverExecutionProfile fallbackProfile = this.fallbackProfile; - if (primaryProfile != null && fallbackProfile != null) { - SortedSet> result = new TreeSet<>(Map.Entry.comparingByKey()); - result.addAll(fallbackProfile.entrySet()); - result.addAll(primaryProfile.entrySet()); - return ImmutableSortedSet.copyOf(Map.Entry.comparingByKey(), result); - } else if (primaryProfile != null) { - return primaryProfile.entrySet(); - } else { - assert fallbackProfile != null; - return fallbackProfile.entrySet(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java deleted file mode 100644 index 74adbf120ca..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.map; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -/** @see MapBasedDriverConfigLoader */ -public class MapBasedDriverConfig implements DriverConfig { - - private final Map> optionsMap; - private final Map profiles = new ConcurrentHashMap<>(); - - public MapBasedDriverConfig(Map> optionsMap) { - this.optionsMap = optionsMap; - if (!optionsMap.containsKey(DriverExecutionProfile.DEFAULT_NAME)) { - throw new IllegalArgumentException( - "The options map must contain a profile named " + DriverExecutionProfile.DEFAULT_NAME); - } - createMissingProfiles(); - } - - @NonNull - @Override - public DriverExecutionProfile getProfile(@NonNull String profileName) { - return profiles.computeIfAbsent(profileName, this::newProfile); - } - - @NonNull - @Override - public Map getProfiles() { - // Refresh in case profiles were added to the backing map - createMissingProfiles(); - return Collections.unmodifiableMap(profiles); - } - - private void createMissingProfiles() { - for (Map.Entry> entry : optionsMap.entrySet()) { - String profileName = entry.getKey(); - if (!profiles.containsKey(profileName)) { - profiles.put(profileName, newProfile(profileName)); - } - } - } - - private MapBasedDriverExecutionProfile newProfile(String profileName) { - return new MapBasedDriverExecutionProfile(optionsMap, profileName); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java deleted file mode 100644 index 14f959e5dc0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.map; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; - -public class MapBasedDriverConfigLoader implements DriverConfigLoader, Consumer { - - @NonNull private final OptionsMap source; - @NonNull private final Map> rawMap; - private volatile EventBus eventBus; - - public MapBasedDriverConfigLoader( - @NonNull OptionsMap source, @NonNull Map> rawMap) { - this.source = source; - this.rawMap = rawMap; - } - - @NonNull - @Override - public DriverConfig getInitialConfig() { - return new MapBasedDriverConfig(rawMap); - } - - @Override - public void onDriverInit(@NonNull DriverContext context) { - eventBus = ((InternalDriverContext) context).getEventBus(); - source.addChangeListener(this); - } - - @Override - public void accept(OptionsMap map) { - assert eventBus != null; // listener is registered after setting this field - eventBus.fire(ConfigChangeEvent.INSTANCE); - } - - @NonNull - @Override - public CompletionStage reload() { - return CompletableFuture.completedFuture(true); - } - - @Override - public boolean supportsReloading() { - return true; - } - - @Override - public void close() { - source.removeChangeListener(this); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java deleted file mode 100644 index 4234befd94b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.map; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; - -/** @see MapBasedDriverConfigLoader */ -public class MapBasedDriverExecutionProfile implements DriverExecutionProfile { - - private final String profileName; - // The backing map for the current profile - private final Map profile; - // The backing map for the default profile (if the current one is not the default) - private final Map defaultProfile; - - public MapBasedDriverExecutionProfile( - Map> optionsMap, String profileName) { - this( - profileName, - optionsMap.get(profileName), - profileName.equals(DriverExecutionProfile.DEFAULT_NAME) - ? Collections.emptyMap() - : optionsMap.get(DriverExecutionProfile.DEFAULT_NAME)); - Preconditions.checkArgument( - optionsMap.containsKey(profileName), - "Unknown profile '%s'. 
Check your configuration.", - profileName); - } - - public MapBasedDriverExecutionProfile( - String profileName, - Map profile, - Map defaultProfile) { - this.profileName = profileName; - this.profile = profile; - this.defaultProfile = defaultProfile; - } - - @NonNull - @Override - public String getName() { - return profileName; - } - - @Override - public boolean isDefined(@NonNull DriverOption option) { - return profile.containsKey(option) || defaultProfile.containsKey(option); - } - - // Driver options don't encode the type, everything relies on the user putting the right types in - // the backing map, so no point in trying to type-check. - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - @NonNull - private T get(@NonNull DriverOption option) { - Object value = profile.getOrDefault(option, defaultProfile.get(option)); - if (value == null) { - throw new IllegalArgumentException("Missing configuration option " + option.getPath()); - } - return (T) value; - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return get(option); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return get(option); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return get(option); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption 
option) { - return get(option); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - return get(option); - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return get(option); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return get(option); - } - - @NonNull - @Override - public SortedSet> entrySet() { - ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); - for (Map backingMap : - // builder.add() ignores duplicates, so process higher precedence backing maps first - ImmutableList.of(profile, defaultProfile)) { - for (Map.Entry entry : backingMap.entrySet()) { - builder.add(new AbstractMap.SimpleEntry<>(entry.getKey().getPath(), entry.getValue())); - } - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java deleted file mode 100644 index f1bfbea8249..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java +++ /dev/null @@ -1,374 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigParseOptions; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ScheduledFuture; -import java.io.File; -import java.net.URL; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default loader; it is based on Typesafe Config and optionally reloads at a configurable - * interval. - */ -@ThreadSafe -public class DefaultDriverConfigLoader implements DriverConfigLoader { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultDriverConfigLoader.class); - - public static final String DEFAULT_ROOT_PATH = "datastax-java-driver"; - - public static final Supplier DEFAULT_CONFIG_SUPPLIER = - () -> { - ConfigFactory.invalidateCaches(); - // The thread's context class loader will be used for application classpath resources, - // while the driver class loader will be used for reference classpath resources. - return ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.defaultApplication()) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve() - .getConfig(DEFAULT_ROOT_PATH); - }; - - @NonNull - public static DefaultDriverConfigLoader fromClasspath( - @NonNull String resourceBaseName, @NonNull ClassLoader appClassLoader) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback( - ConfigFactory.parseResourcesAnySyntax( - resourceBaseName, - ConfigParseOptions.defaults().setClassLoader(appClassLoader))) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }); - } - - @NonNull - public static DriverConfigLoader fromFile(@NonNull File file) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseFileAnySyntax(file)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }); - } - - @NonNull - public static DriverConfigLoader 
fromUrl(@NonNull URL url) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseURL(url)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }); - } - - @NonNull - public static DefaultDriverConfigLoader fromString(@NonNull String contents) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseString(contents)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve(); - return config.getConfig(DEFAULT_ROOT_PATH); - }, - false); - } - - private final Supplier configSupplier; - private final TypesafeDriverConfig driverConfig; - private final boolean supportsReloading; - - private volatile SingleThreaded singleThreaded; - - /** - * Builds a new instance with the default Typesafe config loading rules (documented in {@link - * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options. This - * constructor enables config reloading (that is, {@link #supportsReloading} will return true). - * - *

Application-specific classpath resources will be located using the {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. This might not be - * suitable for OSGi deployments, which should use {@link #DefaultDriverConfigLoader(ClassLoader)} - * instead. - */ - public DefaultDriverConfigLoader() { - this(DEFAULT_CONFIG_SUPPLIER); - } - - /** - * Builds a new instance with the default Typesafe config loading rules (documented in {@link - * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options, except that - * application-specific classpath resources will be located using the provided {@link ClassLoader} - * instead of {@linkplain Thread#getContextClassLoader() the current thread's context class - * loader}. This constructor enables config reloading (that is, {@link #supportsReloading} will - * return true). - */ - public DefaultDriverConfigLoader(@NonNull ClassLoader appClassLoader) { - this( - () -> { - ConfigFactory.invalidateCaches(); - return ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.defaultApplication(appClassLoader)) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) - .resolve() - .getConfig(DEFAULT_ROOT_PATH); - }); - } - - /** - * Builds an instance with custom arguments, if you want to load the configuration from somewhere - * else. This constructor enables config reloading (that is, {@link #supportsReloading} will - * return true). - * - * @param configSupplier A supplier for the Typesafe {@link Config}; it will be invoked once when - * this object is instantiated, and at each reload attempt, if reloading is enabled. - */ - public DefaultDriverConfigLoader(@NonNull Supplier configSupplier) { - this(configSupplier, true); - } - - /** - * Builds an instance with custom arguments, if you want to load the configuration from somewhere - * else and/or modify config reload behavior. 
- * - * @param configSupplier A supplier for the Typesafe {@link Config}; it will be invoked once when - * this object is instantiated, and at each reload attempt, if reloading is enabled. - * @param supportsReloading Whether config reloading should be enabled or not. - */ - public DefaultDriverConfigLoader( - @NonNull Supplier configSupplier, boolean supportsReloading) { - this.configSupplier = configSupplier; - this.driverConfig = new TypesafeDriverConfig(configSupplier.get()); - this.supportsReloading = supportsReloading; - } - - @NonNull - @Override - public DriverConfig getInitialConfig() { - return driverConfig; - } - - @Override - public void onDriverInit(@NonNull DriverContext driverContext) { - this.singleThreaded = new SingleThreaded((InternalDriverContext) driverContext); - } - - @NonNull - @Override - public final CompletionStage reload() { - if (supportsReloading) { - CompletableFuture result = new CompletableFuture<>(); - RunOrSchedule.on(singleThreaded.adminExecutor, () -> singleThreaded.reload(result)); - return result; - } else { - return CompletableFutures.failedFuture( - new UnsupportedOperationException( - "This instance of DefaultDriverConfigLoader does not support reloading")); - } - } - - @Override - public final boolean supportsReloading() { - return supportsReloading; - } - - /** For internal use only, this leaks a Typesafe config type. */ - @NonNull - public Supplier getConfigSupplier() { - return configSupplier; - } - - @Override - public void close() { - SingleThreaded singleThreaded = this.singleThreaded; - if (singleThreaded != null && !singleThreaded.adminExecutor.terminationFuture().isDone()) { - try { - RunOrSchedule.on(singleThreaded.adminExecutor, singleThreaded::close); - } catch (RejectedExecutionException e) { - // Checking the future is racy, there is still a tiny window that could get us here. 
- // We can safely ignore this error because, if the execution is rejected, the periodic - // reload task, if any, has been already cancelled. - } - } - } - - /** - * Constructs a builder that may be used to provide additional configuration beyond those defined - * in your configuration files programmatically. For example: - * - *

{@code
-   * CqlSession session = CqlSession.builder()
-   *   .withConfigLoader(DefaultDriverConfigLoader.builder()
-   *     .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(500))
-   *     .build())
-   *   .build();
-   * }
- * - *

In the general case, use of this is not recommended, but it may be useful in situations - * where configuration must be defined at runtime or is derived from some other configuration - * source. - * - * @deprecated this feature is now available in the public API. Use {@link - * DriverConfigLoader#programmaticBuilder()} instead. - */ - @Deprecated - @NonNull - public static DefaultDriverConfigLoaderBuilder builder() { - return new DefaultDriverConfigLoaderBuilder(); - } - - private class SingleThreaded { - private final String logPrefix; - private final EventExecutor adminExecutor; - private final EventBus eventBus; - private final DriverExecutionProfile config; - - private Duration reloadInterval; - private ScheduledFuture periodicTaskHandle; - private boolean closeWasCalled; - - private SingleThreaded(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.eventBus = context.getEventBus(); - this.config = context.getConfig().getDefaultProfile(); - this.reloadInterval = - context - .getConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.CONFIG_RELOAD_INTERVAL); - - RunOrSchedule.on(adminExecutor, this::schedulePeriodicReload); - } - - private void schedulePeriodicReload() { - assert adminExecutor.inEventLoop(); - // Cancel any previously running task - if (periodicTaskHandle != null) { - periodicTaskHandle.cancel(false); - } - if (reloadInterval.isZero()) { - LOG.debug("[{}] Reload interval is 0, disabling periodic reloading", logPrefix); - } else { - LOG.debug("[{}] Scheduling periodic reloading with interval {}", logPrefix, reloadInterval); - periodicTaskHandle = - adminExecutor.scheduleAtFixedRate( - this::reloadInBackground, - reloadInterval.toNanos(), - reloadInterval.toNanos(), - TimeUnit.NANOSECONDS); - } - } - - /** - * @param reloadedFuture a future to complete when the reload is complete (might be null if the - * caller is not 
interested in being notified) - */ - private void reload(CompletableFuture reloadedFuture) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - if (reloadedFuture != null) { - reloadedFuture.completeExceptionally(new IllegalStateException("session is closing")); - } - return; - } - try { - boolean changed = driverConfig.reload(configSupplier.get()); - if (changed) { - LOG.info("[{}] Detected a configuration change", logPrefix); - eventBus.fire(ConfigChangeEvent.INSTANCE); - Duration newReloadInterval = - config.getDuration(DefaultDriverOption.CONFIG_RELOAD_INTERVAL); - if (!newReloadInterval.equals(reloadInterval)) { - reloadInterval = newReloadInterval; - schedulePeriodicReload(); - } - } else { - LOG.debug("[{}] Reloaded configuration but it hasn't changed", logPrefix); - } - if (reloadedFuture != null) { - reloadedFuture.complete(changed); - } - } catch (Error | RuntimeException e) { - if (reloadedFuture != null) { - reloadedFuture.completeExceptionally(e); - } else { - Loggers.warnWithException( - LOG, "[{}] Unexpected exception during scheduled reload", logPrefix, e); - } - } - } - - private void reloadInBackground() { - reload(null); - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - if (periodicTaskHandle != null) { - periodicTaskHandle.cancel(false); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java deleted file mode 100644 index 3096fd85ffb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigValueFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * @deprecated this feature is now available in the public API. Use {@link - * DriverConfigLoader#programmaticBuilder()} instead. - */ -@NotThreadSafe -@Deprecated -public class DefaultDriverConfigLoaderBuilder - implements com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder< - DefaultDriverConfigLoaderBuilder> { - - private NullAllowingImmutableMap.Builder values = - NullAllowingImmutableMap.builder(); - - /** - * @return a new {@link ProfileBuilder} to provide programmatic configuration at a profile level. - * @see #withProfile(String, Profile) - */ - @NonNull - public static ProfileBuilder profileBuilder() { - return new ProfileBuilder(); - } - - /** Adds configuration for a profile constructed using {@link #profileBuilder()} by name. 
*/ - @NonNull - public DefaultDriverConfigLoaderBuilder withProfile( - @NonNull String profileName, @NonNull Profile profile) { - String prefix = "profiles." + profileName + "."; - for (Map.Entry entry : profile.values.entrySet()) { - this.with(prefix + entry.getKey(), entry.getValue()); - } - return this; - } - - /** - * @return constructed {@link DriverConfigLoader} using the configuration passed into this - * builder. - */ - @NonNull - public DriverConfigLoader build() { - // fallback on the default config supplier (config file) - return new DefaultDriverConfigLoader( - () -> buildConfig().withFallback(DefaultDriverConfigLoader.DEFAULT_CONFIG_SUPPLIER.get())); - } - - /** @return A {@link Config} containing only the options provided */ - protected Config buildConfig() { - Config config = ConfigFactory.empty(); - for (Map.Entry entry : values.build().entrySet()) { - config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entry.getValue())); - } - return config; - } - - @NonNull - @Override - public DefaultDriverConfigLoaderBuilder with(@NonNull String path, @Nullable Object value) { - values.put(path, value); - return this; - } - - /** A builder for specifying options at a profile level using {@code withXXX} methods. */ - @Deprecated - public static final class ProfileBuilder - implements com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder< - ProfileBuilder> { - - final NullAllowingImmutableMap.Builder values = - NullAllowingImmutableMap.builder(); - - private ProfileBuilder() {} - - @NonNull - @Override - public ProfileBuilder with(@NonNull String path, @Nullable Object value) { - values.put(path, value); - return this; - } - - @NonNull - public Profile build() { - return new Profile(values.build()); - } - } - - /** - * A single-purpose holder of profile options as a map to be consumed by {@link - * DefaultDriverConfigLoaderBuilder}. 
- */ - public static final class Profile { - final Map values; - - private Profile(Map values) { - this.values = values; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java deleted file mode 100644 index 2a7f6379362..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigValueFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.function.Supplier; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class DefaultProgrammaticDriverConfigLoaderBuilder - implements ProgrammaticDriverConfigLoaderBuilder { - - public static final Supplier DEFAULT_FALLBACK_SUPPLIER = - () -> - ConfigFactory.defaultApplication() - // Do not remove root path here, it must be done after merging configs - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())); - - private final Map values = new HashMap<>(); - - private final Supplier fallbackSupplier; - private final String rootPath; - - private String currentProfileName = DriverExecutionProfile.DEFAULT_NAME; - - /** - * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} with default - * settings. - * - *

Fallback configuration for options that haven't been specified programmatically will be - * obtained from standard classpath resources. Application-specific classpath resources will be - * located using the {@linkplain Thread#getContextClassLoader() the current thread's context class - * loader}. This might not be suitable for OSGi deployments, which should use {@link - * #DefaultProgrammaticDriverConfigLoaderBuilder(ClassLoader)} instead. - */ - public DefaultProgrammaticDriverConfigLoaderBuilder() { - this(DEFAULT_FALLBACK_SUPPLIER, DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } - - /** - * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} with default - * settings but a custom class loader. - * - *

Fallback configuration for options that haven't been specified programmatically will be - * obtained from standard classpath resources. Application-specific classpath resources will be - * located using the provided {@link ClassLoader} instead of {@linkplain - * Thread#getContextClassLoader() the current thread's context class loader}. - */ - public DefaultProgrammaticDriverConfigLoaderBuilder(@NonNull ClassLoader appClassLoader) { - this( - () -> - ConfigFactory.defaultApplication(appClassLoader) - .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())), - DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } - - /** - * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} using a custom - * fallback config supplier. - * - * @param fallbackSupplier the supplier that will provide fallback configuration for options that - * haven't been specified programmatically. - * @param rootPath the root path used in non-programmatic sources (fallback reference.conf and - * system properties). In most cases it should be {@link - * DefaultDriverConfigLoader#DEFAULT_ROOT_PATH}. Cannot be null but can be empty. - */ - public DefaultProgrammaticDriverConfigLoaderBuilder( - @NonNull Supplier fallbackSupplier, @NonNull String rootPath) { - this.fallbackSupplier = fallbackSupplier; - this.rootPath = rootPath; - } - - private ProgrammaticDriverConfigLoaderBuilder with( - @NonNull DriverOption option, @Nullable Object value) { - return with(option.getPath(), value); - } - - private ProgrammaticDriverConfigLoaderBuilder with(@NonNull String path, @Nullable Object value) { - if (!DriverExecutionProfile.DEFAULT_NAME.equals(currentProfileName)) { - path = "profiles." + currentProfileName + "." + path; - } - if (!rootPath.isEmpty()) { - path = rootPath + "." 
+ path; - } - values.put(path, value); - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder startProfile(@NonNull String profileName) { - currentProfileName = Objects.requireNonNull(profileName); - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder endProfile() { - currentProfileName = DriverExecutionProfile.DEFAULT_NAME; - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBoolean( - @NonNull DriverOption option, boolean value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBooleanList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withInt(@NonNull DriverOption option, int value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withIntList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withLong(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withLongList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDouble( - @NonNull DriverOption option, double value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDoubleList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withString( - @NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @Override - public 
ProgrammaticDriverConfigLoaderBuilder withStringList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withStringMap( - @NonNull DriverOption option, @NonNull Map value) { - for (String key : value.keySet()) { - this.with(option.getPath() + "." + key, value.get(key)); - } - return this; - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBytes(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withBytesList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDuration( - @NonNull DriverOption option, @NonNull Duration value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder withDurationList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public ProgrammaticDriverConfigLoaderBuilder without(@NonNull DriverOption option) { - return with(option, null); - } - - @NonNull - @Override - public DriverConfigLoader build() { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config programmaticConfig = buildConfig(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(programmaticConfig) - .withFallback(fallbackSupplier.get()) - .resolve(); - // Only remove rootPath after the merge between system properties - // and fallback configuration, since both are supposed to - // contain the same rootPath prefix. - return rootPath.isEmpty() ? 
config : config.getConfig(rootPath); - }); - } - - private Config buildConfig() { - Config config = ConfigFactory.empty(); - for (Map.Entry entry : values.entrySet()) { - config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entry.getValue())); - } - return config; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java deleted file mode 100644 index e1d8c779f2c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static com.typesafe.config.ConfigValueType.OBJECT; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigObject; -import com.typesafe.config.ConfigOrigin; -import com.typesafe.config.ConfigOriginFactory; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.URL; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class TypesafeDriverConfig implements DriverConfig { - - private static final Logger LOG = LoggerFactory.getLogger(TypesafeDriverConfig.class); - private static final ConfigOrigin DEFAULT_OVERRIDES_ORIGIN = - ConfigOriginFactory.newSimple("default was overridden programmatically"); - - private final ImmutableMap profiles; - // Only used to detect if reload saw any change - private volatile Config lastLoadedConfig; - - private final Map defaultOverrides = new ConcurrentHashMap<>(); - - private final TypesafeDriverExecutionProfile.Base defaultProfile; - - public TypesafeDriverConfig(Config config) { - this.lastLoadedConfig = config; - Map profileConfigs = extractProfiles(config); - - ImmutableMap.Builder builder = - ImmutableMap.builder(); - for (Map.Entry entry : profileConfigs.entrySet()) { - builder.put( - entry.getKey(), - new TypesafeDriverExecutionProfile.Base(entry.getKey(), entry.getValue())); - } - this.profiles = builder.build(); - this.defaultProfile = 
profiles.get(DriverExecutionProfile.DEFAULT_NAME); - } - - /** @return whether the configuration changed */ - public boolean reload(Config config) { - config = applyDefaultOverrides(config); - if (config.equals(lastLoadedConfig)) { - return false; - } else { - lastLoadedConfig = config; - try { - Map profileConfigs = extractProfiles(config); - for (Map.Entry entry : profileConfigs.entrySet()) { - String profileName = entry.getKey(); - TypesafeDriverExecutionProfile.Base profile = this.profiles.get(profileName); - if (profile == null) { - LOG.warn( - "Unknown profile '{}' while reloading configuration. " - + "Adding profiles at runtime is not supported.", - profileName); - } else { - profile.refresh(entry.getValue()); - } - } - return true; - } catch (Throwable t) { - Loggers.warnWithException(LOG, "Error reloading configuration, keeping previous one", t); - return false; - } - } - } - - /* - * Processes the raw configuration to extract profiles. For example: - * { - * foo = 1, bar = 2 - * profiles { - * custom1 { bar = 3 } - * } - * } - * Would produce: - * "default" => { foo = 1, bar = 2 } - * "custom1" => { foo = 1, bar = 3 } - */ - private Map extractProfiles(Config sourceConfig) { - ImmutableMap.Builder result = ImmutableMap.builder(); - - Config defaultProfileConfig = sourceConfig.withoutPath("profiles"); - result.put(DriverExecutionProfile.DEFAULT_NAME, defaultProfileConfig); - - // The rest of the method is a bit confusing because we navigate between Typesafe config's two - // APIs, see https://github.com/typesafehub/config#understanding-config-and-configobject - // In an attempt to clarify: - // xxxObject = `ConfigObject` API (config as a hierarchical structure) - // xxxConfig = `Config` API (config as a flat set of options with hierarchical paths) - ConfigObject rootObject = sourceConfig.root(); - if (rootObject.containsKey("profiles") && rootObject.get("profiles").valueType() == OBJECT) { - ConfigObject profilesObject = (ConfigObject) 
rootObject.get("profiles"); - for (String profileName : profilesObject.keySet()) { - if (profileName.equals(DriverExecutionProfile.DEFAULT_NAME)) { - throw new IllegalArgumentException( - String.format( - "Can't have %s as a profile name because it's used internally. Pick another name.", - profileName)); - } - ConfigValue profileObject = profilesObject.get(profileName); - if (profileObject.valueType() == OBJECT) { - Config profileConfig = ((ConfigObject) profileObject).toConfig(); - result.put(profileName, profileConfig.withFallback(defaultProfileConfig)); - } - } - } - return result.build(); - } - - @Override - public DriverExecutionProfile getDefaultProfile() { - return defaultProfile; - } - - @NonNull - @Override - public DriverExecutionProfile getProfile(@NonNull String profileName) { - if (profileName.equals(DriverExecutionProfile.DEFAULT_NAME)) { - return defaultProfile; - } - return Optional.ofNullable(profiles.get(profileName)) - .orElseThrow( - () -> - new IllegalArgumentException( - String.format("Unknown profile '%s'. Check your configuration.", profileName))); - } - - @NonNull - @Override - public Map getProfiles() { - return profiles; - } - - /** - * Replace the given options, only if the original values came from {@code - * reference.conf}: if the option was set explicitly in {@code application.conf}, then the - * override is ignored. - * - *

The overrides are also taken into account in profiles, and survive reloads. If this method - * is invoked multiple times, the last value for each option will be used. Note that it is - * currently not possible to use {@code null} as a value. - */ - public void overrideDefaults(@NonNull Map overrides) { - defaultOverrides.putAll(overrides); - reload(lastLoadedConfig); - } - - private Config applyDefaultOverrides(Config source) { - Config result = source; - for (Map.Entry entry : defaultOverrides.entrySet()) { - String path = entry.getKey().getPath(); - Object value = entry.getValue(); - if (isDefault(source, path)) { - LOG.debug("Replacing default value for {} by {}", path, value); - result = - result.withValue( - path, ConfigValueFactory.fromAnyRef(value).withOrigin(DEFAULT_OVERRIDES_ORIGIN)); - } else { - LOG.debug( - "Ignoring default override for {} because the user has overridden the value", path); - } - } - return result; - } - - // Whether the value in the given path comes from the reference.conf in the driver JAR. - private static boolean isDefault(Config config, String path) { - if (!config.hasPath(path)) { - return false; - } - ConfigOrigin origin = config.getValue(path).origin(); - if (origin.equals(DEFAULT_OVERRIDES_ORIGIN)) { - // Same default was overridden twice, should use the last value - return true; - } - URL url = origin.url(); - return url != null && url.toString().endsWith("reference.conf"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java deleted file mode 100644 index b7dd5abe42e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; -import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import com.typesafe.config.ConfigValue; -import com.typesafe.config.ConfigValueFactory; -import com.typesafe.config.ConfigValueType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class TypesafeDriverExecutionProfile implements DriverExecutionProfile { - - /** The original profile in the driver's configuration that this profile was derived from. 
*/ - protected abstract Base getBaseProfile(); - - /** The extra options that were added with {@code withXxx} methods. */ - protected abstract Config getAddedOptions(); - - /** The actual options that will be used to answer {@code getXxx} calls. */ - protected abstract Config getEffectiveOptions(); - - protected final ConcurrentMap cache = new ConcurrentHashMap<>(); - - @Override - public boolean isDefined(@NonNull DriverOption option) { - return getEffectiveOptions().hasPath(option.getPath()); - } - - @Override - public boolean getBoolean(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBoolean); - } - - // We override `with*` methods because they can be implemented a bit better with Typesafe config - @NonNull - @Override - public DriverExecutionProfile withBoolean(@NonNull DriverOption option, boolean value) { - return with(option, value); - } - - @NonNull - @Override - public List getBooleanList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBooleanList); - } - - @NonNull - @Override - public DriverExecutionProfile withBooleanList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @Override - public int getInt(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getInt); - } - - @NonNull - @Override - public DriverExecutionProfile withInt(@NonNull DriverOption option, int value) { - return with(option, value); - } - - @NonNull - @Override - public List getIntList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getIntList); - } - - @NonNull - @Override - public DriverExecutionProfile withIntList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @Override - public long getLong(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getLong); - } - - @NonNull - @Override 
- public DriverExecutionProfile withLong(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public List getLongList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getLongList); - } - - @NonNull - @Override - public DriverExecutionProfile withLongList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @Override - public double getDouble(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDouble); - } - - @NonNull - @Override - public DriverExecutionProfile withDouble(@NonNull DriverOption option, double value) { - return with(option, value); - } - - @NonNull - @Override - public List getDoubleList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDoubleList); - } - - @NonNull - @Override - public DriverExecutionProfile withDoubleList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public String getString(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getString); - } - - @NonNull - @Override - public DriverExecutionProfile withString(@NonNull DriverOption option, @NonNull String value) { - return with(option, value); - } - - @NonNull - @Override - public List getStringList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getStringList); - } - - @NonNull - @Override - public DriverExecutionProfile withStringList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public Map getStringMap(@NonNull DriverOption option) { - Config subConfig = getCached(option.getPath(), getEffectiveOptions()::getConfig); - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : subConfig.entrySet()) { - if 
(entry.getValue().valueType().equals(ConfigValueType.STRING)) { - builder.put(entry.getKey(), (String) entry.getValue().unwrapped()); - } - } - return builder.build(); - } - - @NonNull - @Override - public DriverExecutionProfile withStringMap( - @NonNull DriverOption option, @NonNull Map map) { - Base base = getBaseProfile(); - // Add the new option to any already derived options - Config newAdded = getAddedOptions(); - for (String key : map.keySet()) { - newAdded = - newAdded.withValue( - option.getPath() + "." + key, ConfigValueFactory.fromAnyRef(map.get(key))); - } - Derived derived = new Derived(base, newAdded); - base.register(derived); - return derived; - } - - @Override - public long getBytes(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBytes); - } - - @NonNull - @Override - public DriverExecutionProfile withBytes(@NonNull DriverOption option, long value) { - return with(option, value); - } - - @NonNull - @Override - public List getBytesList(DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getBytesList); - } - - @NonNull - @Override - public DriverExecutionProfile withBytesList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public Duration getDuration(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDuration); - } - - @NonNull - @Override - public DriverExecutionProfile withDuration( - @NonNull DriverOption option, @NonNull Duration value) { - return with(option, value); - } - - @NonNull - @Override - public List getDurationList(@NonNull DriverOption option) { - return getCached(option.getPath(), getEffectiveOptions()::getDurationList); - } - - @NonNull - @Override - public DriverExecutionProfile withDurationList( - @NonNull DriverOption option, @NonNull List value) { - return with(option, value); - } - - @NonNull - @Override - public DriverExecutionProfile 
without(@NonNull DriverOption option) { - return with(option, null); - } - - @NonNull - @Override - public Object getComparisonKey(@NonNull DriverOption option) { - // This method has a default implementation in the interface, but here we can do it in one line: - return getEffectiveOptions().getConfig(option.getPath()); - } - - @NonNull - @Override - public SortedSet> entrySet() { - ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); - for (Map.Entry entry : getEffectiveOptions().entrySet()) { - builder.add(new AbstractMap.SimpleEntry<>(entry.getKey(), entry.getValue().unwrapped())); - } - return builder.build(); - } - - private T getCached(String path, Function compute) { - // compute's signature guarantees we get a T, and this is the only place where we mutate the - // entry - @SuppressWarnings("unchecked") - T t = (T) cache.computeIfAbsent(path, compute); - return t; - } - - private DriverExecutionProfile with(@NonNull DriverOption option, @Nullable Object value) { - Base base = getBaseProfile(); - // Add the new option to any already derived options - Config newAdded = - getAddedOptions().withValue(option.getPath(), ConfigValueFactory.fromAnyRef(value)); - Derived derived = new Derived(base, newAdded); - base.register(derived); - return derived; - } - - /** A profile that was loaded directly from the driver's configuration. 
*/ - @ThreadSafe - static class Base extends TypesafeDriverExecutionProfile { - - private final String name; - private volatile Config options; - private volatile Set derivedProfiles; - - Base(String name, Config options) { - this.name = name; - this.options = options; - } - - @NonNull - @Override - public String getName() { - return name; - } - - @Override - protected Base getBaseProfile() { - return this; - } - - @Override - protected Config getAddedOptions() { - return ConfigFactory.empty(); - } - - @Override - protected Config getEffectiveOptions() { - return options; - } - - void refresh(Config newOptions) { - this.options = newOptions; - this.cache.clear(); - if (derivedProfiles != null) { - for (Derived derivedProfile : derivedProfiles) { - derivedProfile.refresh(); - } - } - } - - void register(Derived derivedProfile) { - getDerivedProfiles().add(derivedProfile); - } - - // Lazy init - private Set getDerivedProfiles() { - Set result = derivedProfiles; - if (result == null) { - synchronized (this) { - result = derivedProfiles; - if (result == null) { - derivedProfiles = - result = Collections.newSetFromMap(new MapMaker().weakKeys().makeMap()); - } - } - } - return result; - } - } - - /** - * A profile that was copied from another profile programmatically using {@code withXxx} methods. 
- */ - @ThreadSafe - static class Derived extends TypesafeDriverExecutionProfile { - - private final Base baseProfile; - private final Config addedOptions; - private volatile Config effectiveOptions; - - Derived(Base baseProfile, Config addedOptions) { - this.baseProfile = baseProfile; - this.addedOptions = addedOptions; - refresh(); - } - - void refresh() { - this.effectiveOptions = addedOptions.withFallback(baseProfile.getEffectiveOptions()); - this.cache.clear(); - } - - @NonNull - @Override - public String getName() { - return baseProfile.getName(); - } - - @Override - protected Base getBaseProfile() { - return baseProfile; - } - - @Override - protected Config getAddedOptions() { - return addedOptions; - } - - @Override - protected Config getEffectiveOptions() { - return effectiveOptions; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java deleted file mode 100644 index 72e0ba5ae3d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Implementation of the driver configuration based on the Typesafe config library. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java deleted file mode 100644 index 03edb38f8d4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.connection; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reconnection policy that waits a constant time between each reconnection attempt. - * - *

To activate this policy, modify the {@code advanced.reconnection-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.reconnection-policy {
- *     class = ConstantReconnectionPolicy
- *     base-delay = 1 second
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -public class ConstantReconnectionPolicy implements ReconnectionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(ConstantReconnectionPolicy.class); - - private final String logPrefix; - private final ReconnectionSchedule schedule; - - /** Builds a new instance. */ - public ConstantReconnectionPolicy(DriverContext context) { - this.logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Duration delay = config.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY); - if (delay.isNegative()) { - throw new IllegalArgumentException( - String.format( - "Invalid negative delay for " - + DefaultDriverOption.RECONNECTION_BASE_DELAY.getPath() - + " (got %d)", - delay)); - } - this.schedule = () -> delay; - } - - @NonNull - @Override - public ReconnectionSchedule newNodeSchedule(@NonNull Node node) { - LOG.debug("[{}] Creating new schedule for {}", logPrefix, node); - return schedule; - } - - @NonNull - @Override - public ReconnectionSchedule newControlConnectionSchedule( - @SuppressWarnings("ignored") boolean isInitialConnection) { - LOG.debug("[{}] Creating new schedule for the control connection", logPrefix); - return schedule; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java deleted file mode 100644 index 5fa04cb63d6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.connection; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.concurrent.ThreadLocalRandom; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reconnection policy that waits exponentially longer between each reconnection attempt (but - * keeps a constant delay once a maximum delay is reached). - * - *

It uses the same schedule implementation for individual nodes or the control connection: - * reconnection attempt {@code i} will be tried {@code Math.min(2^(i-1) * getBaseDelayMs(), - * getMaxDelayMs())} milliseconds after the previous one. A random amount of jitter (+/- 15%) will - * be added to the pure exponential delay value to avoid situations where many clients are in the - * reconnection process at exactly the same time. The jitter will never cause the delay to be less - * than the base delay, or more than the max delay. - * - *

To activate this policy, modify the {@code advanced.reconnection-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.reconnection-policy {
- *     class = ExponentialReconnectionPolicy
- *     base-delay = 1 second
- *     max-delay = 60 seconds
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ExponentialReconnectionPolicy implements ReconnectionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(ExponentialReconnectionPolicy.class); - - private final String logPrefix; - private final long baseDelayMs; - private final long maxDelayMs; - private final long maxAttempts; - - /** Builds a new instance. */ - public ExponentialReconnectionPolicy(DriverContext context) { - this.logPrefix = context.getSessionName(); - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - this.baseDelayMs = config.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY).toMillis(); - this.maxDelayMs = config.getDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY).toMillis(); - - Preconditions.checkArgument( - baseDelayMs > 0, - "%s must be strictly positive (got %s)", - DefaultDriverOption.RECONNECTION_BASE_DELAY.getPath(), - baseDelayMs); - Preconditions.checkArgument( - maxDelayMs >= 0, - "%s must be positive (got %s)", - DefaultDriverOption.RECONNECTION_MAX_DELAY.getPath(), - maxDelayMs); - Preconditions.checkArgument( - maxDelayMs >= baseDelayMs, - "%s must be bigger than %s (got %s, %s)", - DefaultDriverOption.RECONNECTION_MAX_DELAY.getPath(), - DefaultDriverOption.RECONNECTION_BASE_DELAY.getPath(), - maxDelayMs, - baseDelayMs); - - // Maximum number of attempts after which we overflow - int ceil = (baseDelayMs & (baseDelayMs - 1)) == 0 ? 0 : 1; - this.maxAttempts = 64L - Long.numberOfLeadingZeros(Long.MAX_VALUE / baseDelayMs) - ceil; - } - - /** - * The base delay in milliseconds for this policy (e.g. the delay before the first reconnection - * attempt). - * - * @return the base delay in milliseconds for this policy. - */ - public long getBaseDelayMs() { - return baseDelayMs; - } - - /** - * The maximum delay in milliseconds between reconnection attempts for this policy. 
- * - * @return the maximum delay in milliseconds between reconnection attempts for this policy. - */ - public long getMaxDelayMs() { - return maxDelayMs; - } - - @NonNull - @Override - public ReconnectionSchedule newNodeSchedule(@NonNull Node node) { - LOG.debug("[{}] Creating new schedule for {}", logPrefix, node); - return new ExponentialSchedule(); - } - - @NonNull - @Override - public ReconnectionSchedule newControlConnectionSchedule( - @SuppressWarnings("ignored") boolean isInitialConnection) { - LOG.debug("[{}] Creating new schedule for the control connection", logPrefix); - return new ExponentialSchedule(); - } - - @Override - public void close() { - // nothing to do - } - - private class ExponentialSchedule implements ReconnectionSchedule { - - private int attempts; - - @NonNull - @Override - public Duration nextDelay() { - long delay = (attempts > maxAttempts) ? maxDelayMs : calculateDelayWithJitter(); - return Duration.ofMillis(delay); - } - - private long calculateDelayWithJitter() { - // assert we haven't hit the max attempts - assert attempts <= maxAttempts; - // get the pure exponential delay based on the attempt count - long delay = Math.min(baseDelayMs * (1L << attempts++), maxDelayMs); - // calculate up to 15% jitter, plus or minus (i.e. 
85 - 115% of the pure value) - int jitter = ThreadLocalRandom.current().nextInt(85, 116); - // apply jitter - delay = (jitter * delay) / 100; - // ensure the final delay is between the base and max - delay = Math.min(maxDelayMs, Math.max(baseDelayMs, delay)); - return delay; - } - } - - public long getMaxAttempts() { - return maxAttempts; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java deleted file mode 100644 index 3074bda2398..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java +++ /dev/null @@ -1,1064 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static com.datastax.oss.driver.internal.core.util.Dependency.JACKSON; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.InsightsClientLifecycleListener; -import com.datastax.dse.driver.internal.core.type.codec.DseTypeCodecsRegistrar; -import com.datastax.dse.protocol.internal.DseProtocolV1ClientCodecs; -import com.datastax.dse.protocol.internal.DseProtocolV2ClientCodecs; -import com.datastax.dse.protocol.internal.ProtocolV4ClientCodecsForDse; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import 
com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.DefaultWriteCoalescer; -import com.datastax.oss.driver.internal.core.channel.WriteCoalescer; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.CloudTopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.DefaultTopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.MultiplexingNodeStateListener; -import com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener; -import com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DefaultSchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.DefaultSchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import 
com.datastax.oss.driver.internal.core.metadata.token.DefaultReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.protocol.BuiltInCompressors; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; -import com.datastax.oss.driver.internal.core.servererrors.WriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.BuiltInRequestProcessors; -import com.datastax.oss.driver.internal.core.session.PoolManager; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import com.datastax.oss.driver.internal.core.ssl.JdkSslHandlerFactory; -import com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory; -import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import com.datastax.oss.driver.internal.core.util.Reflection; -import com.datastax.oss.driver.internal.core.util.concurrent.CycleDetector; -import com.datastax.oss.driver.internal.core.util.concurrent.LazyReference; -import com.datastax.oss.protocol.internal.Compressor; -import 
com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.ProtocolV3ClientCodecs; -import com.datastax.oss.protocol.internal.ProtocolV5ClientCodecs; -import com.datastax.oss.protocol.internal.ProtocolV6ClientCodecs; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.buffer.ByteBuf; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default implementation of the driver context. - * - *

All non-constant components are initialized lazily. Some components depend on others, so there - * might be deadlocks or stack overflows if the dependency graph is badly designed. This can be - * checked automatically with the system property {@code - * -Dcom.datastax.oss.driver.DETECT_CYCLES=true} (this might have a slight impact on startup time, - * so the check is disabled by default). - * - *

This is DIY dependency injection. We stayed away from DI frameworks for simplicity, to avoid - * an extra dependency, and because end users might want to access some of these components in their - * own implementations (which wouldn't work well with compile-time approaches like Dagger). - * - *

This also provides extension points for stuff that is too low-level for the driver - * configuration: the intent is that someone can extend this class, override one (or more) of the - * buildXxx methods, and initialize the cluster with this new implementation. - */ -@ThreadSafe -public class DefaultDriverContext implements InternalDriverContext { - - private static final Logger LOG = LoggerFactory.getLogger(InternalDriverContext.class); - private static final AtomicInteger SESSION_NAME_COUNTER = new AtomicInteger(); - - protected final CycleDetector cycleDetector = - new CycleDetector("Detected cycle in context initialization"); - - private final LazyReference> loadBalancingPoliciesRef = - new LazyReference<>("loadBalancingPolicies", this::buildLoadBalancingPolicies, cycleDetector); - private final LazyReference reconnectionPolicyRef = - new LazyReference<>("reconnectionPolicy", this::buildReconnectionPolicy, cycleDetector); - private final LazyReference> retryPoliciesRef = - new LazyReference<>("retryPolicies", this::buildRetryPolicies, cycleDetector); - private final LazyReference> - speculativeExecutionPoliciesRef = - new LazyReference<>( - "speculativeExecutionPolicies", - this::buildSpeculativeExecutionPolicies, - cycleDetector); - private final LazyReference timestampGeneratorRef = - new LazyReference<>("timestampGenerator", this::buildTimestampGenerator, cycleDetector); - private final LazyReference addressTranslatorRef = - new LazyReference<>("addressTranslator", this::buildAddressTranslator, cycleDetector); - private final LazyReference> sslEngineFactoryRef; - - private final LazyReference eventBusRef = - new LazyReference<>("eventBus", this::buildEventBus, cycleDetector); - private final LazyReference> compressorRef = - new LazyReference<>("compressor", this::buildCompressor, cycleDetector); - private final LazyReference> primitiveCodecRef = - new LazyReference<>("primitiveCodec", this::buildPrimitiveCodec, cycleDetector); - private final LazyReference> 
frameCodecRef = - new LazyReference<>("frameCodec", this::buildFrameCodec, cycleDetector); - private final LazyReference> segmentCodecRef = - new LazyReference<>("segmentCodec", this::buildSegmentCodec, cycleDetector); - private final LazyReference protocolVersionRegistryRef = - new LazyReference<>( - "protocolVersionRegistry", this::buildProtocolVersionRegistry, cycleDetector); - private final LazyReference consistencyLevelRegistryRef = - new LazyReference<>( - "consistencyLevelRegistry", this::buildConsistencyLevelRegistry, cycleDetector); - private final LazyReference writeTypeRegistryRef = - new LazyReference<>("writeTypeRegistry", this::buildWriteTypeRegistry, cycleDetector); - private final LazyReference nettyOptionsRef = - new LazyReference<>("nettyOptions", this::buildNettyOptions, cycleDetector); - private final LazyReference writeCoalescerRef = - new LazyReference<>("writeCoalescer", this::buildWriteCoalescer, cycleDetector); - private final LazyReference> sslHandlerFactoryRef = - new LazyReference<>("sslHandlerFactory", this::buildSslHandlerFactory, cycleDetector); - private final LazyReference channelFactoryRef = - new LazyReference<>("channelFactory", this::buildChannelFactory, cycleDetector); - private final LazyReference topologyMonitorRef = - new LazyReference<>("topologyMonitor", this::buildTopologyMonitor, cycleDetector); - private final LazyReference metadataManagerRef = - new LazyReference<>("metadataManager", this::buildMetadataManager, cycleDetector); - private final LazyReference loadBalancingPolicyWrapperRef = - new LazyReference<>( - "loadBalancingPolicyWrapper", this::buildLoadBalancingPolicyWrapper, cycleDetector); - private final LazyReference controlConnectionRef = - new LazyReference<>("controlConnection", this::buildControlConnection, cycleDetector); - private final LazyReference requestProcessorRegistryRef = - new LazyReference<>( - "requestProcessorRegistry", this::buildRequestProcessorRegistry, cycleDetector); - private final 
LazyReference schemaQueriesFactoryRef = - new LazyReference<>("schemaQueriesFactory", this::buildSchemaQueriesFactory, cycleDetector); - private final LazyReference schemaParserFactoryRef = - new LazyReference<>("schemaParserFactory", this::buildSchemaParserFactory, cycleDetector); - private final LazyReference tokenFactoryRegistryRef = - new LazyReference<>("tokenFactoryRegistry", this::buildTokenFactoryRegistry, cycleDetector); - private final LazyReference replicationStrategyFactoryRef = - new LazyReference<>( - "replicationStrategyFactory", this::buildReplicationStrategyFactory, cycleDetector); - private final LazyReference poolManagerRef = - new LazyReference<>("poolManager", this::buildPoolManager, cycleDetector); - private final LazyReference metricsFactoryRef = - new LazyReference<>("metricsFactory", this::buildMetricsFactory, cycleDetector); - private final LazyReference metricIdGeneratorRef = - new LazyReference<>("metricIdGenerator", this::buildMetricIdGenerator, cycleDetector); - private final LazyReference requestThrottlerRef = - new LazyReference<>("requestThrottler", this::buildRequestThrottler, cycleDetector); - private final LazyReference startupOptionsRef = - new LazyReference<>("startupOptionsFactory", this::buildStartupOptionsFactory, cycleDetector); - private final LazyReference nodeStateListenerRef; - private final LazyReference schemaChangeListenerRef; - private final LazyReference requestTrackerRef; - private final LazyReference> requestIdGeneratorRef; - private final LazyReference> authProviderRef; - private final LazyReference> lifecycleListenersRef = - new LazyReference<>("lifecycleListeners", this::buildLifecycleListeners, cycleDetector); - - private final DriverConfig config; - private final DriverConfigLoader configLoader; - private final ChannelPoolFactory channelPoolFactory = new ChannelPoolFactory(); - private final CodecRegistry codecRegistry; - private final String sessionName; - private final NodeStateListener 
nodeStateListenerFromBuilder; - private final SchemaChangeListener schemaChangeListenerFromBuilder; - private final RequestTracker requestTrackerFromBuilder; - private final Map localDatacentersFromBuilder; - private final Map> nodeFiltersFromBuilder; - private final Map nodeDistanceEvaluatorsFromBuilder; - private final ClassLoader classLoader; - private final InetSocketAddress cloudProxyAddress; - private final LazyReference requestLogFormatterRef = - new LazyReference<>("requestLogFormatter", this::buildRequestLogFormatter, cycleDetector); - private final UUID startupClientId; - private final String startupApplicationName; - private final String startupApplicationVersion; - private final Object metricRegistry; - // A stack trace captured in the constructor. Used to extract information about the client - // application. - private final StackTraceElement[] initStackTrace; - - public DefaultDriverContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - this.config = configLoader.getInitialConfig(); - this.configLoader = configLoader; - DriverExecutionProfile defaultProfile = config.getDefaultProfile(); - if (defaultProfile.isDefined(DefaultDriverOption.SESSION_NAME)) { - this.sessionName = defaultProfile.getString(DefaultDriverOption.SESSION_NAME); - } else { - this.sessionName = "s" + SESSION_NAME_COUNTER.getAndIncrement(); - } - this.localDatacentersFromBuilder = programmaticArguments.getLocalDatacenters(); - this.codecRegistry = buildCodecRegistry(programmaticArguments); - this.nodeStateListenerFromBuilder = programmaticArguments.getNodeStateListener(); - this.nodeStateListenerRef = - new LazyReference<>( - "nodeStateListener", - () -> buildNodeStateListener(nodeStateListenerFromBuilder), - cycleDetector); - this.schemaChangeListenerFromBuilder = programmaticArguments.getSchemaChangeListener(); - this.schemaChangeListenerRef = - new LazyReference<>( - "schemaChangeListener", - () -> 
buildSchemaChangeListener(schemaChangeListenerFromBuilder), - cycleDetector); - this.requestTrackerFromBuilder = programmaticArguments.getRequestTracker(); - - this.authProviderRef = - new LazyReference<>( - "authProvider", - () -> buildAuthProvider(programmaticArguments.getAuthProvider()), - cycleDetector); - this.requestTrackerRef = - new LazyReference<>( - "requestTracker", () -> buildRequestTracker(requestTrackerFromBuilder), cycleDetector); - this.requestIdGeneratorRef = - new LazyReference<>( - "requestIdGenerator", - () -> buildRequestIdGenerator(programmaticArguments.getRequestIdGenerator()), - cycleDetector); - this.sslEngineFactoryRef = - new LazyReference<>( - "sslEngineFactory", - () -> buildSslEngineFactory(programmaticArguments.getSslEngineFactory()), - cycleDetector); - @SuppressWarnings("deprecation") - Map> nodeFilters = programmaticArguments.getNodeFilters(); - this.nodeFiltersFromBuilder = nodeFilters; - this.nodeDistanceEvaluatorsFromBuilder = programmaticArguments.getNodeDistanceEvaluators(); - this.classLoader = programmaticArguments.getClassLoader(); - this.cloudProxyAddress = programmaticArguments.getCloudProxyAddress(); - this.startupClientId = programmaticArguments.getStartupClientId(); - this.startupApplicationName = programmaticArguments.getStartupApplicationName(); - this.startupApplicationVersion = programmaticArguments.getStartupApplicationVersion(); - StackTraceElement[] stackTrace; - try { - stackTrace = Thread.currentThread().getStackTrace(); - } catch (Exception ex) { - // ignore and use empty - stackTrace = new StackTraceElement[] {}; - } - this.initStackTrace = stackTrace; - this.metricRegistry = programmaticArguments.getMetricRegistry(); - } - - /** - * @deprecated this constructor only exists for backward compatibility. Please use {@link - * #DefaultDriverContext(DriverConfigLoader, ProgrammaticArguments)} instead. 
- */ - @Deprecated - public DefaultDriverContext( - DriverConfigLoader configLoader, - List> typeCodecs, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - RequestTracker requestTracker, - Map localDatacenters, - Map> nodeFilters, - ClassLoader classLoader) { - this( - configLoader, - ProgrammaticArguments.builder() - .addTypeCodecs(typeCodecs.toArray(new TypeCodec[0])) - .withNodeStateListener(nodeStateListener) - .withSchemaChangeListener(schemaChangeListener) - .withRequestTracker(requestTracker) - .withLocalDatacenters(localDatacenters) - .withNodeFilters(nodeFilters) - .withClassLoader(classLoader) - .build()); - } - - /** - * Returns builder of options to send in a Startup message. - * - * @see #getStartupOptions() - */ - protected StartupOptionsBuilder buildStartupOptionsFactory() { - return new StartupOptionsBuilder(this) - .withClientId(startupClientId) - .withApplicationName(startupApplicationName) - .withApplicationVersion(startupApplicationVersion); - } - - protected Map buildLoadBalancingPolicies() { - return Reflection.buildFromConfigProfiles( - this, - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - DefaultDriverOption.LOAD_BALANCING_POLICY, - LoadBalancingPolicy.class, - "com.datastax.oss.driver.internal.core.loadbalancing", - "com.datastax.dse.driver.internal.core.loadbalancing"); - } - - protected Map buildRetryPolicies() { - return Reflection.buildFromConfigProfiles( - this, - DefaultDriverOption.RETRY_POLICY_CLASS, - DefaultDriverOption.RETRY_POLICY, - RetryPolicy.class, - "com.datastax.oss.driver.internal.core.retry"); - } - - protected Map buildSpeculativeExecutionPolicies() { - return Reflection.buildFromConfigProfiles( - this, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY, - SpeculativeExecutionPolicy.class, - "com.datastax.oss.driver.internal.core.specex"); - } - - protected TimestampGenerator buildTimestampGenerator() { - return 
Reflection.buildFromConfig( - this, - DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS, - TimestampGenerator.class, - "com.datastax.oss.driver.internal.core.time") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing timestamp generator, check your configuration (%s)", - DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS))); - } - - protected ReconnectionPolicy buildReconnectionPolicy() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.RECONNECTION_POLICY_CLASS, - ReconnectionPolicy.class, - "com.datastax.oss.driver.internal.core.connection") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing reconnection policy, check your configuration (%s)", - DefaultDriverOption.RECONNECTION_POLICY_CLASS))); - } - - protected AddressTranslator buildAddressTranslator() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS, - AddressTranslator.class, - "com.datastax.oss.driver.internal.core.addresstranslation") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing address translator, check your configuration (%s)", - DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS))); - } - - protected Optional buildSslEngineFactory(SslEngineFactory factoryFromBuilder) { - return (factoryFromBuilder != null) - ? 
Optional.of(factoryFromBuilder) - : Reflection.buildFromConfig( - this, - DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, - SslEngineFactory.class, - "com.datastax.oss.driver.internal.core.ssl"); - } - - protected EventBus buildEventBus() { - return new EventBus(getSessionName()); - } - - protected Compressor buildCompressor() { - DriverExecutionProfile defaultProfile = getConfig().getDefaultProfile(); - String name = defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none"); - assert name != null : "should use default value"; - return BuiltInCompressors.newInstance(name, this); - } - - protected PrimitiveCodec buildPrimitiveCodec() { - return new ByteBufPrimitiveCodec(getNettyOptions().allocator()); - } - - protected FrameCodec buildFrameCodec() { - return new FrameCodec<>( - getPrimitiveCodec(), - getCompressor(), - new ProtocolV3ClientCodecs(), - new ProtocolV4ClientCodecsForDse(), - new ProtocolV5ClientCodecs(), - new ProtocolV6ClientCodecs(), - new DseProtocolV1ClientCodecs(), - new DseProtocolV2ClientCodecs()); - } - - protected SegmentCodec buildSegmentCodec() { - return new SegmentCodec<>(getPrimitiveCodec(), getCompressor()); - } - - protected ProtocolVersionRegistry buildProtocolVersionRegistry() { - return new DefaultProtocolVersionRegistry(getSessionName()); - } - - protected ConsistencyLevelRegistry buildConsistencyLevelRegistry() { - return new DefaultConsistencyLevelRegistry(); - } - - protected WriteTypeRegistry buildWriteTypeRegistry() { - return new DefaultWriteTypeRegistry(); - } - - protected NettyOptions buildNettyOptions() { - return new DefaultNettyOptions(this); - } - - protected Optional buildSslHandlerFactory() { - // If a JDK-based factory was provided through the public API, wrap it - return getSslEngineFactory().map(JdkSslHandlerFactory::new); - - // For more advanced options (like using Netty's native OpenSSL support instead of the JDK), - // extend DefaultDriverContext and override this method - } - - protected 
WriteCoalescer buildWriteCoalescer() { - return new DefaultWriteCoalescer(this); - } - - protected ChannelFactory buildChannelFactory() { - return new ChannelFactory(this); - } - - protected TopologyMonitor buildTopologyMonitor() { - if (cloudProxyAddress == null) { - return new DefaultTopologyMonitor(this); - } - return new CloudTopologyMonitor(this, cloudProxyAddress); - } - - protected MetadataManager buildMetadataManager() { - return new MetadataManager(this); - } - - protected LoadBalancingPolicyWrapper buildLoadBalancingPolicyWrapper() { - return new LoadBalancingPolicyWrapper(this, getLoadBalancingPolicies()); - } - - protected ControlConnection buildControlConnection() { - return new ControlConnection(this); - } - - protected RequestProcessorRegistry buildRequestProcessorRegistry() { - List> processors = - BuiltInRequestProcessors.createDefaultProcessors(this); - return new RequestProcessorRegistry( - getSessionName(), processors.toArray(new RequestProcessor[0])); - } - - protected CodecRegistry buildCodecRegistry(ProgrammaticArguments arguments) { - MutableCodecRegistry registry = arguments.getCodecRegistry(); - if (registry == null) { - registry = new DefaultCodecRegistry(this.sessionName); - } - registry.register(arguments.getTypeCodecs()); - DseTypeCodecsRegistrar.registerDseCodecs(registry); - return registry; - } - - protected SchemaQueriesFactory buildSchemaQueriesFactory() { - return new DefaultSchemaQueriesFactory(this); - } - - protected SchemaParserFactory buildSchemaParserFactory() { - return new DefaultSchemaParserFactory(this); - } - - protected TokenFactoryRegistry buildTokenFactoryRegistry() { - return new DefaultTokenFactoryRegistry(this); - } - - protected ReplicationStrategyFactory buildReplicationStrategyFactory() { - return new DefaultReplicationStrategyFactory(this); - } - - protected PoolManager buildPoolManager() { - return new PoolManager(this); - } - - protected MetricsFactory buildMetricsFactory() { - return 
Reflection.buildFromConfig( - this, - DefaultDriverOption.METRICS_FACTORY_CLASS, - MetricsFactory.class, - "com.datastax.oss.driver.internal.core.metrics", - "com.datastax.oss.driver.internal.metrics.microprofile", - "com.datastax.oss.driver.internal.metrics.micrometer") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing metrics factory, check your config (%s)", - DefaultDriverOption.METRICS_FACTORY_CLASS))); - } - - protected MetricIdGenerator buildMetricIdGenerator() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, - MetricIdGenerator.class, - "com.datastax.oss.driver.internal.core.metrics") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing metric descriptor, check your config (%s)", - DefaultDriverOption.METRICS_ID_GENERATOR_CLASS))); - } - - protected RequestThrottler buildRequestThrottler() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.REQUEST_THROTTLER_CLASS, - RequestThrottler.class, - "com.datastax.oss.driver.internal.core.session.throttling") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing request throttler, check your configuration (%s)", - DefaultDriverOption.REQUEST_THROTTLER_CLASS))); - } - - protected NodeStateListener buildNodeStateListener( - NodeStateListener nodeStateListenerFromBuilder) { - List listeners = new ArrayList<>(); - if (nodeStateListenerFromBuilder != null) { - listeners.add(nodeStateListenerFromBuilder); - } - DefaultDriverOption newOption = DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES; - @SuppressWarnings("deprecation") - DefaultDriverOption legacyOption = DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS; - DriverExecutionProfile profile = config.getDefaultProfile(); - if (profile.isDefined(newOption)) { - listeners.addAll( - Reflection.buildFromConfigList( - this, - newOption, - NodeStateListener.class, - 
"com.datastax.oss.driver.internal.core.metadata")); - } - if (profile.isDefined(legacyOption)) { - LOG.warn( - "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", - legacyOption, - newOption); - Reflection.buildFromConfig( - this, - legacyOption, - NodeStateListener.class, - "com.datastax.oss.driver.internal.core.metadata") - .ifPresent(listeners::add); - } - if (listeners.isEmpty()) { - return new NoopNodeStateListener(this); - } else if (listeners.size() == 1) { - return listeners.get(0); - } else { - return new MultiplexingNodeStateListener(listeners); - } - } - - protected SchemaChangeListener buildSchemaChangeListener( - SchemaChangeListener schemaChangeListenerFromBuilder) { - List listeners = new ArrayList<>(); - if (schemaChangeListenerFromBuilder != null) { - listeners.add(schemaChangeListenerFromBuilder); - } - DefaultDriverOption newOption = DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES; - @SuppressWarnings("deprecation") - DefaultDriverOption legacyOption = DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS; - DriverExecutionProfile profile = config.getDefaultProfile(); - if (profile.isDefined(newOption)) { - listeners.addAll( - Reflection.buildFromConfigList( - this, - newOption, - SchemaChangeListener.class, - "com.datastax.oss.driver.internal.core.metadata.schema")); - } - if (profile.isDefined(legacyOption)) { - LOG.warn( - "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", - legacyOption, - newOption); - Reflection.buildFromConfig( - this, - legacyOption, - SchemaChangeListener.class, - "com.datastax.oss.driver.internal.core.metadata.schema") - .ifPresent(listeners::add); - } - if (listeners.isEmpty()) { - return new NoopSchemaChangeListener(this); - } else if (listeners.size() == 1) { - return listeners.get(0); - } else { - return new MultiplexingSchemaChangeListener(listeners); - } - } - - protected RequestTracker 
buildRequestTracker(RequestTracker requestTrackerFromBuilder) { - List trackers = new ArrayList<>(); - if (requestTrackerFromBuilder != null) { - trackers.add(requestTrackerFromBuilder); - } - for (LoadBalancingPolicy lbp : this.getLoadBalancingPolicies().values()) { - lbp.getRequestTracker().ifPresent(trackers::add); - } - DefaultDriverOption newOption = DefaultDriverOption.REQUEST_TRACKER_CLASSES; - @SuppressWarnings("deprecation") - DefaultDriverOption legacyOption = DefaultDriverOption.REQUEST_TRACKER_CLASS; - DriverExecutionProfile profile = config.getDefaultProfile(); - if (profile.isDefined(newOption)) { - trackers.addAll( - Reflection.buildFromConfigList( - this, - newOption, - RequestTracker.class, - "com.datastax.oss.driver.internal.core.tracker")); - } - if (profile.isDefined(legacyOption)) { - LOG.warn( - "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", - legacyOption, - newOption); - Reflection.buildFromConfig( - this, - legacyOption, - RequestTracker.class, - "com.datastax.oss.driver.internal.core.tracker") - .ifPresent(trackers::add); - } - if (trackers.isEmpty()) { - return new NoopRequestTracker(this); - } else if (trackers.size() == 1) { - return trackers.get(0); - } else { - return new MultiplexingRequestTracker(trackers); - } - } - - protected Optional buildRequestIdGenerator( - RequestIdGenerator requestIdGenerator) { - return (requestIdGenerator != null) - ? Optional.of(requestIdGenerator) - : Reflection.buildFromConfig( - this, - DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, - RequestIdGenerator.class, - "com.datastax.oss.driver.internal.core.tracker"); - } - - protected Optional buildAuthProvider(AuthProvider authProviderFromBuilder) { - return (authProviderFromBuilder != null) - ? 
Optional.of(authProviderFromBuilder) - : Reflection.buildFromConfig( - this, - DefaultDriverOption.AUTH_PROVIDER_CLASS, - AuthProvider.class, - "com.datastax.oss.driver.internal.core.auth", - "com.datastax.dse.driver.internal.core.auth"); - } - - protected List buildLifecycleListeners() { - if (DefaultDependencyChecker.isPresent(JACKSON)) { - return Collections.singletonList(new InsightsClientLifecycleListener(this, initStackTrace)); - } else { - if (config.getDefaultProfile().getBoolean(DseDriverOption.MONITOR_REPORTING_ENABLED)) { - LOG.info( - "Could not initialize Insights monitoring; " - + "this is normal if Jackson was explicitly excluded from classpath"); - } - return Collections.emptyList(); - } - } - - @NonNull - @Override - public String getSessionName() { - return sessionName; - } - - @NonNull - @Override - public DriverConfig getConfig() { - return config; - } - - @NonNull - @Override - public DriverConfigLoader getConfigLoader() { - return configLoader; - } - - @NonNull - @Override - public Map getLoadBalancingPolicies() { - return loadBalancingPoliciesRef.get(); - } - - @NonNull - @Override - public Map getRetryPolicies() { - return retryPoliciesRef.get(); - } - - @NonNull - @Override - public Map getSpeculativeExecutionPolicies() { - return speculativeExecutionPoliciesRef.get(); - } - - @NonNull - @Override - public TimestampGenerator getTimestampGenerator() { - return timestampGeneratorRef.get(); - } - - @NonNull - @Override - public ReconnectionPolicy getReconnectionPolicy() { - return reconnectionPolicyRef.get(); - } - - @NonNull - @Override - public AddressTranslator getAddressTranslator() { - return addressTranslatorRef.get(); - } - - @NonNull - @Override - public Optional getAuthProvider() { - return authProviderRef.get(); - } - - @NonNull - @Override - public Optional getSslEngineFactory() { - return sslEngineFactoryRef.get(); - } - - @NonNull - @Override - public EventBus getEventBus() { - return eventBusRef.get(); - } - - @NonNull - 
@Override - public Compressor getCompressor() { - return compressorRef.get(); - } - - @NonNull - @Override - public PrimitiveCodec getPrimitiveCodec() { - return primitiveCodecRef.get(); - } - - @NonNull - @Override - public FrameCodec getFrameCodec() { - return frameCodecRef.get(); - } - - @NonNull - @Override - public SegmentCodec getSegmentCodec() { - return segmentCodecRef.get(); - } - - @NonNull - @Override - public ProtocolVersionRegistry getProtocolVersionRegistry() { - return protocolVersionRegistryRef.get(); - } - - @NonNull - @Override - public ConsistencyLevelRegistry getConsistencyLevelRegistry() { - return consistencyLevelRegistryRef.get(); - } - - @NonNull - @Override - public WriteTypeRegistry getWriteTypeRegistry() { - return writeTypeRegistryRef.get(); - } - - @NonNull - @Override - public NettyOptions getNettyOptions() { - return nettyOptionsRef.get(); - } - - @NonNull - @Override - public WriteCoalescer getWriteCoalescer() { - return writeCoalescerRef.get(); - } - - @NonNull - @Override - public Optional getSslHandlerFactory() { - return sslHandlerFactoryRef.get(); - } - - @NonNull - @Override - public ChannelFactory getChannelFactory() { - return channelFactoryRef.get(); - } - - @NonNull - @Override - public ChannelPoolFactory getChannelPoolFactory() { - return channelPoolFactory; - } - - @NonNull - @Override - public TopologyMonitor getTopologyMonitor() { - return topologyMonitorRef.get(); - } - - @NonNull - @Override - public MetadataManager getMetadataManager() { - return metadataManagerRef.get(); - } - - @NonNull - @Override - public LoadBalancingPolicyWrapper getLoadBalancingPolicyWrapper() { - return loadBalancingPolicyWrapperRef.get(); - } - - @NonNull - @Override - public ControlConnection getControlConnection() { - return controlConnectionRef.get(); - } - - @NonNull - @Override - public RequestProcessorRegistry getRequestProcessorRegistry() { - return requestProcessorRegistryRef.get(); - } - - @NonNull - @Override - public 
SchemaQueriesFactory getSchemaQueriesFactory() { - return schemaQueriesFactoryRef.get(); - } - - @NonNull - @Override - public SchemaParserFactory getSchemaParserFactory() { - return schemaParserFactoryRef.get(); - } - - @NonNull - @Override - public TokenFactoryRegistry getTokenFactoryRegistry() { - return tokenFactoryRegistryRef.get(); - } - - @NonNull - @Override - public ReplicationStrategyFactory getReplicationStrategyFactory() { - return replicationStrategyFactoryRef.get(); - } - - @NonNull - @Override - public PoolManager getPoolManager() { - return poolManagerRef.get(); - } - - @NonNull - @Override - public MetricsFactory getMetricsFactory() { - return metricsFactoryRef.get(); - } - - @NonNull - @Override - public MetricIdGenerator getMetricIdGenerator() { - return metricIdGeneratorRef.get(); - } - - @NonNull - @Override - public RequestThrottler getRequestThrottler() { - return requestThrottlerRef.get(); - } - - @NonNull - @Override - public NodeStateListener getNodeStateListener() { - return nodeStateListenerRef.get(); - } - - @NonNull - @Override - public SchemaChangeListener getSchemaChangeListener() { - return schemaChangeListenerRef.get(); - } - - @NonNull - @Override - public RequestTracker getRequestTracker() { - return requestTrackerRef.get(); - } - - @NonNull - @Override - public Optional getRequestIdGenerator() { - return requestIdGeneratorRef.get(); - } - - @Nullable - @Override - public String getLocalDatacenter(@NonNull String profileName) { - return localDatacentersFromBuilder.get(profileName); - } - - @Nullable - @Override - @Deprecated - public Predicate getNodeFilter(@NonNull String profileName) { - return nodeFiltersFromBuilder.get(profileName); - } - - @Nullable - @Override - public NodeDistanceEvaluator getNodeDistanceEvaluator(@NonNull String profileName) { - return nodeDistanceEvaluatorsFromBuilder.get(profileName); - } - - @Nullable - @Override - public ClassLoader getClassLoader() { - return classLoader; - } - - @NonNull - @Override 
- public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - @NonNull - @Override - public ProtocolVersion getProtocolVersion() { - return getChannelFactory().getProtocolVersion(); - } - - @NonNull - @Override - public Map getStartupOptions() { - // startup options are calculated dynamically and may vary per connection - return startupOptionsRef.get().build(); - } - - protected RequestLogFormatter buildRequestLogFormatter() { - return new RequestLogFormatter(this); - } - - @NonNull - @Override - public RequestLogFormatter getRequestLogFormatter() { - return requestLogFormatterRef.get(); - } - - @NonNull - @Override - public List getLifecycleListeners() { - return lifecycleListenersRef.get(); - } - - @Nullable - @Override - public Object getMetricRegistry() { - return metricRegistry; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java deleted file mode 100644 index 763a71f8b12..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.PromiseCombiner; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ThreadFactoryBuilder; -import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelOption; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.FixedRecvByteBufAllocator; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.util.HashedWheelTimer; -import io.netty.util.Timer; -import io.netty.util.concurrent.DefaultPromise; -import io.netty.util.concurrent.EventExecutorGroup; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GlobalEventExecutor; -import io.netty.util.internal.PlatformDependent; -import java.time.Duration; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class DefaultNettyOptions implements NettyOptions { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultNettyOptions.class); - - private final DriverExecutionProfile config; - private final EventLoopGroup ioEventLoopGroup; - private final EventLoopGroup adminEventLoopGroup; - private final int ioShutdownQuietPeriod; - private final int ioShutdownTimeout; - private final TimeUnit ioShutdownUnit; - private final int adminShutdownQuietPeriod; - private final int 
adminShutdownTimeout; - private final TimeUnit adminShutdownUnit; - private final Timer timer; - - public DefaultNettyOptions(InternalDriverContext context) { - this.config = context.getConfig().getDefaultProfile(); - boolean daemon = config.getBoolean(DefaultDriverOption.NETTY_DAEMON); - int ioGroupSize = config.getInt(DefaultDriverOption.NETTY_IO_SIZE); - this.ioShutdownQuietPeriod = config.getInt(DefaultDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD); - this.ioShutdownTimeout = config.getInt(DefaultDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT); - this.ioShutdownUnit = - TimeUnit.valueOf(config.getString(DefaultDriverOption.NETTY_IO_SHUTDOWN_UNIT)); - int adminGroupSize = config.getInt(DefaultDriverOption.NETTY_ADMIN_SIZE); - this.adminShutdownQuietPeriod = - config.getInt(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD); - this.adminShutdownTimeout = config.getInt(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT); - this.adminShutdownUnit = - TimeUnit.valueOf(config.getString(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT)); - - ThreadFactory safeFactory = new BlockingOperation.SafeThreadFactory(); - ThreadFactory ioThreadFactory = - new ThreadFactoryBuilder() - .setThreadFactory(safeFactory) - .setNameFormat(context.getSessionName() + "-io-%d") - .setDaemon(daemon) - .build(); - this.ioEventLoopGroup = new NioEventLoopGroup(ioGroupSize, ioThreadFactory); - - ThreadFactory adminThreadFactory = - new ThreadFactoryBuilder() - .setThreadFactory(safeFactory) - .setNameFormat(context.getSessionName() + "-admin-%d") - .setDaemon(daemon) - .build(); - this.adminEventLoopGroup = new DefaultEventLoopGroup(adminGroupSize, adminThreadFactory); - // setup the Timer - ThreadFactory timerThreadFactory = - new ThreadFactoryBuilder() - .setThreadFactory(safeFactory) - .setNameFormat(context.getSessionName() + "-timer-%d") - .setDaemon(daemon) - .build(); - - Duration tickDuration = config.getDuration(DefaultDriverOption.NETTY_TIMER_TICK_DURATION); - // JAVA-2264: tick durations on 
Windows cannot be less than 100 milliseconds, - // see https://github.com/netty/netty/issues/356. - if (PlatformDependent.isWindows() && tickDuration.toMillis() < 100) { - LOG.warn( - "Timer tick duration was set to a value too aggressive for Windows: {} ms; " - + "doing so is known to cause extreme CPU usage. " - + "Please set advanced.netty.timer.tick-duration to 100 ms or higher.", - tickDuration.toMillis()); - } - this.timer = createTimer(timerThreadFactory, tickDuration); - } - - private HashedWheelTimer createTimer(ThreadFactory timerThreadFactory, Duration tickDuration) { - HashedWheelTimer timer = - new HashedWheelTimer( - timerThreadFactory, - tickDuration.toNanos(), - TimeUnit.NANOSECONDS, - config.getInt(DefaultDriverOption.NETTY_TIMER_TICKS_PER_WHEEL)); - // Start the background thread eagerly during session initialization because - // it is a blocking operation. - timer.start(); - return timer; - } - - @Override - public EventLoopGroup ioEventLoopGroup() { - return ioEventLoopGroup; - } - - @Override - public EventExecutorGroup adminEventExecutorGroup() { - return adminEventLoopGroup; - } - - @Override - public Class channelClass() { - return NioSocketChannel.class; - } - - @Override - public ByteBufAllocator allocator() { - return ByteBufAllocator.DEFAULT; - } - - @Override - public void afterBootstrapInitialized(Bootstrap bootstrap) { - boolean tcpNoDelay = config.getBoolean(DefaultDriverOption.SOCKET_TCP_NODELAY); - bootstrap.option(ChannelOption.TCP_NODELAY, tcpNoDelay); - if (config.isDefined(DefaultDriverOption.SOCKET_KEEP_ALIVE)) { - boolean keepAlive = config.getBoolean(DefaultDriverOption.SOCKET_KEEP_ALIVE); - bootstrap.option(ChannelOption.SO_KEEPALIVE, keepAlive); - } - if (config.isDefined(DefaultDriverOption.SOCKET_REUSE_ADDRESS)) { - boolean reuseAddress = config.getBoolean(DefaultDriverOption.SOCKET_REUSE_ADDRESS); - bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); - } - if 
(config.isDefined(DefaultDriverOption.SOCKET_LINGER_INTERVAL)) { - int lingerInterval = config.getInt(DefaultDriverOption.SOCKET_LINGER_INTERVAL); - bootstrap.option(ChannelOption.SO_LINGER, lingerInterval); - } - if (config.isDefined(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE)) { - int receiveBufferSize = config.getInt(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE); - bootstrap - .option(ChannelOption.SO_RCVBUF, receiveBufferSize) - .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(receiveBufferSize)); - } - if (config.isDefined(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE)) { - int sendBufferSize = config.getInt(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE); - bootstrap.option(ChannelOption.SO_SNDBUF, sendBufferSize); - } - if (config.isDefined(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT)) { - Duration connectTimeout = config.getDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT); - bootstrap.option( - ChannelOption.CONNECT_TIMEOUT_MILLIS, Long.valueOf(connectTimeout.toMillis()).intValue()); - } - } - - @Override - public void afterChannelInitialized(Channel channel) { - // nothing to do - } - - @Override - public Future onClose() { - DefaultPromise closeFuture = new DefaultPromise<>(GlobalEventExecutor.INSTANCE); - GlobalEventExecutor.INSTANCE.execute( - () -> - PromiseCombiner.combine( - closeFuture, - adminEventLoopGroup.shutdownGracefully( - adminShutdownQuietPeriod, adminShutdownTimeout, adminShutdownUnit), - ioEventLoopGroup.shutdownGracefully( - ioShutdownQuietPeriod, ioShutdownTimeout, ioShutdownUnit))); - closeFuture.addListener(f -> timer.stop()); - return closeFuture; - } - - @Override - public Timer getTimer() { - return timer; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java deleted file mode 100644 index dd9ccaa9979..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.shaded.guava.common.collect.HashMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimaps; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Barebones event bus implementation, that allows components to communicate without knowing about - * each other. - * - *

This is intended for administrative events (topology changes, new connections, etc.), which - * are comparatively rare in the driver. Do not use it for anything on the request path, because it - * relies on synchronization. - * - *

We don't use Guava's implementation because Guava is shaded in the driver, and the event bus - * needs to be accessible from low-level 3rd party customizations. - */ -@ThreadSafe -public class EventBus { - private static final Logger LOG = LoggerFactory.getLogger(EventBus.class); - - private final String logPrefix; - private final SetMultimap, Consumer> listeners = - Multimaps.synchronizedSetMultimap(HashMultimap.create()); - - public EventBus(String logPrefix) { - this.logPrefix = logPrefix; - } - - /** - * Registers a listener for an event type. - * - *

If the listener has a shorter lifecycle than the {@code Cluster} instance, it is recommended - * to save the key returned by this method, and use it later to unregister and therefore avoid a - * leak. - * - * @return a key that is needed to unregister later. - */ - public Object register(Class eventClass, Consumer listener) { - LOG.debug("[{}] Registering {} for {}", logPrefix, listener, eventClass); - listeners.put(eventClass, listener); - // The reason for the key mechanism is that this will often be used with method references, - // and you get a different object every time you reference a method, so register(Foo::bar) - // followed by unregister(Foo::bar) wouldn't work as expected. - return listener; - } - - /** - * Unregisters a listener. - * - * @param key the key that was returned by {@link #register(Class, Consumer)} - */ - public boolean unregister(Object key, Class eventClass) { - LOG.debug("[{}] Unregistering {} for {}", logPrefix, key, eventClass); - return listeners.remove(eventClass, key); - } - - /** - * Sends an event that will notify any registered listener for that class. - * - *

Listeners are looked up by an exact match on the class of the object, as returned by - * {@code event.getClass()}. Listeners of a supertype won't be notified. - * - *

The listeners are invoked on the calling thread. It's their responsibility to schedule event - * processing asynchronously if needed. - */ - public void fire(Object event) { - LOG.debug("[{}] Firing an instance of {}: {}", logPrefix, event.getClass(), event); - // if the exact match thing gets too cumbersome, we can reconsider, but I'd like to avoid - // scanning all the keys with instanceof checks. - Class eventClass = event.getClass(); - for (Consumer l : listeners.get(eventClass)) { - @SuppressWarnings("unchecked") - Consumer listener = (Consumer) l; - LOG.debug("[{}] Notifying {} of {}", logPrefix, listener, event); - listener.accept(event); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java deleted file mode 100644 index 81349b0c665..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.WriteCoalescer; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.servererrors.WriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.PoolManager; -import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory; -import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.FrameCodec; -import 
com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.buffer.ByteBuf; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Predicate; - -/** Extends the driver context with additional components that are not exposed by our public API. */ -public interface InternalDriverContext extends DriverContext { - - @NonNull - EventBus getEventBus(); - - @NonNull - Compressor getCompressor(); - - @NonNull - PrimitiveCodec getPrimitiveCodec(); - - @NonNull - FrameCodec getFrameCodec(); - - @NonNull - SegmentCodec getSegmentCodec(); - - @NonNull - ProtocolVersionRegistry getProtocolVersionRegistry(); - - @NonNull - ConsistencyLevelRegistry getConsistencyLevelRegistry(); - - @NonNull - WriteTypeRegistry getWriteTypeRegistry(); - - @NonNull - NettyOptions getNettyOptions(); - - @NonNull - WriteCoalescer getWriteCoalescer(); - - @NonNull - Optional getSslHandlerFactory(); - - @NonNull - ChannelFactory getChannelFactory(); - - @NonNull - ChannelPoolFactory getChannelPoolFactory(); - - @NonNull - TopologyMonitor getTopologyMonitor(); - - @NonNull - MetadataManager getMetadataManager(); - - @NonNull - LoadBalancingPolicyWrapper getLoadBalancingPolicyWrapper(); - - @NonNull - ControlConnection getControlConnection(); - - @NonNull - RequestProcessorRegistry getRequestProcessorRegistry(); - - @NonNull - SchemaQueriesFactory getSchemaQueriesFactory(); - - @NonNull - SchemaParserFactory getSchemaParserFactory(); - - @NonNull - TokenFactoryRegistry getTokenFactoryRegistry(); - - @NonNull - ReplicationStrategyFactory getReplicationStrategyFactory(); - - @NonNull - PoolManager getPoolManager(); - - @NonNull - MetricsFactory getMetricsFactory(); - - @NonNull - MetricIdGenerator getMetricIdGenerator(); - - /** - * The value that was passed to {@link 
SessionBuilder#withLocalDatacenter(String,String)} for this - * particular profile. If it was specified through the configuration instead, this method will - * return {@code null}. - */ - @Nullable - String getLocalDatacenter(@NonNull String profileName); - - /** - * This is the filter from {@link SessionBuilder#withNodeFilter(String, Predicate)}. If the filter - * for this profile was specified through the configuration instead, this method will return - * {@code null}. - * - * @deprecated Use {@link #getNodeDistanceEvaluator(String)} instead. - */ - @Nullable - @Deprecated - Predicate getNodeFilter(@NonNull String profileName); - - /** - * This is the node distance evaluator from {@link - * SessionBuilder#withNodeDistanceEvaluator(String, NodeDistanceEvaluator)}. If the evaluator for - * this profile was specified through the configuration instead, this method will return {@code - * null}. - */ - @Nullable - NodeDistanceEvaluator getNodeDistanceEvaluator(@NonNull String profileName); - - /** - * The {@link ClassLoader} to use to reflectively load class names defined in configuration. If - * null, the driver attempts to use the same {@link ClassLoader} that loaded the core driver - * classes. - */ - @Nullable - ClassLoader getClassLoader(); - - /** - * Retrieves the map of options to send in a Startup message. The returned map will be used to - * construct a {@link com.datastax.oss.protocol.internal.request.Startup} instance when - * initializing the native protocol handshake. - */ - @NonNull - Map getStartupOptions(); - - /** - * A list of additional components to notify of session lifecycle events. - * - *

For historical reasons, this method has a default implementation that returns an empty list. - * The built-in {@link DefaultDriverContext} overrides it to plug in the Insights monitoring - * listener. Custom driver extensions might override this method to add their own components. - * - *

Note that the driver assumes that the returned list is constant; there is no way to add - * listeners dynamically. - */ - @NonNull - default List getLifecycleListeners() { - return Collections.emptyList(); - } - - /** - * A {@link RequestLogFormatter} instance based on this {@link DriverContext}. - * - *

The {@link RequestLogFormatter} instance returned here will use the settings in - * advanced.request-tracker when formatting requests. - */ - @NonNull - RequestLogFormatter getRequestLogFormatter(); - - /** - * A metric registry for storing metrics. - * - *

This will return the object from {@link - * SessionBuilder#withMetricRegistry(java.lang.Object)}. Access to this registry object is only - * intended for {@link MetricsFactory} implementations that need to expose a way to specify the - * registry external to the Factory implementation itself. - * - *

The default metrics framework used by the Driver is DropWizard and does not need an external - * metrics registry object. - */ - @Nullable - default Object getMetricRegistry() { - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java deleted file mode 100644 index 39993e7094f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; - -/** A component that gets notified of certain events in the session's lifecycle. */ -public interface LifecycleListener extends AutoCloseable { - - /** - * Invoked when the session is ready to process user requests. - * - *

This corresponds to the moment when the {@link SessionBuilder#build()} returns, or the - * future returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization - * fails, this method will not get called. - * - *

This method is invoked on a driver thread, it should complete relatively quickly and not - * block. - */ - void onSessionReady(); - - /** - * Invoked when the session shuts down. - * - *

Implementations should perform any necessary cleanup, for example freeing resources or - * cancelling scheduled tasks. - * - *

Note that this method gets called even if the shutdown results from a failed initialization. - * In that case, implementations should be ready to handle a call to this method even though - * {@link #onSessionReady()} hasn't been invoked. - * - *

This method is invoked on a driver thread, it should complete relatively quickly and not - * block. - */ - @Override - void close() throws Exception; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java deleted file mode 100644 index 5b4ff4dcec8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.EventLoopGroup; -import io.netty.util.Timer; -import io.netty.util.concurrent.EventExecutorGroup; -import io.netty.util.concurrent.Future; - -/** Low-level hooks to control certain aspects of Netty usage in the driver. */ -public interface NettyOptions { - - /** - * The event loop group that will be used for I/O. This must always return the same instance. - * - *

It is highly recommended that the threads in this event loop group be created by a {@link - * BlockingOperation.SafeThreadFactory}, so that the driver can protect against deadlocks - * introduced by bad client code. - */ - EventLoopGroup ioEventLoopGroup(); - - /** - * The class to create {@code Channel} instances from. This must be consistent with {@link - * #ioEventLoopGroup()}. - */ - Class channelClass(); - - /** - * An event executor group that will be used to schedule all tasks not related to request I/O: - * cluster events, refreshing metadata, reconnection, etc. - * - *

This must always return the same instance (it can be the same object as {@link - * #ioEventLoopGroup()}). - * - *

It is highly recommended that the threads in this event loop group be created by a {@link - * BlockingOperation.SafeThreadFactory}, so that the driver can protect against deadlocks - * introduced by bad client code. - */ - EventExecutorGroup adminEventExecutorGroup(); - - /** - * The byte buffer allocator to use. This must always return the same instance. Note that this is - * also used by the default implementation of {@link InternalDriverContext#getFrameCodec()}, and - * the built-in {@link com.datastax.oss.protocol.internal.Compressor} implementations. - */ - ByteBufAllocator allocator(); - - /** - * A hook invoked each time the driver creates a client bootstrap in order to open a channel. This - * is a good place to configure any custom option on the bootstrap. - */ - void afterBootstrapInitialized(Bootstrap bootstrap); - - /** - * A hook invoked on each channel, right after the channel has initialized it. This is a good - * place to register any custom handler on the channel's pipeline (note that built-in driver - * handlers are already installed at that point). - */ - void afterChannelInitialized(Channel channel); - - /** - * A hook involved when the driver instance shuts down. This is a good place to free any resources - * that you have allocated elsewhere in this component, for example shut down custom event loop - * groups. - */ - Future onClose(); - - /** - * The Timer on which non-I/O events should be scheduled. This must always return the same - * instance. This timer should be used for things like request timeout events and scheduling - * speculative executions. Under high load, scheduling these non-I/O events on a separate, lower - * resolution timer will allow for higher overall I/O throughput. 
- */ - Timer getTimer(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java deleted file mode 100644 index 89a9266b3ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import com.fasterxml.jackson.databind.ObjectMapper; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class StartupOptionsBuilder { - - public static final String DRIVER_NAME_KEY = "DRIVER_NAME"; - public static final String DRIVER_VERSION_KEY = "DRIVER_VERSION"; - public static final String DRIVER_BAGGAGE = "DRIVER_BAGGAGE"; - public static final String APPLICATION_NAME_KEY = "APPLICATION_NAME"; - public static final String APPLICATION_VERSION_KEY = "APPLICATION_VERSION"; - public static final String CLIENT_ID_KEY = "CLIENT_ID"; - - private static final Logger LOG = LoggerFactory.getLogger(StartupOptionsBuilder.class); - private static final ObjectMapper mapper = new ObjectMapper(); - - protected final InternalDriverContext context; - private UUID clientId; - private String applicationName; - private String applicationVersion; - - public StartupOptionsBuilder(InternalDriverContext context) { - this.context = context; - } - - /** - * Sets the client ID to be sent in the Startup message options. - * - *

If this method is not invoked, or the id passed in is null, a random {@link UUID} will be - * generated and used by default. - */ - public StartupOptionsBuilder withClientId(@Nullable UUID clientId) { - this.clientId = clientId; - return this; - } - - /** - * Sets the client application name to be sent in the Startup message options. - * - *

If this method is not invoked, or the name passed in is null, no application name option - * will be sent in the startup message options. - */ - public StartupOptionsBuilder withApplicationName(@Nullable String applicationName) { - this.applicationName = applicationName; - return this; - } - - /** - * Sets the client application version to be sent in the Startup message options. - * - *

If this method is not invoked, or the name passed in is null, no application version option - * will be sent in the startup message options. - */ - public StartupOptionsBuilder withApplicationVersion(@Nullable String applicationVersion) { - this.applicationVersion = applicationVersion; - return this; - } - - /** - * Builds a map of options to send in a Startup message. - * - *

The default set of options are built here and include {@link - * com.datastax.oss.protocol.internal.request.Startup#COMPRESSION_KEY} (if the context passed in - * has a compressor/algorithm set), and the driver's {@link #DRIVER_NAME_KEY} and {@link - * #DRIVER_VERSION_KEY}. The {@link com.datastax.oss.protocol.internal.request.Startup} - * constructor will add {@link - * com.datastax.oss.protocol.internal.request.Startup#CQL_VERSION_KEY}. - * - * @return Map of Startup Options. - */ - public Map build() { - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - NullAllowingImmutableMap.Builder builder = NullAllowingImmutableMap.builder(3); - // add compression (if configured) and driver name and version - String compressionAlgorithm = context.getCompressor().algorithm(); - if (compressionAlgorithm != null && !compressionAlgorithm.trim().isEmpty()) { - builder.put(Startup.COMPRESSION_KEY, compressionAlgorithm.trim()); - } - builder.put(DRIVER_NAME_KEY, getDriverName()).put(DRIVER_VERSION_KEY, getDriverVersion()); - - // Add Insights entries, falling back to generation / config if no programmatic values provided: - if (clientId == null) { - clientId = Uuids.random(); - } - builder.put(CLIENT_ID_KEY, clientId.toString()); - if (applicationName == null) { - applicationName = config.getString(DseDriverOption.APPLICATION_NAME, null); - } - if (applicationName != null) { - builder.put(APPLICATION_NAME_KEY, applicationName); - } - if (applicationVersion == null) { - applicationVersion = config.getString(DseDriverOption.APPLICATION_VERSION, null); - } - if (applicationVersion != null) { - builder.put(APPLICATION_VERSION_KEY, applicationVersion); - } - driverBaggage().ifPresent(s -> builder.put(DRIVER_BAGGAGE, s)); - - return builder.build(); - } - - /** - * Returns this driver's name. - * - *

By default, this method will pull from the bundled Driver.properties file. Subclasses should - * override this method if they need to report a different Driver name on Startup. - */ - protected String getDriverName() { - return Session.OSS_DRIVER_COORDINATES.getName(); - } - - /** - * Returns this driver's version. - * - *

By default, this method will pull from the bundled Driver.properties file. Subclasses should - * override this method if they need to report a different Driver version on Startup. - */ - protected String getDriverVersion() { - return Session.OSS_DRIVER_COORDINATES.getVersion().toString(); - } - - private Optional driverBaggage() { - ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); - for (Map.Entry entry : - context.getLoadBalancingPolicies().entrySet()) { - Map config = entry.getValue().getStartupConfiguration(); - if (!config.isEmpty()) { - builder.put(entry.getKey(), config); - } - } - try { - return Optional.of(mapper.writeValueAsString(builder.build())); - } catch (Exception e) { - LOG.warn("Failed to construct startup driver baggage", e); - return Optional.empty(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java deleted file mode 100644 index 5c29a9b704b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.control; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.channel.EventCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultTopologyMonitor; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.Reconnection; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Event; 
-import com.datastax.oss.protocol.internal.response.event.SchemaChangeEvent; -import com.datastax.oss.protocol.internal.response.event.StatusChangeEvent; -import com.datastax.oss.protocol.internal.response.event.TopologyChangeEvent; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.util.AbstractMap.SimpleEntry; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Queue; -import java.util.WeakHashMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains a dedicated connection to a Cassandra node for administrative queries. - * - *

If the control node goes down, a reconnection is triggered. The control node is chosen - * randomly among the contact points at startup, or according to the load balancing policy for later - * reconnections. - * - *

The control connection is used by: - * - *

    - *
  • {@link DefaultTopologyMonitor} to determine cluster connectivity and retrieve node - * metadata; - *
  • {@link MetadataManager} to run schema metadata queries. - *
- */ -@ThreadSafe -public class ControlConnection implements EventCallback, AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(ControlConnection.class); - - private final InternalDriverContext context; - private final String logPrefix; - private final EventExecutor adminExecutor; - private final SingleThreaded singleThreaded; - - // The single channel used by this connection. This field is accessed concurrently, but only - // mutated on adminExecutor (by SingleThreaded methods) - private volatile DriverChannel channel; - - public ControlConnection(InternalDriverContext context) { - this.context = context; - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.singleThreaded = new SingleThreaded(context); - } - - /** - * Initializes the control connection. If it is already initialized, this is a no-op and all - * parameters are ignored. - * - * @param listenToClusterEvents whether to register for TOPOLOGY_CHANGE and STATUS_CHANGE events. - * If the control connection has already initialized with another value, this is ignored. - * SCHEMA_CHANGE events are always registered. - * @param reconnectOnFailure whether to schedule a reconnection if the initial attempt fails (if - * true, the returned future will only complete once the reconnection has succeeded). - * @param useInitialReconnectionSchedule if no node can be reached, the type of reconnection - * schedule to use. In other words, the value that will be passed to {@link - * ReconnectionPolicy#newControlConnectionSchedule(boolean)}. Note that this parameter is only - * relevant if {@code reconnectOnFailure} is true, otherwise it is not used. 
- */ - public CompletionStage init( - boolean listenToClusterEvents, - boolean reconnectOnFailure, - boolean useInitialReconnectionSchedule) { - RunOrSchedule.on( - adminExecutor, - () -> - singleThreaded.init( - listenToClusterEvents, reconnectOnFailure, useInitialReconnectionSchedule)); - return singleThreaded.initFuture; - } - - public CompletionStage initFuture() { - return singleThreaded.initFuture; - } - - public boolean isInit() { - return singleThreaded.initFuture.isDone(); - } - - /** - * The channel currently used by this control connection. This is modified concurrently in the - * event of a reconnection, so it may occasionally return a closed channel (clients should be - * ready to deal with that). - */ - public DriverChannel channel() { - return channel; - } - - /** - * Forces an immediate reconnect: if we were connected to a node, that connection will be closed; - * if we were already reconnecting, the next attempt is started immediately, without waiting for - * the next scheduled interval; in all cases, a new query plan is fetched from the load balancing - * policy, and each node in it will be tried in sequence. - */ - public void reconnectNow() { - RunOrSchedule.on(adminExecutor, singleThreaded::reconnectNow); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - // Control queries are never critical, so there is no graceful close. 
- return forceCloseAsync(); - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::forceClose); - return singleThreaded.closeFuture; - } - - @Override - public void onEvent(Message eventMessage) { - if (!(eventMessage instanceof Event)) { - LOG.warn("[{}] Unsupported event class: {}", logPrefix, eventMessage.getClass().getName()); - } else { - LOG.debug("[{}] Processing incoming event {}", logPrefix, eventMessage); - Event event = (Event) eventMessage; - switch (event.type) { - case ProtocolConstants.EventType.TOPOLOGY_CHANGE: - processTopologyChange(event); - break; - case ProtocolConstants.EventType.STATUS_CHANGE: - processStatusChange(event); - break; - case ProtocolConstants.EventType.SCHEMA_CHANGE: - processSchemaChange(event); - break; - default: - LOG.warn("[{}] Unsupported event type: {}", logPrefix, event.type); - } - } - } - - private void processTopologyChange(Event event) { - TopologyChangeEvent tce = (TopologyChangeEvent) event; - switch (tce.changeType) { - case ProtocolConstants.TopologyChangeType.NEW_NODE: - context.getEventBus().fire(TopologyEvent.suggestAdded(tce.address)); - break; - case ProtocolConstants.TopologyChangeType.REMOVED_NODE: - context.getEventBus().fire(TopologyEvent.suggestRemoved(tce.address)); - break; - default: - LOG.warn("[{}] Unsupported topology change type: {}", logPrefix, tce.changeType); - } - } - - private void processStatusChange(Event event) { - StatusChangeEvent sce = (StatusChangeEvent) event; - switch (sce.changeType) { - case ProtocolConstants.StatusChangeType.UP: - context.getEventBus().fire(TopologyEvent.suggestUp(sce.address)); - break; - case ProtocolConstants.StatusChangeType.DOWN: - context.getEventBus().fire(TopologyEvent.suggestDown(sce.address)); - break; - default: - LOG.warn("[{}] Unsupported status change type: {}", logPrefix, sce.changeType); - } - } - - private void processSchemaChange(Event event) { - SchemaChangeEvent sce = 
(SchemaChangeEvent) event; - context - .getMetadataManager() - .refreshSchema(sce.keyspace, false, false) - .whenComplete( - (metadata, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema for a SCHEMA_CHANGE event, " - + "keeping previous version", - logPrefix, - error); - } - }); - } - - private class SingleThreaded { - private final InternalDriverContext context; - private final DriverConfig config; - private final CompletableFuture initFuture = new CompletableFuture<>(); - private boolean initWasCalled; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private final ReconnectionPolicy reconnectionPolicy; - private final Reconnection reconnection; - private DriverChannelOptions channelOptions; - // The last events received for each node - private final Map lastNodeDistance = new WeakHashMap<>(); - private final Map lastNodeState = new WeakHashMap<>(); - - private SingleThreaded(InternalDriverContext context) { - this.context = context; - this.config = context.getConfig(); - this.reconnectionPolicy = context.getReconnectionPolicy(); - this.reconnection = - new Reconnection( - logPrefix, - adminExecutor, - () -> reconnectionPolicy.newControlConnectionSchedule(false), - this::reconnect); - // In "reconnect-on-init" mode, handle cancellation of the initFuture by user code - CompletableFutures.whenCancelled( - this.initFuture, - () -> { - LOG.debug("[{}] Init future was cancelled, stopping reconnection", logPrefix); - reconnection.stop(); - }); - - context - .getEventBus() - .register(DistanceEvent.class, RunOrSchedule.on(adminExecutor, this::onDistanceEvent)); - context - .getEventBus() - .register(NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onStateEvent)); - } - - private void init( - boolean listenToClusterEvents, - boolean reconnectOnFailure, - boolean useInitialReconnectionSchedule) { - assert 
adminExecutor.inEventLoop(); - if (initWasCalled) { - return; - } - initWasCalled = true; - try { - ImmutableList eventTypes = buildEventTypes(listenToClusterEvents); - LOG.debug("[{}] Initializing with event types {}", logPrefix, eventTypes); - channelOptions = - DriverChannelOptions.builder() - .withEvents(eventTypes, ControlConnection.this) - .withOwnerLogPrefix(logPrefix + "|control") - .build(); - - Queue nodes = context.getLoadBalancingPolicyWrapper().newQueryPlan(); - - connect( - nodes, - null, - () -> initFuture.complete(null), - error -> { - if (isAuthFailure(error)) { - LOG.warn( - "[{}] Authentication errors encountered on all contact points. Please check your authentication configuration.", - logPrefix); - } - if (reconnectOnFailure && !closeWasCalled) { - reconnection.start( - reconnectionPolicy.newControlConnectionSchedule( - useInitialReconnectionSchedule)); - } else { - // Special case for the initial connection: reword to a more user-friendly error - // message - if (error instanceof AllNodesFailedException) { - error = - ((AllNodesFailedException) error) - .reword( - "Could not reach any contact point, " - + "make sure you've provided valid addresses"); - } - initFuture.completeExceptionally(error); - } - }); - } catch (Throwable t) { - initFuture.completeExceptionally(t); - } - } - - private CompletionStage reconnect() { - assert adminExecutor.inEventLoop(); - Queue nodes = context.getLoadBalancingPolicyWrapper().newQueryPlan(); - CompletableFuture result = new CompletableFuture<>(); - connect( - nodes, - null, - () -> { - result.complete(true); - onSuccessfulReconnect(); - }, - error -> result.complete(false)); - return result; - } - - private void connect( - Queue nodes, - List> errors, - Runnable onSuccess, - Consumer onFailure) { - assert adminExecutor.inEventLoop(); - Node node = nodes.poll(); - if (node == null) { - onFailure.accept(AllNodesFailedException.fromErrors(errors)); - } else { - LOG.debug("[{}] Trying to establish a connection 
to {}", logPrefix, node); - context - .getChannelFactory() - .connect(node, channelOptions) - .whenCompleteAsync( - (channel, error) -> { - try { - NodeDistance lastDistance = lastNodeDistance.get(node); - NodeState lastState = lastNodeState.get(node); - if (error != null) { - if (closeWasCalled || initFuture.isCancelled()) { - onSuccess.run(); // abort, we don't really care about the result - } else { - if (error instanceof AuthenticationException) { - Loggers.warnWithException( - LOG, "[{}] Authentication error", logPrefix, error); - } else { - if (config - .getDefaultProfile() - .getBoolean(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR)) { - Loggers.warnWithException( - LOG, - "[{}] Error connecting to {}, trying next node", - logPrefix, - node, - error); - } else { - LOG.debug( - "[{}] Error connecting to {}, trying next node", - logPrefix, - node, - error); - } - } - List> newErrors = - (errors == null) ? new ArrayList<>() : errors; - newErrors.add(new SimpleEntry<>(node, error)); - context.getEventBus().fire(ChannelEvent.controlConnectionFailed(node)); - connect(nodes, newErrors, onSuccess, onFailure); - } - } else if (closeWasCalled || initFuture.isCancelled()) { - LOG.debug( - "[{}] New channel opened ({}) but the control connection was closed, closing it", - logPrefix, - channel); - channel.forceClose(); - onSuccess.run(); - } else if (lastDistance == NodeDistance.IGNORED) { - LOG.debug( - "[{}] New channel opened ({}) but node became ignored, " - + "closing and trying next node", - logPrefix, - channel); - channel.forceClose(); - connect(nodes, errors, onSuccess, onFailure); - } else if (lastNodeState.containsKey(node) - && (lastState == null /*(removed)*/ - || lastState == NodeState.FORCED_DOWN)) { - LOG.debug( - "[{}] New channel opened ({}) but node was removed or forced down, " - + "closing and trying next node", - logPrefix, - channel); - channel.forceClose(); - connect(nodes, errors, onSuccess, onFailure); - } else { - LOG.debug("[{}] New channel 
opened {}", logPrefix, channel); - DriverChannel previousChannel = ControlConnection.this.channel; - ControlConnection.this.channel = channel; - if (previousChannel != null) { - // We were reconnecting: make sure previous channel gets closed (it may - // still be open if reconnection was forced) - LOG.debug( - "[{}] Forcefully closing previous channel {}", logPrefix, channel); - previousChannel.forceClose(); - } - context.getEventBus().fire(ChannelEvent.channelOpened(node)); - channel - .closeFuture() - .addListener( - f -> - adminExecutor - .submit(() -> onChannelClosed(channel, node)) - .addListener(UncaughtExceptions::log)); - onSuccess.run(); - } - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected exception while processing channel init result", - logPrefix, - e); - } - }, - adminExecutor); - } - } - - private void onSuccessfulReconnect() { - // If reconnectOnFailure was true and we've never connected before, complete the future now to - // signal that the initialization is complete. - boolean isFirstConnection = initFuture.complete(null); - - // Otherwise, perform a full refresh (we don't know how long we were disconnected) - if (!isFirstConnection) { - context - .getMetadataManager() - .refreshNodes() - .whenComplete( - (result, error) -> { - if (error != null) { - LOG.debug("[{}] Error while refreshing node list", logPrefix, error); - } else { - try { - // A failed node list refresh at startup is not fatal, so this might be the - // first successful refresh; make sure the LBP gets initialized (this is a - // no-op if it was initialized already). 
- context.getLoadBalancingPolicyWrapper().init(); - context - .getMetadataManager() - .refreshSchema(null, false, true) - .whenComplete( - (metadata, schemaError) -> { - if (schemaError != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema after a " - + "successful reconnection, keeping previous version", - logPrefix, - schemaError); - } - }); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error on control connection reconnect", - logPrefix, - t); - } - } - }); - } - } - - private void onChannelClosed(DriverChannel channel, Node node) { - assert adminExecutor.inEventLoop(); - if (!closeWasCalled) { - context.getEventBus().fire(ChannelEvent.channelClosed(node)); - // If this channel is the current control channel, we must start a - // reconnection attempt to get a new control channel. - if (channel == ControlConnection.this.channel) { - LOG.debug( - "[{}] The current control channel {} was closed, scheduling reconnection", - logPrefix, - channel); - reconnection.start(); - } else { - LOG.trace( - "[{}] A previous control channel {} was closed, reconnection not required", - logPrefix, - channel); - } - } - } - - private void reconnectNow() { - assert adminExecutor.inEventLoop(); - if (initWasCalled && !closeWasCalled) { - reconnection.reconnectNow(true); - } - } - - private void onDistanceEvent(DistanceEvent event) { - assert adminExecutor.inEventLoop(); - this.lastNodeDistance.put(event.node, event.distance); - if (event.distance == NodeDistance.IGNORED - && channel != null - && !channel.closeFuture().isDone() - && event.node.getEndPoint().equals(channel.getEndPoint())) { - LOG.debug( - "[{}] Control node {} became IGNORED, reconnecting to a different node", - logPrefix, - event.node); - reconnectNow(); - } - } - - private void onStateEvent(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - this.lastNodeState.put(event.node, event.newState); - if ((event.newState == null 
/*(removed)*/ || event.newState == NodeState.FORCED_DOWN) - && channel != null - && !channel.closeFuture().isDone() - && event.node.getEndPoint().equals(channel.getEndPoint())) { - LOG.debug( - "[{}] Control node {} was removed or forced down, reconnecting to a different node", - logPrefix, - event.node); - reconnectNow(); - } - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Starting shutdown", logPrefix); - reconnection.stop(); - if (channel == null) { - LOG.debug("[{}] Shutdown complete", logPrefix); - closeFuture.complete(null); - } else { - channel - .forceClose() - .addListener( - f -> { - if (f.isSuccess()) { - LOG.debug("[{}] Shutdown complete", logPrefix); - closeFuture.complete(null); - } else { - closeFuture.completeExceptionally(f.cause()); - } - }); - } - } - } - - private boolean isAuthFailure(Throwable error) { - if (error instanceof AllNodesFailedException) { - Collection> errors = - ((AllNodesFailedException) error).getAllErrors().values(); - if (errors.size() == 0) { - return false; - } - for (List nodeErrors : errors) { - for (Throwable nodeError : nodeErrors) { - if (!(nodeError instanceof AuthenticationException)) { - return false; - } - } - } - } - return true; - } - - private static ImmutableList buildEventTypes(boolean listenClusterEvents) { - ImmutableList.Builder builder = ImmutableList.builder(); - builder.add(ProtocolConstants.EventType.SCHEMA_CHANGE); - if (listenClusterEvents) { - builder - .add(ProtocolConstants.EventType.STATUS_CHANGE) - .add(ProtocolConstants.EventType.TOPOLOGY_CHANGE); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java deleted file mode 100644 index ff9384b3e24..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java 
+++ /dev/null @@ -1,593 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import 
com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.servererrors.AlreadyExistsException; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CASWriteUnknownException; -import com.datastax.oss.driver.api.core.servererrors.CDCWriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.InvalidConfigurationInQueryException; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.SyntaxError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.UnauthorizedException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import 
com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.primitives.Ints; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Batch; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.error.AlreadyExists; -import com.datastax.oss.protocol.internal.response.error.CASWriteUnknown; -import com.datastax.oss.protocol.internal.response.error.ReadFailure; -import com.datastax.oss.protocol.internal.response.error.ReadTimeout; -import com.datastax.oss.protocol.internal.response.error.Unavailable; -import com.datastax.oss.protocol.internal.response.error.WriteFailure; -import com.datastax.oss.protocol.internal.response.error.WriteTimeout; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import 
com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** - * Utility methods to convert to/from protocol messages. - * - *

The main goal of this class is to move this code out of the request handlers. - */ -public class Conversions { - - public static DriverExecutionProfile resolveExecutionProfile( - Request request, DriverContext context) { - if (request.getExecutionProfile() != null) { - return request.getExecutionProfile(); - } else { - DriverConfig config = context.getConfig(); - String profileName = request.getExecutionProfileName(); - return (profileName == null || profileName.isEmpty()) - ? config.getDefaultProfile() - : config.getProfile(profileName); - } - } - - public static Message toMessage( - Statement statement, DriverExecutionProfile config, InternalDriverContext context) { - ConsistencyLevelRegistry consistencyLevelRegistry = context.getConsistencyLevelRegistry(); - ConsistencyLevel consistency = statement.getConsistencyLevel(); - int consistencyCode = - (consistency == null) - ? consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - : consistency.getProtocolCode(); - int pageSize = statement.getPageSize(); - if (pageSize <= 0) { - pageSize = config.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE); - } - ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); - int serialConsistencyCode = - (serialConsistency == null) - ? 
consistencyLevelRegistry.nameToCode( - config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - : serialConsistency.getProtocolCode(); - long timestamp = statement.getQueryTimestamp(); - if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { - timestamp = context.getTimestampGenerator().next(); - } - CodecRegistry codecRegistry = context.getCodecRegistry(); - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = statement.getKeyspace(); - int nowInSeconds = statement.getNowInSeconds(); - if (nowInSeconds != Statement.NO_NOW_IN_SECONDS - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.NOW_IN_SECONDS)) { - throw new IllegalArgumentException("Can't use nowInSeconds with protocol " + protocolVersion); - } - if (statement instanceof SimpleStatement) { - SimpleStatement simpleStatement = (SimpleStatement) statement; - List positionalValues = simpleStatement.getPositionalValues(); - Map namedValues = simpleStatement.getNamedValues(); - if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { - throw new IllegalArgumentException( - "Can't have both positional and named values in a statement."); - } - if (keyspace != null - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - QueryOptions queryOptions = - new QueryOptions( - consistencyCode, - encode(positionalValues, codecRegistry, protocolVersion), - encode(namedValues, codecRegistry, protocolVersion), - false, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - (keyspace == null) ? 
null : keyspace.asInternal(), - nowInSeconds); - return new Query(simpleStatement.getQuery(), queryOptions); - } else if (statement instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) statement; - if (!protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { - ensureAllSet(boundStatement); - } - boolean skipMetadata = - boundStatement.getPreparedStatement().getResultSetDefinitions().size() > 0; - QueryOptions queryOptions = - new QueryOptions( - consistencyCode, - boundStatement.getValues(), - Collections.emptyMap(), - skipMetadata, - pageSize, - statement.getPagingState(), - serialConsistencyCode, - timestamp, - null, - nowInSeconds); - PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); - ByteBuffer id = preparedStatement.getId(); - ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId(); - return new Execute( - Bytes.getArray(id), - (resultMetadataId == null) ? null : Bytes.getArray(resultMetadataId), - queryOptions); - } else if (statement instanceof BatchStatement) { - BatchStatement batchStatement = (BatchStatement) statement; - if (!protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { - ensureAllSet(batchStatement); - } - if (keyspace != null - && !protocolVersionRegistry.supports( - protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - List queriesOrIds = new ArrayList<>(batchStatement.size()); - List> values = new ArrayList<>(batchStatement.size()); - for (BatchableStatement child : batchStatement) { - if (child instanceof SimpleStatement) { - SimpleStatement simpleStatement = (SimpleStatement) child; - if (simpleStatement.getNamedValues().size() > 0) { - throw new IllegalArgumentException( - String.format( - "Batch statements cannot contain simple statements with named values " - 
+ "(offending statement: %s)", - simpleStatement.getQuery())); - } - queriesOrIds.add(simpleStatement.getQuery()); - values.add(encode(simpleStatement.getPositionalValues(), codecRegistry, protocolVersion)); - } else if (child instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) child; - queriesOrIds.add(Bytes.getArray(boundStatement.getPreparedStatement().getId())); - values.add(boundStatement.getValues()); - } else { - throw new IllegalArgumentException( - "Unsupported child statement: " + child.getClass().getName()); - } - } - return new Batch( - batchStatement.getBatchType().getProtocolCode(), - queriesOrIds, - values, - consistencyCode, - serialConsistencyCode, - timestamp, - (keyspace == null) ? null : keyspace.asInternal(), - nowInSeconds); - } else { - throw new IllegalArgumentException( - "Unsupported statement type: " + statement.getClass().getName()); - } - } - - public static List encode( - List values, CodecRegistry codecRegistry, ProtocolVersion protocolVersion) { - if (values.isEmpty()) { - return Collections.emptyList(); - } else { - ByteBuffer[] encodedValues = new ByteBuffer[values.size()]; - int i = 0; - for (Object value : values) { - encodedValues[i++] = - (value == null) - ? 
null - : ValuesHelper.encodeToDefaultCqlMapping(value, codecRegistry, protocolVersion); - } - return NullAllowingImmutableList.of(encodedValues); - } - } - - public static Map encode( - Map values, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - if (values.isEmpty()) { - return Collections.emptyMap(); - } else { - NullAllowingImmutableMap.Builder encodedValues = - NullAllowingImmutableMap.builder(values.size()); - for (Map.Entry entry : values.entrySet()) { - if (entry.getValue() == null) { - encodedValues.put(entry.getKey().asInternal(), null); - } else { - encodedValues.put( - entry.getKey().asInternal(), - ValuesHelper.encodeToDefaultCqlMapping( - entry.getValue(), codecRegistry, protocolVersion)); - } - } - return encodedValues.build(); - } - } - - public static void ensureAllSet(BoundStatement boundStatement) { - for (int i = 0; i < boundStatement.size(); i++) { - if (!boundStatement.isSet(i)) { - throw new IllegalStateException( - "Unset value at index " - + i - + ". 
" - + "If you want this value to be null, please set it to null explicitly."); - } - } - } - - public static void ensureAllSet(BatchStatement batchStatement) { - for (BatchableStatement batchableStatement : batchStatement) { - if (batchableStatement instanceof BoundStatement) { - ensureAllSet(((BoundStatement) batchableStatement)); - } - } - } - - public static AsyncResultSet toResultSet( - Result result, - ExecutionInfo executionInfo, - CqlSession session, - InternalDriverContext context) { - if (result instanceof Rows) { - Rows rows = (Rows) result; - Statement statement = (Statement) executionInfo.getRequest(); - ColumnDefinitions columnDefinitions = getResultDefinitions(rows, statement, context); - return new DefaultAsyncResultSet( - columnDefinitions, executionInfo, rows.getData(), session, context); - } else if (result instanceof Prepared) { - // This should never happen - throw new IllegalArgumentException("Unexpected PREPARED response to a CQL query"); - } else { - // Void, SetKeyspace, SchemaChange - return DefaultAsyncResultSet.empty(executionInfo); - } - } - - public static ColumnDefinitions getResultDefinitions( - Rows rows, Statement statement, InternalDriverContext context) { - RowsMetadata rowsMetadata = rows.getMetadata(); - if (rowsMetadata.columnSpecs.isEmpty()) { - // If the response has no metadata, it means the request had SKIP_METADATA set, the driver - // only ever does that for bound statements. - BoundStatement boundStatement = (BoundStatement) statement; - return boundStatement.getPreparedStatement().getResultSetDefinitions(); - } else { - // The response has metadata, always use it above anything else we might have locally. 
- ColumnDefinitions definitions = toColumnDefinitions(rowsMetadata, context); - // In addition, if the server signaled a schema change (see CASSANDRA-10786), update the - // prepared statement's copy of the metadata - if (rowsMetadata.newResultMetadataId != null) { - BoundStatement boundStatement = (BoundStatement) statement; - PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); - preparedStatement.setResultMetadata( - ByteBuffer.wrap(rowsMetadata.newResultMetadataId).asReadOnlyBuffer(), definitions); - } - return definitions; - } - } - - public static DefaultPreparedStatement toPreparedStatement( - Prepared response, PrepareRequest request, InternalDriverContext context) { - ColumnDefinitions variableDefinitions = - toColumnDefinitions(response.variablesMetadata, context); - - int[] pkIndicesInResponse = response.variablesMetadata.pkIndices; - // null means a legacy protocol version that doesn't provide the info, try to compute it - List pkIndices = - (pkIndicesInResponse == null) - ? computePkIndices(variableDefinitions, context) - : Ints.asList(pkIndicesInResponse); - - return new DefaultPreparedStatement( - ByteBuffer.wrap(response.preparedQueryId).asReadOnlyBuffer(), - request.getQuery(), - variableDefinitions, - pkIndices, - (response.resultMetadataId == null) - ? 
null - : ByteBuffer.wrap(response.resultMetadataId).asReadOnlyBuffer(), - toColumnDefinitions(response.resultMetadata, context), - request.getKeyspace(), - NullAllowingImmutableMap.copyOf(request.getCustomPayload()), - request.getExecutionProfileNameForBoundStatements(), - request.getExecutionProfileForBoundStatements(), - request.getRoutingKeyspaceForBoundStatements(), - request.getRoutingKeyForBoundStatements(), - request.getRoutingTokenForBoundStatements(), - NullAllowingImmutableMap.copyOf(request.getCustomPayloadForBoundStatements()), - request.areBoundStatementsIdempotent(), - request.getTimeoutForBoundStatements(), - request.getPagingStateForBoundStatements(), - request.getPageSizeForBoundStatements(), - request.getConsistencyLevelForBoundStatements(), - request.getSerialConsistencyLevelForBoundStatements(), - request.areBoundStatementsTracing(), - context.getCodecRegistry(), - context.getProtocolVersion()); - } - - public static ColumnDefinitions toColumnDefinitions( - RowsMetadata metadata, InternalDriverContext context) { - ColumnDefinition[] values = new ColumnDefinition[metadata.columnSpecs.size()]; - int i = 0; - for (ColumnSpec columnSpec : metadata.columnSpecs) { - values[i++] = new DefaultColumnDefinition(columnSpec, context); - } - return DefaultColumnDefinitions.valueOf(ImmutableList.copyOf(values)); - } - - public static List computePkIndices( - ColumnDefinitions variables, InternalDriverContext context) { - if (variables.size() == 0) { - return Collections.emptyList(); - } - // The rest of the computation relies on the fact that CQL does not have joins: all variables - // belong to the same keyspace and table. 
- ColumnDefinition firstVariable = variables.get(0); - return context - .getMetadataManager() - .getMetadata() - .getKeyspace(firstVariable.getKeyspace()) - .flatMap(ks -> ks.getTable(firstVariable.getTable())) - .map(RelationMetadata::getPartitionKey) - .map(pk -> findIndices(pk, variables)) - .orElse(Collections.emptyList()); - } - - // Find at which position in `variables` each element of `partitionKey` appears - @VisibleForTesting - static List findIndices(List partitionKey, ColumnDefinitions variables) { - ImmutableList.Builder result = - ImmutableList.builderWithExpectedSize(partitionKey.size()); - for (ColumnMetadata pkColumn : partitionKey) { - int firstIndex = variables.firstIndexOf(pkColumn.getName()); - if (firstIndex < 0) { - // If a single column is missing, we can abort right away - return Collections.emptyList(); - } else { - result.add(firstIndex); - } - } - return result.build(); - } - - public static CoordinatorException toThrowable( - Node node, Error errorMessage, InternalDriverContext context) { - switch (errorMessage.code) { - case ProtocolConstants.ErrorCode.UNPREPARED: - throw new AssertionError( - "UNPREPARED should be handled as a special case, not turned into an exception"); - case ProtocolConstants.ErrorCode.SERVER_ERROR: - return new ServerError(node, errorMessage.message); - case ProtocolConstants.ErrorCode.PROTOCOL_ERROR: - return new ProtocolError(node, errorMessage.message); - case ProtocolConstants.ErrorCode.AUTH_ERROR: - // This method is used for query execution, authentication errors should only happen during - // connection init - return new ProtocolError( - node, "Unexpected authentication error (" + errorMessage.message + ")"); - case ProtocolConstants.ErrorCode.UNAVAILABLE: - Unavailable unavailable = (Unavailable) errorMessage; - return new UnavailableException( - node, - context.getConsistencyLevelRegistry().codeToLevel(unavailable.consistencyLevel), - unavailable.required, - unavailable.alive); - case 
ProtocolConstants.ErrorCode.OVERLOADED: - return new OverloadedException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING: - return new BootstrappingException(node); - case ProtocolConstants.ErrorCode.TRUNCATE_ERROR: - return new TruncateException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.WRITE_TIMEOUT: - WriteTimeout writeTimeout = (WriteTimeout) errorMessage; - return new WriteTimeoutException( - node, - context.getConsistencyLevelRegistry().codeToLevel(writeTimeout.consistencyLevel), - writeTimeout.received, - writeTimeout.blockFor, - context.getWriteTypeRegistry().fromName(writeTimeout.writeType)); - case ProtocolConstants.ErrorCode.READ_TIMEOUT: - ReadTimeout readTimeout = (ReadTimeout) errorMessage; - return new ReadTimeoutException( - node, - context.getConsistencyLevelRegistry().codeToLevel(readTimeout.consistencyLevel), - readTimeout.received, - readTimeout.blockFor, - readTimeout.dataPresent); - case ProtocolConstants.ErrorCode.READ_FAILURE: - ReadFailure readFailure = (ReadFailure) errorMessage; - return new ReadFailureException( - node, - context.getConsistencyLevelRegistry().codeToLevel(readFailure.consistencyLevel), - readFailure.received, - readFailure.blockFor, - readFailure.numFailures, - readFailure.dataPresent, - readFailure.reasonMap); - case ProtocolConstants.ErrorCode.FUNCTION_FAILURE: - return new FunctionFailureException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.WRITE_FAILURE: - WriteFailure writeFailure = (WriteFailure) errorMessage; - return new WriteFailureException( - node, - context.getConsistencyLevelRegistry().codeToLevel(writeFailure.consistencyLevel), - writeFailure.received, - writeFailure.blockFor, - context.getWriteTypeRegistry().fromName(writeFailure.writeType), - writeFailure.numFailures, - writeFailure.reasonMap); - case ProtocolConstants.ErrorCode.CDC_WRITE_FAILURE: - return new CDCWriteFailureException(node, errorMessage.message); - case 
ProtocolConstants.ErrorCode.CAS_WRITE_UNKNOWN: - CASWriteUnknown casFailure = (CASWriteUnknown) errorMessage; - return new CASWriteUnknownException( - node, - context.getConsistencyLevelRegistry().codeToLevel(casFailure.consistencyLevel), - casFailure.received, - casFailure.blockFor); - case ProtocolConstants.ErrorCode.SYNTAX_ERROR: - return new SyntaxError(node, errorMessage.message); - case ProtocolConstants.ErrorCode.UNAUTHORIZED: - return new UnauthorizedException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.INVALID: - return new InvalidQueryException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.CONFIG_ERROR: - return new InvalidConfigurationInQueryException(node, errorMessage.message); - case ProtocolConstants.ErrorCode.ALREADY_EXISTS: - AlreadyExists alreadyExists = (AlreadyExists) errorMessage; - return new AlreadyExistsException(node, alreadyExists.keyspace, alreadyExists.table); - default: - return new ProtocolError(node, "Unknown error code: " + errorMessage.code); - } - } - - /** Use {@link #resolveIdempotence(Request, DriverExecutionProfile)} instead. */ - @Deprecated - public static boolean resolveIdempotence(Request request, InternalDriverContext context) { - return resolveIdempotence(request, resolveExecutionProfile(request, context)); - } - - public static boolean resolveIdempotence( - Request request, DriverExecutionProfile executionProfile) { - Boolean requestIsIdempotent = request.isIdempotent(); - return (requestIsIdempotent == null) - ? executionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE) - : requestIsIdempotent; - } - - /** Use {@link #resolveRequestTimeout(Request, DriverExecutionProfile)} instead. 
*/ - @Deprecated - public static Duration resolveRequestTimeout(Request request, InternalDriverContext context) { - return resolveRequestTimeout(request, resolveExecutionProfile(request, context)); - } - - public static Duration resolveRequestTimeout( - Request request, DriverExecutionProfile executionProfile) { - Duration timeout = request.getTimeout(); - return timeout != null - ? timeout - : executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - } - - /** Use {@link #resolveRetryPolicy(InternalDriverContext, DriverExecutionProfile)} instead. */ - @Deprecated - public static RetryPolicy resolveRetryPolicy(Request request, InternalDriverContext context) { - DriverExecutionProfile executionProfile = resolveExecutionProfile(request, context); - return context.getRetryPolicy(executionProfile.getName()); - } - - public static RetryPolicy resolveRetryPolicy( - InternalDriverContext context, DriverExecutionProfile executionProfile) { - return context.getRetryPolicy(executionProfile.getName()); - } - - /** - * Use {@link #resolveSpeculativeExecutionPolicy(InternalDriverContext, DriverExecutionProfile)} - * instead. 
- */ - @Deprecated - public static SpeculativeExecutionPolicy resolveSpeculativeExecutionPolicy( - Request request, InternalDriverContext context) { - DriverExecutionProfile executionProfile = resolveExecutionProfile(request, context); - return context.getSpeculativeExecutionPolicy(executionProfile.getName()); - } - - public static SpeculativeExecutionPolicy resolveSpeculativeExecutionPolicy( - InternalDriverContext context, DriverExecutionProfile executionProfile) { - return context.getSpeculativeExecutionPolicy(executionProfile.getName()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java deleted file mode 100644 index a3d11cff054..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.base.Functions; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class 
CqlPrepareAsyncProcessor - implements RequestProcessor> { - - private static final Logger LOG = LoggerFactory.getLogger(CqlPrepareAsyncProcessor.class); - - protected final Cache> cache; - - public CqlPrepareAsyncProcessor() { - this(Optional.empty()); - } - - public CqlPrepareAsyncProcessor(@NonNull Optional context) { - this(context, Functions.identity()); - } - - protected CqlPrepareAsyncProcessor( - Optional context, - Function, CacheBuilder> decorator) { - - CacheBuilder baseCache = CacheBuilder.newBuilder().weakValues(); - this.cache = decorator.apply(baseCache).build(); - context.ifPresent( - (ctx) -> { - LOG.info("Adding handler to invalidate cached prepared statements on type changes"); - EventExecutor adminExecutor = ctx.getNettyOptions().adminEventExecutorGroup().next(); - ctx.getEventBus() - .register( - TypeChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTypeChanged)); - }); - } - - private static boolean typeMatches(UserDefinedType oldType, DataType typeToCheck) { - - switch (typeToCheck.getProtocolCode()) { - case ProtocolConstants.DataType.UDT: - UserDefinedType udtType = (UserDefinedType) typeToCheck; - return udtType.equals(oldType) - ? 
true - : Iterables.any(udtType.getFieldTypes(), (testType) -> typeMatches(oldType, testType)); - case ProtocolConstants.DataType.LIST: - ListType listType = (ListType) typeToCheck; - return typeMatches(oldType, listType.getElementType()); - case ProtocolConstants.DataType.SET: - SetType setType = (SetType) typeToCheck; - return typeMatches(oldType, setType.getElementType()); - case ProtocolConstants.DataType.MAP: - MapType mapType = (MapType) typeToCheck; - return typeMatches(oldType, mapType.getKeyType()) - || typeMatches(oldType, mapType.getValueType()); - case ProtocolConstants.DataType.TUPLE: - TupleType tupleType = (TupleType) typeToCheck; - return Iterables.any( - tupleType.getComponentTypes(), (testType) -> typeMatches(oldType, testType)); - default: - return false; - } - } - - private void onTypeChanged(TypeChangeEvent event) { - for (Map.Entry> entry : - this.cache.asMap().entrySet()) { - - try { - PreparedStatement stmt = entry.getValue().get(); - if (Iterables.any( - stmt.getResultSetDefinitions(), (def) -> typeMatches(event.oldType, def.getType())) - || Iterables.any( - stmt.getVariableDefinitions(), - (def) -> typeMatches(event.oldType, def.getType()))) { - - this.cache.invalidate(entry.getKey()); - this.cache.cleanUp(); - } - } catch (Exception e) { - LOG.info("Exception while invalidating prepared statement cache due to UDT change", e); - } - } - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof PrepareRequest && resultType.equals(PrepareRequest.ASYNC); - } - - @Override - public CompletionStage process( - PrepareRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - try { - CompletableFuture result = cache.getIfPresent(request); - if (result == null) { - CompletableFuture mine = new CompletableFuture<>(); - result = cache.get(request, () -> mine); - if (result == mine) { - new CqlPrepareHandler(request, session, context, 
sessionLogPrefix) - .handle() - .whenComplete( - (preparedStatement, error) -> { - if (error != null) { - mine.completeExceptionally(error); - cache.invalidate(request); // Make sure failure isn't cached indefinitely - } else { - mine.complete(preparedStatement); - } - }); - } - } - // Return a defensive copy. So if a client cancels its request, the cache won't be impacted - // nor a potential concurrent request. - return result.thenApply(x -> x); // copy() is available only since Java 9 - } catch (ExecutionException e) { - return CompletableFutures.failedFuture(e.getCause()); - } - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } - - public Cache> getCache() { - return cache; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java deleted file mode 100644 index 1ee1f303ab2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; 
-import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Handles the lifecycle of the preparation of a CQL statement. 
*/ -@ThreadSafe -public class CqlPrepareHandler implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(CqlPrepareHandler.class); - - private final long startTimeNanos; - private final String logPrefix; - private final PrepareRequest initialRequest; - private final DefaultSession session; - private final InternalDriverContext context; - private final Queue queryPlan; - protected final CompletableFuture result; - private final Timer timer; - private final Timeout scheduledTimeout; - private final RequestThrottler throttler; - private final Boolean prepareOnAllNodes; - private final DriverExecutionProfile executionProfile; - private volatile InitialPrepareCallback initialCallback; - - // The errors on the nodes that were already tried (lazily initialized on the first error). - // We don't use a map because nodes can appear multiple times. - private volatile List> errors; - - protected CqlPrepareHandler( - PrepareRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - this.startTimeNanos = System.nanoTime(); - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new handler for prepare request {}", logPrefix, request); - - this.initialRequest = request; - this.session = session; - this.context = context; - executionProfile = Conversions.resolveExecutionProfile(request, context); - this.queryPlan = - context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(request, executionProfile.getName(), session); - - this.result = new CompletableFuture<>(); - this.result.exceptionally( - t -> { - try { - if (t instanceof CancellationException) { - cancelTimeout(); - context.getRequestThrottler().signalCancel(this); - } - } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); - } - return null; - }); - this.timer = context.getNettyOptions().getTimer(); - - Duration timeout = Conversions.resolveRequestTimeout(request, 
executionProfile); - this.scheduledTimeout = scheduleTimeout(timeout); - this.prepareOnAllNodes = executionProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES); - - this.throttler = context.getRequestThrottler(); - this.throttler.register(this); - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialRequest, context); - if (wasDelayed) { - session - .getMetricUpdater() - .updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - sendRequest(initialRequest, null, 0); - } - - public CompletableFuture handle() { - return result; - } - - private Timeout scheduleTimeout(Duration timeoutDuration) { - if (timeoutDuration.toNanos() > 0) { - return this.timer.newTimeout( - (Timeout timeout1) -> { - setFinalError(new DriverTimeoutException("Query timed out after " + timeoutDuration)); - if (initialCallback != null) { - initialCallback.cancel(); - } - }, - timeoutDuration.toNanos(), - TimeUnit.NANOSECONDS); - } else { - return null; - } - } - - private void cancelTimeout() { - if (this.scheduledTimeout != null) { - this.scheduledTimeout.cancel(); - } - } - - private void sendRequest(PrepareRequest request, Node node, int retryCount) { - if (result.isDone()) { - return; - } - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { - while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - setFinalError(AllNodesFailedException.fromErrors(this.errors)); - } else { - InitialPrepareCallback initialPrepareCallback = - new InitialPrepareCallback(request, node, channel, retryCount); - - Prepare message = toPrepareMessage(request); - 
- channel - .write(message, false, request.getCustomPayload(), initialPrepareCallback) - .addListener(initialPrepareCallback); - } - } - - @NonNull - private Prepare toPrepareMessage(PrepareRequest request) { - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry registry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = request.getKeyspace(); - if (keyspace != null - && !registry.supports(protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - return new Prepare(request.getQuery(), (keyspace == null) ? null : keyspace.asInternal()); - } - - private void recordError(Node node, Throwable error) { - // Use a local variable to do only a single single volatile read in the nominal case - List> errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - synchronized (CqlPrepareHandler.this) { - errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - this.errors = errorsSnapshot = new CopyOnWriteArrayList<>(); - } - } - } - errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - private void setFinalResult(PrepareRequest request, Prepared response) { - - // Whatever happens below, we're done with this stream id - throttler.signalSuccess(this); - - DefaultPreparedStatement preparedStatement = - Conversions.toPreparedStatement(response, request, context); - - session - .getRepreparePayloads() - .put(preparedStatement.getId(), preparedStatement.getRepreparePayload()); - if (prepareOnAllNodes) { - prepareOnOtherNodes(request) - .thenRun( - () -> { - LOG.trace( - "[{}] Done repreparing on other nodes, completing the request", logPrefix); - result.complete(preparedStatement); - }) - .exceptionally( - error -> { - result.completeExceptionally(error); - return null; - }); - } else { - LOG.trace("[{}] Prepare on all nodes is disabled, completing the request", logPrefix); - 
result.complete(preparedStatement); - } - } - - private CompletionStage prepareOnOtherNodes(PrepareRequest request) { - List> otherNodesFutures = new ArrayList<>(); - // Only process the rest of the query plan. Any node before that is either the coordinator, or - // a node that failed (we assume that retrying right now has little chance of success). - for (Node node : queryPlan) { - otherNodesFutures.add(prepareOnOtherNode(request, node)); - } - return CompletableFutures.allDone(otherNodesFutures); - } - - // Try to reprepare on another node, after the initial query has succeeded. Errors are not - // blocking, the preparation will be retried later on that node. Simply warn and move on. - private CompletionStage prepareOnOtherNode(PrepareRequest request, Node node) { - LOG.trace("[{}] Repreparing on {}", logPrefix, node); - DriverChannel channel = session.getChannel(node, logPrefix); - if (channel == null) { - LOG.trace("[{}] Could not get a channel to reprepare on {}, skipping", logPrefix, node); - return CompletableFuture.completedFuture(null); - } else { - ThrottledAdminRequestHandler handler = - ThrottledAdminRequestHandler.prepare( - channel, - false, - toPrepareMessage(request), - request.getCustomPayload(), - Conversions.resolveRequestTimeout(request, executionProfile), - throttler, - session.getMetricUpdater(), - logPrefix); - return handler - .start() - .handle( - (result, error) -> { - if (error == null) { - LOG.trace("[{}] Successfully reprepared on {}", logPrefix, node); - } else { - Loggers.warnWithException( - LOG, "[{}] Error while repreparing on {}", node, logPrefix, error); - } - return null; - }); - } - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(initialRequest, context); - session - .getMetricUpdater() - .incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(error); - } - 
- private void setFinalError(Throwable error) { - if (result.completeExceptionally(error)) { - cancelTimeout(); - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - } - - private class InitialPrepareCallback - implements ResponseCallback, GenericFutureListener> { - private final PrepareRequest request; - private final Node node; - private final DriverChannel channel; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final int retryCount; - - private InitialPrepareCallback( - PrepareRequest request, Node node, DriverChannel channel, int retryCount) { - this.request = request; - this.node = node; - this.channel = channel; - this.retryCount = retryCount; - } - - // this gets invoked once the write completes. - @Override - public void operationComplete(Future future) { - if (!future.isSuccess()) { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - node, - future.cause().toString()); - recordError(node, future.cause()); - sendRequest(request, null, retryCount); // try next host - } else { - if (result.isDone()) { - // Might happen if the timeout just fired - cancel(); - } else { - LOG.trace("[{}] Request sent to {}", logPrefix, node); - initialCallback = this; - } - } - } - - @Override - public void onResponse(Frame responseFrame) { - if (result.isDone()) { - return; - } - try { - Message responseMessage = responseFrame.message; - if (responseMessage instanceof Prepared) { - LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult(request, (Prepared) responseMessage); - } else if (responseMessage instanceof Error) { - LOG.trace("[{}] Got error response, processing", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - setFinalError(new 
IllegalStateException("Unexpected response " + responseMessage)); - } - } catch (Throwable t) { - setFinalError(t); - } - } - - private void processErrorResponse(Error errorMessage) { - if (errorMessage.code == ProtocolConstants.ErrorCode.UNPREPARED - || errorMessage.code == ProtocolConstants.ErrorCode.ALREADY_EXISTS - || errorMessage.code == ProtocolConstants.ErrorCode.READ_FAILURE - || errorMessage.code == ProtocolConstants.ErrorCode.READ_TIMEOUT - || errorMessage.code == ProtocolConstants.ErrorCode.WRITE_FAILURE - || errorMessage.code == ProtocolConstants.ErrorCode.WRITE_TIMEOUT - || errorMessage.code == ProtocolConstants.ErrorCode.UNAVAILABLE - || errorMessage.code == ProtocolConstants.ErrorCode.TRUNCATE_ERROR) { - setFinalError( - new IllegalStateException( - "Unexpected server error for a PREPARE query" + errorMessage)); - return; - } - CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - sendRequest(request, null, retryCount); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - setFinalError(error); - } else { - // Because prepare requests are known to always be idempotent, we call the retry policy - // directly, without checking the flag. 
- RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - RetryVerdict verdict = retryPolicy.onErrorResponseVerdict(request, error, retryCount); - processRetryVerdict(verdict, error); - } - } - - private void processRetryVerdict(RetryVerdict verdict, Throwable error) { - RetryDecision decision = verdict.getRetryDecision(); - LOG.trace("[{}] Processing retry decision {}", logPrefix, decision); - switch (decision) { - case RETRY_SAME: - recordError(node, error); - sendRequest(verdict.getRetryRequest(request), node, retryCount + 1); - break; - case RETRY_NEXT: - recordError(node, error); - sendRequest(verdict.getRetryRequest(request), null, retryCount + 1); - break; - case RETHROW: - setFinalError(error); - break; - case IGNORE: - setFinalError( - new IllegalArgumentException( - "IGNORE decisions are not allowed for prepare requests, " - + "please fix your retry policy.")); - break; - } - } - - @Override - public void onFailure(Throwable error) { - if (result.isDone()) { - return; - } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error.toString()); - RetryVerdict verdict; - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(request, error, retryCount); - } catch (Throwable cause) { - setFinalError( - new IllegalStateException("Unexpected error while invoking the retry policy", cause)); - return; - } - processRetryVerdict(verdict, error); - } - - public void cancel() { - try { - if (!channel.closeFuture().isDone()) { - this.channel.cancel(this); - } - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); - } - } - - @Override - public String toString() { - return logPrefix; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java deleted file 
mode 100644 index 0896df07140..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import java.util.concurrent.CompletableFuture; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlPrepareSyncProcessor - implements RequestProcessor { - - private final CqlPrepareAsyncProcessor asyncProcessor; - - /** - * Note: if you also 
register a {@link CqlPrepareAsyncProcessor} with your session, make sure that - * you pass that same instance to this constructor. This is necessary for proper behavior of the - * prepared statement cache. - */ - public CqlPrepareSyncProcessor(CqlPrepareAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof PrepareRequest && resultType.equals(PrepareRequest.SYNC); - } - - @Override - public PreparedStatement process( - PrepareRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - } - - public Cache> getCache() { - return asyncProcessor.getCache(); - } - - @Override - public PreparedStatement newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java deleted file mode 100644 index 3013848372b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlRequestAsyncProcessor - implements RequestProcessor, CompletionStage> { - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(Statement.ASYNC); - } - - @Override - public CompletionStage process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - return new CqlRequestHandler(request, session, context, sessionLogPrefix).handle(); - } - - @Override - public CompletionStage newFailure(RuntimeException error) { - return CompletableFutures.failedFuture(error); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java deleted file mode 100644 index 6842547b11a..00000000000 
--- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java +++ /dev/null @@ -1,976 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import 
com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; -import com.datastax.oss.driver.api.core.servererrors.ProtocolError; -import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import com.datastax.oss.driver.internal.core.util.Loggers; -import 
com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Result; -import com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.SchemaChange; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.response.result.Void; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.handler.codec.EncoderException; -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.AbstractMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class CqlRequestHandler implements Throttled { - - private static final Logger LOG = LoggerFactory.getLogger(CqlRequestHandler.class); - private static final long NANOTIME_NOT_MEASURED_YET = -1; - - private final long startTimeNanos; - private final String 
handlerLogPrefix; - private final Statement initialStatement; - private final DefaultSession session; - private final CqlIdentifier keyspace; - private final InternalDriverContext context; - protected final CompletableFuture result; - private final Timer timer; - /** - * How many speculative executions are currently running (including the initial execution). We - * track this in order to know when to fail the request if all executions have reached the end of - * the query plan. - */ - private final AtomicInteger activeExecutionsCount; - /** - * How many speculative executions have started (excluding the initial execution), whether they - * have completed or not. We track this in order to fill {@link - * ExecutionInfo#getSpeculativeExecutionCount()}. - */ - private final AtomicInteger startedSpeculativeExecutionsCount; - - final Timeout scheduledTimeout; - final List scheduledExecutions; - private final List inFlightCallbacks; - private final RequestThrottler throttler; - private final RequestTracker requestTracker; - private final Optional requestIdGenerator; - private final SessionMetricUpdater sessionMetricUpdater; - private final DriverExecutionProfile executionProfile; - - // The errors on the nodes that were already tried (lazily initialized on the first error). - // We don't use a map because nodes can appear multiple times. 
- private volatile List> errors; - - private final Joiner logPrefixJoiner = Joiner.on('|'); - private final String sessionName; - private final String sessionRequestId; - - protected CqlRequestHandler( - Statement statement, - DefaultSession session, - InternalDriverContext context, - String sessionName) { - - this.startTimeNanos = System.nanoTime(); - this.requestIdGenerator = context.getRequestIdGenerator(); - this.sessionName = sessionName; - this.sessionRequestId = - this.requestIdGenerator - .map(RequestIdGenerator::getSessionRequestId) - .orElse(Integer.toString(this.hashCode())); - this.handlerLogPrefix = logPrefixJoiner.join(sessionName, sessionRequestId); - LOG.trace("[{}] Creating new handler for request {}", handlerLogPrefix, statement); - - this.initialStatement = statement; - this.session = session; - this.keyspace = session.getKeyspace().orElse(null); - this.context = context; - this.result = new CompletableFuture<>(); - this.result.exceptionally( - t -> { - try { - if (t instanceof CancellationException) { - cancelScheduledTasks(); - context.getRequestThrottler().signalCancel(this); - } - } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", handlerLogPrefix, t2); - } - return null; - }); - - this.activeExecutionsCount = new AtomicInteger(1); - this.startedSpeculativeExecutionsCount = new AtomicInteger(0); - this.scheduledExecutions = new CopyOnWriteArrayList<>(); - this.inFlightCallbacks = new CopyOnWriteArrayList<>(); - - this.requestTracker = context.getRequestTracker(); - this.sessionMetricUpdater = session.getMetricUpdater(); - - this.timer = context.getNettyOptions().getTimer(); - this.executionProfile = Conversions.resolveExecutionProfile(initialStatement, context); - Duration timeout = Conversions.resolveRequestTimeout(statement, executionProfile); - this.scheduledTimeout = scheduleTimeout(timeout); - - this.throttler = context.getRequestThrottler(); - this.throttler.register(this); - } - - @Override - public 
void onThrottleReady(boolean wasDelayed) { - if (wasDelayed - // avoid call to nanoTime() if metric is disabled: - && sessionMetricUpdater.isEnabled( - DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { - sessionMetricUpdater.updateTimer( - DefaultSessionMetric.THROTTLING_DELAY, - executionProfile.getName(), - System.nanoTime() - startTimeNanos, - TimeUnit.NANOSECONDS); - } - Queue queryPlan = - this.initialStatement.getNode() != null - ? new SimpleQueryPlan(this.initialStatement.getNode()) - : context - .getLoadBalancingPolicyWrapper() - .newQueryPlan(initialStatement, executionProfile.getName(), session); - sendRequest(initialStatement, null, queryPlan, 0, 0, true); - } - - public CompletionStage handle() { - return result; - } - - private Timeout scheduleTimeout(Duration timeoutDuration) { - if (timeoutDuration.toNanos() > 0) { - try { - return this.timer.newTimeout( - (Timeout timeout1) -> - setFinalError( - initialStatement, - new DriverTimeoutException("Query timed out after " + timeoutDuration), - null, - -1), - timeoutDuration.toNanos(), - TimeUnit.NANOSECONDS); - } catch (IllegalStateException e) { - // If we raced with session shutdown the timer might be closed already, rethrow with a more - // explicit message - result.completeExceptionally( - "cannot be started once stopped".equals(e.getMessage()) - ? new IllegalStateException("Session is closed") - : e); - } - } - return null; - } - - /** - * Sends the request to the next available node. - * - * @param statement The statement to execute. - * @param retriedNode if not null, it will be attempted first before the rest of the query plan. - * @param queryPlan the list of nodes to try (shared with all other executions) - * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. 
- * @param retryCount the number of times that the retry policy was invoked for this execution - * already (note that some internal retries don't go through the policy, and therefore don't - * increment this counter) - * @param scheduleNextExecution whether to schedule the next speculative execution - */ - private void sendRequest( - Statement statement, - Node retriedNode, - Queue queryPlan, - int currentExecutionIndex, - int retryCount, - boolean scheduleNextExecution) { - if (result.isDone()) { - return; - } - Node node = retriedNode; - DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, handlerLogPrefix)) == null) { - while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, handlerLogPrefix); - if (channel != null) { - break; - } else { - recordError(node, new NodeUnavailableException(node)); - } - } - } - if (channel == null) { - // We've reached the end of the query plan without finding any node to write to - if (!result.isDone() && activeExecutionsCount.decrementAndGet() == 0) { - // We're the last execution so fail the result - setFinalError(statement, AllNodesFailedException.fromErrors(this.errors), null, -1); - } - } else { - Statement finalStatement = statement; - String nodeRequestId = - this.requestIdGenerator - .map((g) -> g.getNodeRequestId(finalStatement, sessionRequestId)) - .orElse(Integer.toString(this.hashCode())); - statement = - this.requestIdGenerator - .map((g) -> g.getDecoratedStatement(finalStatement, nodeRequestId)) - .orElse(finalStatement); - - NodeResponseCallback nodeResponseCallback = - new NodeResponseCallback( - statement, - node, - queryPlan, - channel, - currentExecutionIndex, - retryCount, - scheduleNextExecution, - logPrefixJoiner.join(this.sessionName, nodeRequestId, currentExecutionIndex)); - Message message = Conversions.toMessage(statement, executionProfile, context); - channel - .write(message, statement.isTracing(), 
statement.getCustomPayload(), nodeResponseCallback) - .addListener(nodeResponseCallback); - } - } - - private void recordError(Node node, Throwable error) { - // Use a local variable to do only a single single volatile read in the nominal case - List> errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - synchronized (CqlRequestHandler.this) { - errorsSnapshot = this.errors; - if (errorsSnapshot == null) { - this.errors = errorsSnapshot = new CopyOnWriteArrayList<>(); - } - } - } - errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); - } - - private void cancelScheduledTasks() { - if (this.scheduledTimeout != null) { - this.scheduledTimeout.cancel(); - } - if (scheduledExecutions != null) { - for (Timeout scheduledExecution : scheduledExecutions) { - scheduledExecution.cancel(); - } - } - for (NodeResponseCallback callback : inFlightCallbacks) { - callback.cancel(); - } - } - - private void setFinalResult( - Result resultMessage, - Frame responseFrame, - boolean schemaInAgreement, - NodeResponseCallback callback) { - try { - ExecutionInfo executionInfo = - buildExecutionInfo(callback, resultMessage, responseFrame, schemaInAgreement); - AsyncResultSet resultSet = - Conversions.toResultSet(resultMessage, executionInfo, session, context); - if (result.complete(resultSet)) { - cancelScheduledTasks(); - throttler.signalSuccess(this); - - // Only call nanoTime() if we're actually going to use it - long completionTimeNanos = NANOTIME_NOT_MEASURED_YET, - totalLatencyNanos = NANOTIME_NOT_MEASURED_YET; - - if (!(requestTracker instanceof NoopRequestTracker)) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - long nodeLatencyNanos = completionTimeNanos - callback.nodeStartTimeNanos; - requestTracker.onNodeSuccess( - callback.statement, - nodeLatencyNanos, - executionProfile, - callback.node, - handlerLogPrefix); - requestTracker.onSuccess( - callback.statement, - totalLatencyNanos, - 
executionProfile, - callback.node, - handlerLogPrefix); - } - if (sessionMetricUpdater.isEnabled( - DefaultSessionMetric.CQL_REQUESTS, executionProfile.getName())) { - if (completionTimeNanos == NANOTIME_NOT_MEASURED_YET) { - completionTimeNanos = System.nanoTime(); - totalLatencyNanos = completionTimeNanos - startTimeNanos; - } - sessionMetricUpdater.updateTimer( - DefaultSessionMetric.CQL_REQUESTS, - executionProfile.getName(), - totalLatencyNanos, - TimeUnit.NANOSECONDS); - } - } - // log the warnings if they have NOT been disabled - if (!executionInfo.getWarnings().isEmpty() - && executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOG_WARNINGS) - && LOG.isWarnEnabled()) { - logServerWarnings(callback.statement, executionProfile, executionInfo.getWarnings()); - } - } catch (Throwable error) { - setFinalError(callback.statement, error, callback.node, -1); - } - } - - private void logServerWarnings( - Statement statement, DriverExecutionProfile executionProfile, List warnings) { - // use the RequestLogFormatter to format the query - StringBuilder statementString = new StringBuilder(); - context - .getRequestLogFormatter() - .appendRequest( - statement, - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH), - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES), - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH), - statementString); - // log each warning separately - warnings.forEach( - (warning) -> - LOG.warn("Query '{}' generated server side warning(s): {}", statementString, warning)); - } - - private ExecutionInfo buildExecutionInfo( - NodeResponseCallback callback, - Result resultMessage, - 
Frame responseFrame, - boolean schemaInAgreement) { - ByteBuffer pagingState = - (resultMessage instanceof Rows) ? ((Rows) resultMessage).getMetadata().pagingState : null; - return new DefaultExecutionInfo( - callback.statement, - callback.node, - startedSpeculativeExecutionsCount.get(), - callback.execution, - errors, - pagingState, - responseFrame, - schemaInAgreement, - session, - context, - executionProfile); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - sessionMetricUpdater.incrementCounter( - DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(initialStatement, error, null, -1); - } - - private void setFinalError(Statement statement, Throwable error, Node node, int execution) { - if (error instanceof DriverException) { - ((DriverException) error) - .setExecutionInfo( - new DefaultExecutionInfo( - statement, - node, - startedSpeculativeExecutionsCount.get(), - execution, - errors, - null, - null, - true, - session, - context, - executionProfile)); - } - if (result.completeExceptionally(error)) { - cancelScheduledTasks(); - if (!(requestTracker instanceof NoopRequestTracker)) { - long latencyNanos = System.nanoTime() - startTimeNanos; - requestTracker.onError( - statement, error, latencyNanos, executionProfile, node, handlerLogPrefix); - } - if (error instanceof DriverTimeoutException) { - throttler.signalTimeout(this); - sessionMetricUpdater.incrementCounter( - DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, executionProfile.getName()); - } else if (!(error instanceof RequestThrottlingException)) { - throttler.signalError(this, error); - } - } - } - - /** - * Handles the interaction with a single node in the query plan. - * - *

An instance of this class is created each time we (re)try a node. - */ - private class NodeResponseCallback - implements ResponseCallback, GenericFutureListener> { - - private final long nodeStartTimeNanos = System.nanoTime(); - private final Statement statement; - private final Node node; - private final Queue queryPlan; - private final DriverChannel channel; - // The identifier of the current execution (0 for the initial execution, 1 for the first - // speculative execution, etc.) - private final int execution; - // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for - // the first attempt of each execution). - private final int retryCount; - private final boolean scheduleNextExecution; - private final String logPrefix; - - private NodeResponseCallback( - Statement statement, - Node node, - Queue queryPlan, - DriverChannel channel, - int execution, - int retryCount, - boolean scheduleNextExecution, - String logPrefix) { - this.statement = statement; - this.node = node; - this.queryPlan = queryPlan; - this.channel = channel; - this.execution = execution; - this.retryCount = retryCount; - this.scheduleNextExecution = scheduleNextExecution; - this.logPrefix = logPrefix; - } - - // this gets invoked once the write completes. 
- @Override - public void operationComplete(Future future) throws Exception { - if (!future.isSuccess()) { - Throwable error = future.cause(); - if (error instanceof EncoderException - && error.getCause() instanceof FrameTooLongException) { - trackNodeError(node, error.getCause(), NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error.getCause(), node, execution); - } else { - LOG.trace( - "[{}] Failed to send request on {}, trying next node (cause: {})", - logPrefix, - channel, - error); - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); - sendRequest( - statement, - null, - queryPlan, - execution, - retryCount, - scheduleNextExecution); // try next node - } - } else { - LOG.trace("[{}] Request sent on {}", logPrefix, channel); - if (result.isDone()) { - // If the handler completed since the last time we checked, cancel directly because we - // don't know if cancelScheduledTasks() has run yet - cancel(); - } else { - inFlightCallbacks.add(this); - if (scheduleNextExecution - && Conversions.resolveIdempotence(statement, executionProfile)) { - int nextExecution = execution + 1; - long nextDelay; - try { - nextDelay = - Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) - .nextExecution(node, keyspace, statement, nextExecution); - } catch (Throwable cause) { - // This is a bug in the policy, but not fatal since we have at least one other - // execution already running. Don't fail the whole request. 
- LOG.error( - "[{}] Unexpected error while invoking the speculative execution policy", - logPrefix, - cause); - return; - } - if (nextDelay >= 0) { - scheduleSpeculativeExecution(nextExecution, nextDelay); - } else { - LOG.trace( - "[{}] Speculative execution policy returned {}, no next execution", - logPrefix, - nextDelay); - } - } - } - } - } - - private void scheduleSpeculativeExecution(int index, long delay) { - LOG.trace("[{}] Scheduling speculative execution {} in {} ms", logPrefix, index, delay); - try { - scheduledExecutions.add( - timer.newTimeout( - (Timeout timeout1) -> { - if (!result.isDone()) { - LOG.trace( - "[{}] Starting speculative execution {}", - CqlRequestHandler.this.handlerLogPrefix, - index); - activeExecutionsCount.incrementAndGet(); - startedSpeculativeExecutionsCount.incrementAndGet(); - // Note that `node` is the first node of the execution, it might not be the - // "slow" one if there were retries, but in practice retries are rare. - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - sendRequest(statement, null, queryPlan, index, 0, true); - } - }, - delay, - TimeUnit.MILLISECONDS)); - } catch (IllegalStateException e) { - // If we're racing with session shutdown, the timer might be stopped already. We don't want - // to schedule more executions anyway, so swallow the error. 
- if (!"cannot be started once stopped".equals(e.getMessage())) { - Loggers.warnWithException( - LOG, "[{}] Error while scheduling speculative execution", logPrefix, e); - } - } - } - - @Override - public void onResponse(Frame responseFrame) { - long nodeResponseTimeNanos = NANOTIME_NOT_MEASURED_YET; - NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.CQL_MESSAGES, executionProfile.getName())) { - nodeResponseTimeNanos = System.nanoTime(); - long nodeLatency = System.nanoTime() - nodeStartTimeNanos; - nodeMetricUpdater.updateTimer( - DefaultNodeMetric.CQL_MESSAGES, - executionProfile.getName(), - nodeLatency, - TimeUnit.NANOSECONDS); - } - inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - try { - Message responseMessage = responseFrame.message; - if (responseMessage instanceof SchemaChange) { - SchemaChange schemaChange = (SchemaChange) responseMessage; - context - .getMetadataManager() - .refreshSchema(schemaChange.keyspace, false, false) - .whenComplete( - (result, error) -> { - boolean schemaInAgreement; - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema after DDL query, " - + "keeping previous version", - logPrefix, - error); - schemaInAgreement = false; - } else { - schemaInAgreement = result.isSchemaInAgreement(); - } - setFinalResult(schemaChange, responseFrame, schemaInAgreement, this); - }); - } else if (responseMessage instanceof SetKeyspace) { - SetKeyspace setKeyspace = (SetKeyspace) responseMessage; - session - .setKeyspace(CqlIdentifier.fromInternal(setKeyspace.keyspace)) - .whenComplete((v, error) -> setFinalResult(setKeyspace, responseFrame, true, this)); - } else if (responseMessage instanceof Result) { - LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult((Result) responseMessage, responseFrame, true, this); - } else if (responseMessage instanceof Error) { - 
LOG.trace("[{}] Got error response, processing", logPrefix); - processErrorResponse((Error) responseMessage); - } else { - trackNodeError( - node, - new IllegalStateException("Unexpected response " + responseMessage), - nodeResponseTimeNanos); - setFinalError( - statement, - new IllegalStateException("Unexpected response " + responseMessage), - node, - execution); - } - } catch (Throwable t) { - trackNodeError(node, t, nodeResponseTimeNanos); - setFinalError(statement, t, node, execution); - } - } - - private void processErrorResponse(Error errorMessage) { - if (errorMessage.code == ProtocolConstants.ErrorCode.UNPREPARED) { - ByteBuffer idToReprepare = ByteBuffer.wrap(((Unprepared) errorMessage).id); - LOG.trace( - "[{}] Statement {} is not prepared on {}, repreparing", - logPrefix, - Bytes.toHexString(idToReprepare), - node); - RepreparePayload repreparePayload = session.getRepreparePayloads().get(idToReprepare); - if (repreparePayload == null) { - throw new IllegalStateException( - String.format( - "Tried to execute unprepared query %s but we don't have the data to reprepare it", - Bytes.toHexString(idToReprepare))); - } - Prepare reprepareMessage = repreparePayload.toMessage(); - ThrottledAdminRequestHandler reprepareHandler = - ThrottledAdminRequestHandler.prepare( - channel, - true, - reprepareMessage, - repreparePayload.customPayload, - Conversions.resolveRequestTimeout(statement, executionProfile), - throttler, - sessionMetricUpdater, - logPrefix); - reprepareHandler - .start() - .handle( - (repreparedId, exception) -> { - if (exception != null) { - // If the error is not recoverable, surface it to the client instead of retrying - if (exception instanceof UnexpectedResponseException) { - Message prepareErrorMessage = - ((UnexpectedResponseException) exception).message; - if (prepareErrorMessage instanceof Error) { - CoordinatorException prepareError = - Conversions.toThrowable(node, (Error) prepareErrorMessage, context); - if (prepareError instanceof 
QueryValidationException - || prepareError instanceof FunctionFailureException - || prepareError instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error on reprepare, rethrowing", logPrefix); - trackNodeError(node, prepareError, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, prepareError, node, execution); - return null; - } - } - } else if (exception instanceof RequestThrottlingException) { - trackNodeError(node, exception, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, exception, node, execution); - return null; - } - recordError(node, exception); - trackNodeError(node, exception, NANOTIME_NOT_MEASURED_YET); - LOG.trace("[{}] Reprepare failed, trying next node", logPrefix); - sendRequest(statement, null, queryPlan, execution, retryCount, false); - } else { - if (!repreparedId.equals(idToReprepare)) { - IllegalStateException illegalStateException = - new IllegalStateException( - String.format( - "ID mismatch while trying to reprepare (expected %s, got %s). " - + "This prepared statement won't work anymore. " - + "This usually happens when you run a 'USE...' 
query after " - + "the statement was prepared.", - Bytes.toHexString(idToReprepare), - Bytes.toHexString(repreparedId))); - trackNodeError(node, illegalStateException, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, illegalStateException, node, execution); - } - LOG.trace("[{}] Reprepare sucessful, retrying", logPrefix); - sendRequest(statement, node, queryPlan, execution, retryCount, false); - } - return null; - }); - return; - } - CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); - NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); - if (error instanceof BootstrappingException) { - LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest(statement, null, queryPlan, execution, retryCount, false); - } else if (error instanceof QueryValidationException - || error instanceof FunctionFailureException - || error instanceof ProtocolError) { - LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); - metricUpdater.incrementCounter(DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error, node, execution); - } else { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - RetryVerdict verdict; - if (error instanceof ReadTimeoutException) { - ReadTimeoutException readTimeout = (ReadTimeoutException) error; - verdict = - retryPolicy.onReadTimeoutVerdict( - statement, - readTimeout.getConsistencyLevel(), - readTimeout.getBlockFor(), - readTimeout.getReceived(), - readTimeout.wasDataPresent(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); - } else if (error instanceof WriteTimeoutException) { - WriteTimeoutException 
writeTimeout = (WriteTimeoutException) error; - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? retryPolicy.onWriteTimeoutVerdict( - statement, - writeTimeout.getConsistencyLevel(), - writeTimeout.getWriteType(), - writeTimeout.getBlockFor(), - writeTimeout.getReceived(), - retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); - } else if (error instanceof UnavailableException) { - UnavailableException unavailable = (UnavailableException) error; - verdict = - retryPolicy.onUnavailableVerdict( - statement, - unavailable.getConsistencyLevel(), - unavailable.getRequired(), - unavailable.getAlive(), - retryCount); - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); - } else { - verdict = - Conversions.resolveIdempotence(statement, executionProfile) - ? 
retryPolicy.onErrorResponseVerdict(statement, error, retryCount) - : RetryVerdict.RETHROW; - updateErrorMetrics( - metricUpdater, - verdict, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); - } - processRetryVerdict(verdict, error); - } - } - - private void processRetryVerdict(RetryVerdict verdict, Throwable error) { - LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - node, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETRY_NEXT: - recordError(node, error); - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest( - verdict.getRetryRequest(statement), - null, - queryPlan, - execution, - retryCount + 1, - false); - break; - case RETHROW: - trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(statement, error, node, execution); - break; - case IGNORE: - setFinalResult(Void.INSTANCE, null, true, this); - break; - } - } - - private void updateErrorMetrics( - NodeMetricUpdater metricUpdater, - RetryVerdict verdict, - DefaultNodeMetric error, - DefaultNodeMetric retriesOnError, - DefaultNodeMetric ignoresOnError) { - metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (verdict.getRetryDecision()) { - case RETRY_SAME: - case RETRY_NEXT: - metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); - metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); - break; - case IGNORE: - metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); - metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); - break; - case RETHROW: - // nothing do do - } - } - - @Override - public void onFailure(Throwable error) { - 
inFlightCallbacks.remove(this); - if (result.isDone()) { - return; - } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error); - RetryVerdict verdict; - if (!Conversions.resolveIdempotence(statement, executionProfile) - || error instanceof FrameTooLongException) { - verdict = RetryVerdict.RETHROW; - } else { - try { - RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); - verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); - } catch (Throwable cause) { - setFinalError( - statement, - new IllegalStateException("Unexpected error while invoking the retry policy", cause), - null, - execution); - return; - } - } - processRetryVerdict(verdict, error); - updateErrorMetrics( - ((DefaultNode) node).getMetricUpdater(), - verdict, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED); - } - - public void cancel() { - try { - if (!channel.closeFuture().isDone()) { - this.channel.cancel(this); - } - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); - } - } - - /** - * @param nodeResponseTimeNanos the time we received the response, if it's already been - * measured. 
If {@link #NANOTIME_NOT_MEASURED_YET}, it hasn't and we need to measure it now - * (this is to avoid unnecessary calls to System.nanoTime) - */ - private void trackNodeError(Node node, Throwable error, long nodeResponseTimeNanos) { - if (requestTracker instanceof NoopRequestTracker) { - return; - } - if (nodeResponseTimeNanos == NANOTIME_NOT_MEASURED_YET) { - nodeResponseTimeNanos = System.nanoTime(); - } - long latencyNanos = nodeResponseTimeNanos - this.nodeStartTimeNanos; - requestTracker.onNodeError(statement, error, latencyNanos, executionProfile, node, logPrefix); - } - - @Override - public String toString() { - return logPrefix; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java deleted file mode 100644 index d3bd40149fb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlRequestSyncProcessor implements RequestProcessor, ResultSet> { - - private final CqlRequestAsyncProcessor asyncProcessor; - - public CqlRequestSyncProcessor(CqlRequestAsyncProcessor asyncProcessor) { - this.asyncProcessor = asyncProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof Statement && resultType.equals(Statement.SYNC); - } - - @Override - public ResultSet process( - Statement request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - BlockingOperation.checkNotDriverThread(); - AsyncResultSet firstPage = - CompletableFutures.getUninterruptibly( - asyncProcessor.process(request, session, context, sessionLogPrefix)); - return ResultSets.newInstance(firstPage); - } - - @Override - public ResultSet newFailure(RuntimeException error) { - throw error; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java deleted file mode 100644 index 243e9aeb775..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe // wraps a mutable queue -public class DefaultAsyncResultSet implements AsyncResultSet { - - private static final Logger LOG = 
LoggerFactory.getLogger(DefaultAsyncResultSet.class); - - private final ColumnDefinitions definitions; - private final ExecutionInfo executionInfo; - private final CqlSession session; - private final CountingIterator iterator; - private final Iterable currentPage; - - public DefaultAsyncResultSet( - ColumnDefinitions definitions, - ExecutionInfo executionInfo, - Queue> data, - CqlSession session, - InternalDriverContext context) { - this.definitions = definitions; - this.executionInfo = executionInfo; - this.session = session; - this.iterator = - new CountingIterator(data.size()) { - @Override - protected Row computeNext() { - List rowData = data.poll(); - return (rowData == null) ? endOfData() : new DefaultRow(definitions, rowData, context); - } - }; - this.currentPage = () -> iterator; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return definitions; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public int remaining() { - return iterator.remaining(); - } - - @Override - public boolean hasMorePages() { - return executionInfo.getPagingState() != null; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - ByteBuffer nextState = executionInfo.getPagingState(); - if (nextState == null) { - throw new IllegalStateException( - "No next page. 
Use #hasMorePages before calling this method to avoid this error."); - } - Statement statement = (Statement) executionInfo.getRequest(); - LOG.trace("Fetching next page for {}", statement); - Statement nextStatement = statement.copy(nextState); - return session.executeAsync(nextStatement); - } - - @Override - public boolean wasApplied() { - if (!definitions.contains("[applied]") - || !definitions.get("[applied]").getType().equals(DataTypes.BOOLEAN)) { - return true; - } else if (iterator.hasNext()) { - // Note that [applied] has the same value for all rows, so as long as we have a row we don't - // care which one it is. - return iterator.peek().getBoolean("[applied]"); - } else { - // If the server provided [applied], it means there was at least one row. So if we get here it - // means the client consumed all the rows before, we can't handle that case because we have - // nowhere left to read the boolean from. - throw new IllegalStateException("This method must be called before consuming all the rows"); - } - } - - static AsyncResultSet empty(final ExecutionInfo executionInfo) { - return new AsyncResultSet() { - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @NonNull - @Override - public Iterable currentPage() { - return Collections.emptyList(); - } - - @Override - public int remaining() { - return 0; - } - - @Override - public boolean hasMorePages() { - return false; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - throw new IllegalStateException( - "No next page. 
Use #hasMorePages before calling this method to avoid this error."); - } - - @Override - public boolean wasApplied() { - return true; - } - }; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java deleted file mode 100644 index 38b6cf242a1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java +++ /dev/null @@ -1,788 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchType; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultBatchStatement implements BatchStatement { - - private final BatchType batchType; - private final List> statements; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private final CqlIdentifier keyspace; - private final CqlIdentifier routingKeyspace; - private final ByteBuffer routingKey; - private final Token routingToken; - private final Map customPayload; - private final Boolean idempotent; - private final boolean tracing; - private final long timestamp; - private final ByteBuffer pagingState; - private final int pageSize; - private final ConsistencyLevel consistencyLevel; - private final ConsistencyLevel serialConsistencyLevel; - private final Duration timeout; - private final Node node; - private final int nowInSeconds; - - public DefaultBatchStatement( - BatchType batchType, - List> statements, - String executionProfileName, - DriverExecutionProfile 
executionProfile, - CqlIdentifier keyspace, - CqlIdentifier routingKeyspace, - ByteBuffer routingKey, - Token routingToken, - Map customPayload, - Boolean idempotent, - boolean tracing, - long timestamp, - ByteBuffer pagingState, - int pageSize, - ConsistencyLevel consistencyLevel, - ConsistencyLevel serialConsistencyLevel, - Duration timeout, - Node node, - int nowInSeconds) { - this.batchType = batchType; - this.statements = ImmutableList.copyOf(statements); - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.keyspace = keyspace; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - this.customPayload = customPayload; - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.node = node; - this.nowInSeconds = nowInSeconds; - } - - @NonNull - @Override - public BatchType getBatchType() { - return batchType; - } - - @NonNull - @Override - public BatchStatement setBatchType(@NonNull BatchType newBatchType) { - return new DefaultBatchStatement( - newBatchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement setKeyspace(@Nullable CqlIdentifier newKeyspace) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - newKeyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - 
timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement add(@NonNull BatchableStatement statement) { - if (statements.size() >= 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } else { - return new DefaultBatchStatement( - batchType, - ImmutableList.>builder().addAll(statements).add(statement).build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - } - - @NonNull - @Override - public BatchStatement addAll(@NonNull Iterable> newStatements) { - if (statements.size() + Iterables.size(newStatements) > 0xFFFF) { - throw new IllegalStateException( - "Batch statement cannot contain more than " + 0xFFFF + " statements."); - } else { - return new DefaultBatchStatement( - batchType, - ImmutableList.>builder() - .addAll(statements) - .addAll(newStatements) - .build(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - } - - @Override - public int size() { - return statements.size(); - } - - @NonNull - @Override - public BatchStatement clear() { - return new DefaultBatchStatement( - batchType, - ImmutableList.of(), - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Iterator> iterator() { - return statements.iterator(); - } - - @Override - public ByteBuffer getPagingState() { - 
return pagingState; - } - - @NonNull - @Override - public BatchStatement setPagingState(ByteBuffer newPagingState) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - newPagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public int getPageSize() { - return pageSize; - } - - @NonNull - @Override - public BatchStatement setPageSize(int newPageSize) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - newPageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @NonNull - @Override - public BatchStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - newConsistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistencyLevel; - } - - @NonNull - @Override - public BatchStatement setSerialConsistencyLevel( - @Nullable ConsistencyLevel newSerialConsistencyLevel) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - 
pageSize, - consistencyLevel, - newSerialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public BatchStatement setExecutionProfileName(@Nullable String newConfigProfileName) { - return new DefaultBatchStatement( - batchType, - statements, - newConfigProfileName, - (newConfigProfileName == null) ? executionProfile : null, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public DefaultBatchStatement setExecutionProfile(@Nullable DriverExecutionProfile newProfile) { - return new DefaultBatchStatement( - batchType, - statements, - (newProfile == null) ? executionProfileName : null, - newProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public CqlIdentifier getKeyspace() { - if (keyspace != null) { - return keyspace; - } else { - for (BatchableStatement statement : statements) { - if (statement instanceof SimpleStatement && statement.getKeyspace() != null) { - return statement.getKeyspace(); - } - } - } - return null; - } - - @Override - public CqlIdentifier getRoutingKeyspace() { - if (routingKeyspace != null) { - return routingKeyspace; - } else { - for (BatchableStatement statement : statements) { - CqlIdentifier ks = statement.getRoutingKeyspace(); - if (ks != null) { - return ks; - } - } - } - return null; - } - - @NonNull - @Override - public BatchStatement setRoutingKeyspace(CqlIdentifier newRoutingKeyspace) { - return new 
DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - newRoutingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement setNode(@Nullable Node newNode) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - newNode, - nowInSeconds); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @Override - public ByteBuffer getRoutingKey() { - if (routingKey != null) { - return routingKey; - } else { - for (BatchableStatement statement : statements) { - ByteBuffer key = statement.getRoutingKey(); - if (key != null) { - return key; - } - } - } - return null; - } - - @NonNull - @Override - public BatchStatement setRoutingKey(ByteBuffer newRoutingKey) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - newRoutingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public Token getRoutingToken() { - if (routingToken != null) { - return routingToken; - } else { - for (BatchableStatement statement : statements) { - Token token = statement.getRoutingToken(); - if (token != null) { - return token; - } - } - } - return null; - } - - @NonNull - @Override - public BatchStatement setRoutingToken(Token newRoutingToken) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - 
executionProfile, - keyspace, - routingKeyspace, - routingKey, - newRoutingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public DefaultBatchStatement setCustomPayload(@NonNull Map newCustomPayload) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - newCustomPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public Boolean isIdempotent() { - return idempotent; - } - - @Nullable - @Override - public Duration getTimeout() { - return null; - } - - @NonNull - @Override - public DefaultBatchStatement setIdempotent(Boolean newIdempotence) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - newIdempotence, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public boolean isTracing() { - return tracing; - } - - @NonNull - @Override - public BatchStatement setTracing(boolean newTracing) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - newTracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public long getQueryTimestamp() { - return timestamp; - } - - @NonNull - @Override - public BatchStatement 
setQueryTimestamp(long newTimestamp) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - newTimestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public BatchStatement setTimeout(@Nullable Duration newTimeout) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - newTimeout, - node, - nowInSeconds); - } - - @Override - public int getNowInSeconds() { - return nowInSeconds; - } - - @NonNull - @Override - public BatchStatement setNowInSeconds(int newNowInSeconds) { - return new DefaultBatchStatement( - batchType, - statements, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - newNowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java deleted file mode 100644 index 3cf99c1be6e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java +++ /dev/null @@ -1,764 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultBoundStatement implements BoundStatement { - - private final PreparedStatement preparedStatement; - private final ColumnDefinitions variableDefinitions; - private final ByteBuffer[] values; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private 
final CqlIdentifier routingKeyspace; - private final ByteBuffer routingKey; - private final Token routingToken; - private final Map customPayload; - private final Boolean idempotent; - private final boolean tracing; - private final long timestamp; - private final ByteBuffer pagingState; - private final int pageSize; - private final ConsistencyLevel consistencyLevel; - private final ConsistencyLevel serialConsistencyLevel; - private final Duration timeout; - private final CodecRegistry codecRegistry; - private final ProtocolVersion protocolVersion; - private final Node node; - private final int nowInSeconds; - - public DefaultBoundStatement( - PreparedStatement preparedStatement, - ColumnDefinitions variableDefinitions, - ByteBuffer[] values, - String executionProfileName, - DriverExecutionProfile executionProfile, - CqlIdentifier routingKeyspace, - ByteBuffer routingKey, - Token routingToken, - Map customPayload, - Boolean idempotent, - boolean tracing, - long timestamp, - ByteBuffer pagingState, - int pageSize, - ConsistencyLevel consistencyLevel, - ConsistencyLevel serialConsistencyLevel, - Duration timeout, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion, - Node node, - int nowInSeconds) { - this.preparedStatement = preparedStatement; - this.variableDefinitions = variableDefinitions; - this.values = values; - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - this.customPayload = customPayload; - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.codecRegistry = codecRegistry; - this.protocolVersion = protocolVersion; - this.node = node; - this.nowInSeconds = 
nowInSeconds; - } - - @Override - public int size() { - return variableDefinitions.size(); - } - - @NonNull - @Override - public DataType getType(int i) { - return variableDefinitions.get(i).getType(); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = variableDefinitions.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int indexOf = variableDefinitions.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = variableDefinitions.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = variableDefinitions.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a variable in this bound statement"); - } - return indexOf; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return codecRegistry; - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return protocolVersion; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @NonNull - @Override - public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { - ByteBuffer[] newValues = Arrays.copyOf(values, values.length); - newValues[i] = v; - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - newValues, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - 
consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @NonNull - @Override - public PreparedStatement getPreparedStatement() { - return preparedStatement; - } - - @NonNull - @Override - public List getValues() { - return Arrays.asList(values); - } - - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public BoundStatement setExecutionProfileName(@Nullable String newConfigProfileName) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - newConfigProfileName, - (newConfigProfileName == null) ? executionProfile : null, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public BoundStatement setExecutionProfile(@Nullable DriverExecutionProfile newProfile) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - (newProfile == null) ? executionProfileName : null, - newProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public CqlIdentifier getRoutingKeyspace() { - // If it was set explicitly, use that value, else try to infer it from the prepared statement's - // metadata - if (routingKeyspace != null) { - return routingKeyspace; - } else { - ColumnDefinitions definitions = preparedStatement.getVariableDefinitions(); - return (definitions.size() == 0) ? 
null : definitions.get(0).getKeyspace(); - } - } - - @NonNull - @Override - public BoundStatement setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeyspace) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - newRoutingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @NonNull - @Override - public BoundStatement setNode(@Nullable Node newNode) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - newNode, - nowInSeconds); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @Override - public ByteBuffer getRoutingKey() { - if (routingKey != null) { - return routingKey; - } else { - List indices = preparedStatement.getPartitionKeyIndices(); - if (indices.isEmpty()) { - return null; - } else if (indices.size() == 1) { - int index = indices.get(0); - return isSet(index) ? 
getBytesUnsafe(index) : null; - } else { - ByteBuffer[] components = new ByteBuffer[indices.size()]; - for (int i = 0; i < components.length; i++) { - ByteBuffer value; - int index = indices.get(i); - if (!isSet(index) || (value = getBytesUnsafe(index)) == null) { - return null; - } else { - components[i] = value; - } - } - return RoutingKey.compose(components); - } - } - } - - @NonNull - @Override - public BoundStatement setRoutingKey(@Nullable ByteBuffer newRoutingKey) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - newRoutingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public Token getRoutingToken() { - return routingToken; - } - - @NonNull - @Override - public BoundStatement setRoutingToken(@Nullable Token newRoutingToken) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - newRoutingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public BoundStatement setCustomPayload(@NonNull Map newCustomPayload) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - newCustomPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - 
nowInSeconds); - } - - @Override - public Boolean isIdempotent() { - return idempotent; - } - - @NonNull - @Override - public BoundStatement setIdempotent(@Nullable Boolean newIdempotence) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - newIdempotence, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public boolean isTracing() { - return tracing; - } - - @NonNull - @Override - public BoundStatement setTracing(boolean newTracing) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - newTracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public long getQueryTimestamp() { - return timestamp; - } - - @NonNull - @Override - public BoundStatement setQueryTimestamp(long newTimestamp) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - newTimestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Nullable - @Override - public Duration getTimeout() { - return timeout; - } - - @NonNull - @Override - public BoundStatement setTimeout(@Nullable Duration newTimeout) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, 
- routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - newTimeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public ByteBuffer getPagingState() { - return pagingState; - } - - @NonNull - @Override - public BoundStatement setPagingState(@Nullable ByteBuffer newPagingState) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - newPagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public int getPageSize() { - return pageSize; - } - - @NonNull - @Override - public BoundStatement setPageSize(int newPageSize) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - newPageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @NonNull - @Override - public BoundStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - newConsistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Nullable - @Override - public 
ConsistencyLevel getSerialConsistencyLevel() { - return serialConsistencyLevel; - } - - @NonNull - @Override - public BoundStatement setSerialConsistencyLevel( - @Nullable ConsistencyLevel newSerialConsistencyLevel) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - newSerialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - nowInSeconds); - } - - @Override - public int getNowInSeconds() { - return nowInSeconds; - } - - @NonNull - @Override - public BoundStatement setNowInSeconds(int newNowInSeconds) { - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - values, - executionProfileName, - executionProfile, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - codecRegistry, - protocolVersion, - node, - newNowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java deleted file mode 100644 index e003637c07f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.core.type.DataTypeHelper; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnDefinition implements ColumnDefinition, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final CqlIdentifier keyspace; - /** @serial */ - private final CqlIdentifier table; - /** @serial */ - private final CqlIdentifier name; - /** @serial */ - private final DataType type; - - /** @param spec the raw data decoded by the protocol layer */ - public DefaultColumnDefinition( - @NonNull ColumnSpec spec, @NonNull AttachmentPoint attachmentPoint) { - this.keyspace = CqlIdentifier.fromInternal(spec.ksName); - this.table = CqlIdentifier.fromInternal(spec.tableName); - this.name = CqlIdentifier.fromInternal(spec.name); - this.type = DataTypeHelper.fromProtocolSpec(spec.type, attachmentPoint); - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getTable() { - return table; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return 
name; - } - - @NonNull - @Override - public DataType getType() { - return type; - } - - @Override - public boolean isDetached() { - return type.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - type.attach(attachmentPoint); - } - - @Override - public String toString() { - return keyspace.asCql(true) + "." + table.asCql(true) + "." + name.asCql(true) + " " + type; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java deleted file mode 100644 index 58304cb4f67..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.internal.core.data.IdentifierIndex; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnDefinitions implements ColumnDefinitions, Serializable { - - public static ColumnDefinitions valueOf(List definitions) { - return definitions.isEmpty() - ? EmptyColumnDefinitions.INSTANCE - : new DefaultColumnDefinitions(definitions); - } - - private final List definitions; - private final IdentifierIndex index; - - private DefaultColumnDefinitions(List definitions) { - assert definitions != null && definitions.size() > 0; - this.definitions = definitions; - this.index = buildIndex(definitions); - } - - @Override - public int size() { - return definitions.size(); - } - - @NonNull - @Override - public ColumnDefinition get(int i) { - return definitions.get(i); - } - - @NonNull - @Override - public Iterator iterator() { - return definitions.iterator(); - } - - @Override - public boolean contains(@NonNull String name) { - return index.firstIndexOf(name) >= 0; - } - - @Override - public boolean contains(@NonNull CqlIdentifier id) { - return index.firstIndexOf(id) >= 0; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return index.allIndicesOf(name); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return index.firstIndexOf(name); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return 
index.allIndicesOf(id); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return index.firstIndexOf(id); - } - - @Override - public boolean isDetached() { - return definitions.get(0).isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - for (ColumnDefinition definition : definitions) { - definition.attach(attachmentPoint); - } - } - - private static IdentifierIndex buildIndex(List definitions) { - List identifiers = new ArrayList<>(definitions.size()); - for (ColumnDefinition definition : definitions) { - identifiers.add(definition.getName()); - } - return new IdentifierIndex(identifiers); - } - - /** - * @serialData The list of definitions (the identifier index is reconstructed at deserialization). - */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final List definitions; - - private SerializationProxy(DefaultColumnDefinitions columnDefinitions) { - this.definitions = columnDefinitions.definitions; - } - - private Object readResolve() { - return new DefaultColumnDefinitions(this.definitions); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java deleted file mode 100644 index 3ab57ddc598..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PagingState; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Frame; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultExecutionInfo implements ExecutionInfo { - - private final Request request; - private final Node coordinator; - private final int speculativeExecutionCount; - private final int 
successfulExecutionIndex; - private final List> errors; - private final ByteBuffer pagingState; - private final UUID tracingId; - private final int responseSizeInBytes; - private final int compressedResponseSizeInBytes; - private final List warnings; - private final Map customPayload; - private final boolean schemaInAgreement; - private final DefaultSession session; - private final InternalDriverContext context; - private final DriverExecutionProfile executionProfile; - - public DefaultExecutionInfo( - Request request, - Node coordinator, - int speculativeExecutionCount, - int successfulExecutionIndex, - List> errors, - ByteBuffer pagingState, - Frame frame, - boolean schemaInAgreement, - DefaultSession session, - InternalDriverContext context, - DriverExecutionProfile executionProfile) { - - this.request = request; - this.coordinator = coordinator; - this.speculativeExecutionCount = speculativeExecutionCount; - this.successfulExecutionIndex = successfulExecutionIndex; - this.errors = errors; - this.pagingState = pagingState; - - this.tracingId = (frame == null) ? null : frame.tracingId; - this.responseSizeInBytes = (frame == null) ? -1 : frame.size; - this.compressedResponseSizeInBytes = (frame == null) ? -1 : frame.compressedSize; - // Note: the collections returned by the protocol layer are already unmodifiable - this.warnings = (frame == null) ? Collections.emptyList() : frame.warnings; - this.customPayload = (frame == null) ? 
Collections.emptyMap() : frame.customPayload; - this.schemaInAgreement = schemaInAgreement; - this.session = session; - this.context = context; - this.executionProfile = executionProfile; - } - - @NonNull - @Override - @Deprecated - public Statement getStatement() { - return (Statement) request; - } - - @NonNull - @Override - public Request getRequest() { - return request; - } - - @Nullable - @Override - public Node getCoordinator() { - return coordinator; - } - - @Override - public int getSpeculativeExecutionCount() { - return speculativeExecutionCount; - } - - @Override - public int getSuccessfulExecutionIndex() { - return successfulExecutionIndex; - } - - @NonNull - @Override - public List> getErrors() { - // Assume this method will be called 0 or 1 time, so we create the unmodifiable wrapper on - // demand. - return (errors == null) ? Collections.emptyList() : Collections.unmodifiableList(errors); - } - - @Override - @Nullable - public ByteBuffer getPagingState() { - return pagingState; - } - - @Nullable - @Override - public PagingState getSafePagingState() { - if (pagingState == null) { - return null; - } else { - if (!(request instanceof Statement)) { - throw new IllegalStateException("Only statements should have a paging state"); - } - Statement statement = (Statement) request; - return new DefaultPagingState(pagingState, statement, session.getContext()); - } - } - - @NonNull - @Override - public List getWarnings() { - return warnings; - } - - @NonNull - @Override - public Map getIncomingPayload() { - return customPayload; - } - - @Override - public boolean isSchemaInAgreement() { - return schemaInAgreement; - } - - @Override - @Nullable - public UUID getTracingId() { - return tracingId; - } - - @NonNull - @Override - public CompletionStage getQueryTraceAsync() { - if (tracingId == null) { - return CompletableFutures.failedFuture( - new IllegalStateException("Tracing was disabled for this request")); - } else { - return new QueryTraceFetcher(tracingId, 
session, context, executionProfile).fetch(); - } - } - - @Override - public int getResponseSizeInBytes() { - return responseSizeInBytes; - } - - @Override - public int getCompressedResponseSizeInBytes() { - return compressedResponseSizeInBytes; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java deleted file mode 100644 index 71243285e3e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PagingState; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Arrays; - -public class DefaultPagingState implements PagingState { - - private final ByteBuffer rawPagingState; - private final byte[] hash; - private final int protocolVersion; - - public DefaultPagingState( - ByteBuffer rawPagingState, Statement statement, AttachmentPoint attachmentPoint) { - this( - rawPagingState, - hash(statement, rawPagingState, attachmentPoint), - attachmentPoint.getProtocolVersion().getCode()); - } - - private DefaultPagingState(ByteBuffer rawPagingState, byte[] hash, int protocolVersion) { - this.rawPagingState = rawPagingState; - this.hash = hash; - this.protocolVersion = protocolVersion; - } - - // Same serialized form as in driver 3: - // size of raw state|size of hash|raw state|hash|protocol version - // - // The protocol version might be absent, in which case it defaults to V2 (this is for backward - // compatibility with 2.0.10 where it is always absent). 
- public static DefaultPagingState fromBytes(byte[] bytes) { - ByteBuffer buffer = ByteBuffer.wrap(bytes); - short rawPagingStateLength = buffer.getShort(); - short hashLength = buffer.getShort(); - int length = rawPagingStateLength + hashLength + 2; - int legacyLength = rawPagingStateLength + hashLength; // without protocol version - if (buffer.remaining() != length && buffer.remaining() != legacyLength) { - throw new IllegalArgumentException( - "Cannot deserialize paging state, invalid format. The serialized form was corrupted, " - + "or not initially generated from a PagingState object."); - } - byte[] rawPagingState = new byte[rawPagingStateLength]; - buffer.get(rawPagingState); - byte[] hash = new byte[hashLength]; - buffer.get(hash); - int protocolVersion = buffer.hasRemaining() ? buffer.getShort() : 2; - return new DefaultPagingState(ByteBuffer.wrap(rawPagingState), hash, protocolVersion); - } - - @Override - public byte[] toBytes() { - ByteBuffer buffer = ByteBuffer.allocate(rawPagingState.remaining() + hash.length + 6); - buffer.putShort((short) rawPagingState.remaining()); - buffer.putShort((short) hash.length); - buffer.put(rawPagingState.duplicate()); - buffer.put(hash); - buffer.putShort((short) protocolVersion); - buffer.rewind(); - return buffer.array(); - } - - public static DefaultPagingState fromString(String string) { - byte[] bytes = Bytes.getArray(Bytes.fromHexString("0x" + string)); - return fromBytes(bytes); - } - - @Override - public String toString() { - return Bytes.toHexString(toBytes()).substring(2); // remove "0x" prefix - } - - @Override - public boolean matches(@NonNull Statement statement, @Nullable Session session) { - AttachmentPoint attachmentPoint = - (session == null) ? 
AttachmentPoint.NONE : session.getContext(); - byte[] actual = hash(statement, rawPagingState, attachmentPoint); - return Arrays.equals(actual, hash); - } - - @NonNull - @Override - public ByteBuffer getRawPagingState() { - return rawPagingState; - } - - // Hashes a statement's query string and parameters. We also include the paging state itself in - // the hash computation, to make the serialized form a bit more resistant to manual tampering. - private static byte[] hash( - @NonNull Statement statement, - ByteBuffer rawPagingState, - @NonNull AttachmentPoint attachmentPoint) { - // Batch statements don't have paging, the driver should never call this method for one - assert !(statement instanceof BatchStatement); - - MessageDigest messageDigest; - try { - messageDigest = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException( - "It looks like this JVM doesn't support MD5 digests, " - + "can't use the rich paging state feature", - e); - } - if (statement instanceof BoundStatement) { - BoundStatement boundStatement = (BoundStatement) statement; - String queryString = boundStatement.getPreparedStatement().getQuery(); - messageDigest.update(queryString.getBytes(Charset.defaultCharset())); - for (ByteBuffer value : boundStatement.getValues()) { - messageDigest.update(value.duplicate()); - } - } else { - SimpleStatement simpleStatement = (SimpleStatement) statement; - String queryString = simpleStatement.getQuery(); - messageDigest.update(queryString.getBytes(Charset.defaultCharset())); - for (Object value : simpleStatement.getPositionalValues()) { - ByteBuffer encodedValue = - ValuesHelper.encodeToDefaultCqlMapping( - value, attachmentPoint.getCodecRegistry(), attachmentPoint.getProtocolVersion()); - messageDigest.update(encodedValue); - } - for (Object value : simpleStatement.getNamedValues().values()) { - ByteBuffer encodedValue = - ValuesHelper.encodeToDefaultCqlMapping( - value, 
attachmentPoint.getCodecRegistry(), attachmentPoint.getProtocolVersion()); - messageDigest.update(encodedValue); - } - } - messageDigest.update(rawPagingState.duplicate()); - return messageDigest.digest(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java deleted file mode 100644 index 7f87dbe5b51..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; -import net.jcip.annotations.Immutable; - -/** - * Default implementation of a prepare request, which is built internally to handle calls such as - * {@link CqlSession#prepare(String)} and {@link CqlSession#prepare(SimpleStatement)}. - * - *

When built from a {@link SimpleStatement}, it propagates the attributes to bound statements - * according to the rules described in {@link CqlSession#prepare(SimpleStatement)}. The prepare - * request itself: - * - *

    - *
  • will use the same execution profile (or execution profile name) as the {@code - * SimpleStatement}; - *
  • will use the same custom payload as the {@code SimpleStatement}; - *
  • will use a {@code null} timeout in order to default to the configuration (assuming that if - * a statement with a custom timeout is prepared, it is intended for the bound statements, not - * the preparation itself). - *
- */ -@Immutable -public class DefaultPrepareRequest implements PrepareRequest { - - private final SimpleStatement statement; - - public DefaultPrepareRequest(SimpleStatement statement) { - this.statement = statement; - } - - public DefaultPrepareRequest(String query) { - this.statement = SimpleStatement.newInstance(query); - } - - @NonNull - @Override - public String getQuery() { - return statement.getQuery(); - } - - @Nullable - @Override - public String getExecutionProfileName() { - return statement.getExecutionProfileName(); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return statement.getExecutionProfile(); - } - - @Nullable - @Override - public CqlIdentifier getKeyspace() { - return statement.getKeyspace(); - } - - @Nullable - @Override - public CqlIdentifier getRoutingKeyspace() { - // Prepare requests do not operate on a particular partition, token-aware routing doesn't apply. - return null; - } - - @Nullable - @Override - public ByteBuffer getRoutingKey() { - return null; - } - - @Nullable - @Override - public Token getRoutingToken() { - return null; - } - - @NonNull - @Override - public Map getCustomPayload() { - return statement.getCustomPayload(); - } - - @Nullable - @Override - public Duration getTimeout() { - return null; - } - - @Nullable - @Override - public String getExecutionProfileNameForBoundStatements() { - return statement.getExecutionProfileName(); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfileForBoundStatements() { - return statement.getExecutionProfile(); - } - - @Nullable - @Override - public CqlIdentifier getRoutingKeyspaceForBoundStatements() { - return (statement.getKeyspace() != null) - ? 
statement.getKeyspace() - : statement.getRoutingKeyspace(); - } - - @Nullable - @Override - public ByteBuffer getRoutingKeyForBoundStatements() { - return statement.getRoutingKey(); - } - - @Nullable - @Override - public Token getRoutingTokenForBoundStatements() { - return statement.getRoutingToken(); - } - - @NonNull - @Override - public Map getCustomPayloadForBoundStatements() { - return statement.getCustomPayload(); - } - - @Nullable - @Override - public Boolean areBoundStatementsIdempotent() { - return statement.isIdempotent(); - } - - @Nullable - @Override - public Duration getTimeoutForBoundStatements() { - return statement.getTimeout(); - } - - @Nullable - @Override - public ByteBuffer getPagingStateForBoundStatements() { - return statement.getPagingState(); - } - - @Override - public int getPageSizeForBoundStatements() { - return statement.getPageSize(); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevelForBoundStatements() { - return statement.getConsistencyLevel(); - } - - @Nullable - @Override - public ConsistencyLevel getSerialConsistencyLevelForBoundStatements() { - return statement.getSerialConsistencyLevel(); - } - - @Nullable - @Override - public Node getNode() { - // never target prepare requests - return null; - } - - @Override - public boolean areBoundStatementsTracing() { - return statement.isTracing(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultPrepareRequest) { - DefaultPrepareRequest that = (DefaultPrepareRequest) other; - return this.statement.equals(that.statement); - } else { - return false; - } - } - - @Override - public int hashCode() { - return statement.hashCode(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java deleted file mode 100644 index e45e1e5add0..00000000000 
--- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import 
net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultPreparedStatement implements PreparedStatement { - - private final ByteBuffer id; - private final RepreparePayload repreparePayload; - private final ColumnDefinitions variableDefinitions; - private final List partitionKeyIndices; - private volatile ResultMetadata resultMetadata; - private final CodecRegistry codecRegistry; - private final ProtocolVersion protocolVersion; - private final String executionProfileNameForBoundStatements; - private final DriverExecutionProfile executionProfileForBoundStatements; - private final ByteBuffer pagingStateForBoundStatements; - private final CqlIdentifier routingKeyspaceForBoundStatements; - private final ByteBuffer routingKeyForBoundStatements; - private final Token routingTokenForBoundStatements; - private final Map customPayloadForBoundStatements; - private final Boolean areBoundStatementsIdempotent; - private final boolean areBoundStatementsTracing; - private final int pageSizeForBoundStatements; - private final ConsistencyLevel consistencyLevelForBoundStatements; - private final ConsistencyLevel serialConsistencyLevelForBoundStatements; - private final Duration timeoutForBoundStatements; - - public DefaultPreparedStatement( - ByteBuffer id, - String query, - ColumnDefinitions variableDefinitions, - List partitionKeyIndices, - ByteBuffer resultMetadataId, - ColumnDefinitions resultSetDefinitions, - CqlIdentifier keyspace, - Map customPayloadForPrepare, - String executionProfileNameForBoundStatements, - DriverExecutionProfile executionProfileForBoundStatements, - CqlIdentifier routingKeyspaceForBoundStatements, - ByteBuffer routingKeyForBoundStatements, - Token routingTokenForBoundStatements, - Map customPayloadForBoundStatements, - Boolean areBoundStatementsIdempotent, - Duration timeoutForBoundStatements, - ByteBuffer pagingStateForBoundStatements, - int pageSizeForBoundStatements, - ConsistencyLevel consistencyLevelForBoundStatements, - ConsistencyLevel 
serialConsistencyLevelForBoundStatements, - boolean areBoundStatementsTracing, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - this.id = id; - this.partitionKeyIndices = partitionKeyIndices; - // It's important that we keep a reference to this object, so that it only gets evicted from - // the map in DefaultSession if no client reference the PreparedStatement anymore. - this.repreparePayload = new RepreparePayload(id, query, keyspace, customPayloadForPrepare); - this.variableDefinitions = variableDefinitions; - this.resultMetadata = new ResultMetadata(resultMetadataId, resultSetDefinitions); - - this.executionProfileNameForBoundStatements = executionProfileNameForBoundStatements; - this.executionProfileForBoundStatements = executionProfileForBoundStatements; - this.routingKeyspaceForBoundStatements = routingKeyspaceForBoundStatements; - this.routingKeyForBoundStatements = routingKeyForBoundStatements; - this.routingTokenForBoundStatements = routingTokenForBoundStatements; - this.customPayloadForBoundStatements = customPayloadForBoundStatements; - this.areBoundStatementsIdempotent = areBoundStatementsIdempotent; - this.timeoutForBoundStatements = timeoutForBoundStatements; - this.pagingStateForBoundStatements = pagingStateForBoundStatements; - this.pageSizeForBoundStatements = pageSizeForBoundStatements; - this.consistencyLevelForBoundStatements = consistencyLevelForBoundStatements; - this.serialConsistencyLevelForBoundStatements = serialConsistencyLevelForBoundStatements; - this.areBoundStatementsTracing = areBoundStatementsTracing; - - this.codecRegistry = codecRegistry; - this.protocolVersion = protocolVersion; - } - - @NonNull - @Override - public ByteBuffer getId() { - return id; - } - - @NonNull - @Override - public String getQuery() { - return repreparePayload.query; - } - - @NonNull - @Override - public ColumnDefinitions getVariableDefinitions() { - return variableDefinitions; - } - - @NonNull - @Override - public List 
getPartitionKeyIndices() { - return partitionKeyIndices; - } - - @Override - public ByteBuffer getResultMetadataId() { - return resultMetadata.resultMetadataId; - } - - @NonNull - @Override - public ColumnDefinitions getResultSetDefinitions() { - return resultMetadata.resultSetDefinitions; - } - - @Override - public void setResultMetadata( - @NonNull ByteBuffer newResultMetadataId, @NonNull ColumnDefinitions newResultSetDefinitions) { - this.resultMetadata = new ResultMetadata(newResultMetadataId, newResultSetDefinitions); - } - - @NonNull - @Override - public BoundStatement bind(@NonNull Object... values) { - return new DefaultBoundStatement( - this, - variableDefinitions, - ValuesHelper.encodePreparedValues( - values, variableDefinitions, codecRegistry, protocolVersion), - executionProfileNameForBoundStatements, - executionProfileForBoundStatements, - routingKeyspaceForBoundStatements, - routingKeyForBoundStatements, - routingTokenForBoundStatements, - customPayloadForBoundStatements, - areBoundStatementsIdempotent, - areBoundStatementsTracing, - Statement.NO_DEFAULT_TIMESTAMP, - pagingStateForBoundStatements, - pageSizeForBoundStatements, - consistencyLevelForBoundStatements, - serialConsistencyLevelForBoundStatements, - timeoutForBoundStatements, - codecRegistry, - protocolVersion, - null, - Statement.NO_NOW_IN_SECONDS); - } - - @NonNull - @Override - public BoundStatementBuilder boundStatementBuilder(@NonNull Object... 
values) { - return new BoundStatementBuilder( - this, - variableDefinitions, - ValuesHelper.encodePreparedValues( - values, variableDefinitions, codecRegistry, protocolVersion), - executionProfileNameForBoundStatements, - executionProfileForBoundStatements, - routingKeyspaceForBoundStatements, - routingKeyForBoundStatements, - routingTokenForBoundStatements, - customPayloadForBoundStatements, - areBoundStatementsIdempotent, - areBoundStatementsTracing, - Statement.NO_DEFAULT_TIMESTAMP, - pagingStateForBoundStatements, - pageSizeForBoundStatements, - consistencyLevelForBoundStatements, - serialConsistencyLevelForBoundStatements, - timeoutForBoundStatements, - codecRegistry, - protocolVersion); - } - - public RepreparePayload getRepreparePayload() { - return this.repreparePayload; - } - - private static class ResultMetadata { - private ByteBuffer resultMetadataId; - private ColumnDefinitions resultSetDefinitions; - - private ResultMetadata(ByteBuffer resultMetadataId, ColumnDefinitions resultSetDefinitions) { - this.resultMetadataId = resultMetadataId; - this.resultSetDefinitions = resultSetDefinitions; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java deleted file mode 100644 index db95cc408b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultQueryTrace implements QueryTrace { - - private final UUID tracingId; - private final String requestType; - private final int durationMicros; - private final InetSocketAddress coordinator; - private final Map parameters; - private final long startedAt; - private final List events; - - public DefaultQueryTrace( - UUID tracingId, - String requestType, - int durationMicros, - InetSocketAddress coordinator, - Map parameters, - long startedAt, - List events) { - this.tracingId = tracingId; - this.requestType = requestType; - this.durationMicros = durationMicros; - this.coordinator = coordinator; - this.parameters = parameters; - this.startedAt = startedAt; - this.events = events; - } - - @NonNull - @Override - public UUID getTracingId() { - return tracingId; - } - - @NonNull - @Override - public String getRequestType() { - return requestType; - } - - @Override - public int getDurationMicros() { - return durationMicros; - } - - @NonNull - @Override - @Deprecated - public InetAddress getCoordinator() { - return coordinator.getAddress(); - } - - @NonNull - @Override - public InetSocketAddress getCoordinatorAddress() { - return 
coordinator; - } - - @NonNull - @Override - public Map getParameters() { - return parameters; - } - - @Override - public long getStartedAt() { - return startedAt; - } - - @NonNull - @Override - public List getEvents() { - return events; - } - - @Override - public String toString() { - return String.format("%s [%s] - %dµs", requestType, tracingId, durationMicros); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java deleted file mode 100644 index d6bf39ab9c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultRow implements Row, Serializable { - - private final ColumnDefinitions definitions; - private final List data; - private transient volatile AttachmentPoint attachmentPoint; - - public DefaultRow( - ColumnDefinitions definitions, List data, AttachmentPoint attachmentPoint) { - this.definitions = definitions; - this.data = data; - this.attachmentPoint = attachmentPoint; - } - - public DefaultRow(ColumnDefinitions definitions, List data) { - this(definitions, data, AttachmentPoint.NONE); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return definitions; - } - - @Override - public int size() { - return definitions.size(); - } - - @NonNull - @Override - public DataType getType(int i) { - return definitions.get(i).getType(); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = definitions.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a column in this row"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int 
indexOf = definitions.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a column in this row"); - } - return indexOf; - } - - @NonNull - @Override - public DataType getType(@NonNull CqlIdentifier id) { - return definitions.get(firstIndexOf(id)).getType(); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = definitions.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a column in this row"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = definitions.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a column in this row"); - } - return indexOf; - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return definitions.get(firstIndexOf(name)).getType(); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return attachmentPoint.getCodecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return attachmentPoint.getProtocolVersion(); - } - - @Override - public boolean isDetached() { - return attachmentPoint == AttachmentPoint.NONE; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - this.definitions.attach(attachmentPoint); - } - - @Nullable - @Override - public ByteBuffer getBytesUnsafe(int i) { - return data.get(i); - } - /** - * @serialData The column definitions, followed by an array of byte arrays representing the column - * values (null values are represented by {@code null}). 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final ColumnDefinitions definitions; - private final byte[][] values; - - SerializationProxy(DefaultRow row) { - this.definitions = row.definitions; - this.values = new byte[row.data.size()][]; - int i = 0; - for (ByteBuffer buffer : row.data) { - this.values[i] = (buffer == null) ? null : Bytes.getArray(buffer); - i += 1; - } - } - - private Object readResolve() { - List data = new ArrayList<>(this.values.length); - for (byte[] value : this.values) { - data.add((value == null) ? null : ByteBuffer.wrap(value)); - } - return new DefaultRow(this.definitions, data); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java deleted file mode 100644 index c763860479e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultSimpleStatement implements SimpleStatement { - - private final String query; - private final List positionalValues; - private final Map namedValues; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private final CqlIdentifier keyspace; - private final CqlIdentifier routingKeyspace; - private final ByteBuffer routingKey; - private final Token routingToken; - - private final Map customPayload; - private final Boolean idempotent; - private final boolean tracing; - private final long timestamp; - private final ByteBuffer pagingState; - private final int pageSize; - private final ConsistencyLevel consistencyLevel; - 
private final ConsistencyLevel serialConsistencyLevel; - private final Duration timeout; - private final Node node; - private final int nowInSeconds; - - /** @see SimpleStatement#builder(String) */ - public DefaultSimpleStatement( - String query, - List positionalValues, - Map namedValues, - String executionProfileName, - DriverExecutionProfile executionProfile, - CqlIdentifier keyspace, - CqlIdentifier routingKeyspace, - ByteBuffer routingKey, - Token routingToken, - Map customPayload, - Boolean idempotent, - boolean tracing, - long timestamp, - ByteBuffer pagingState, - int pageSize, - ConsistencyLevel consistencyLevel, - ConsistencyLevel serialConsistencyLevel, - Duration timeout, - Node node, - int nowInSeconds) { - if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { - throw new IllegalArgumentException("Can't have both positional and named values"); - } - this.query = query; - this.positionalValues = NullAllowingImmutableList.copyOf(positionalValues); - this.namedValues = NullAllowingImmutableMap.copyOf(namedValues); - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.keyspace = keyspace; - this.routingKeyspace = routingKeyspace; - this.routingKey = routingKey; - this.routingToken = routingToken; - this.customPayload = customPayload; - this.idempotent = idempotent; - this.tracing = tracing; - this.timestamp = timestamp; - this.pagingState = pagingState; - this.pageSize = pageSize; - this.consistencyLevel = consistencyLevel; - this.serialConsistencyLevel = serialConsistencyLevel; - this.timeout = timeout; - this.node = node; - this.nowInSeconds = nowInSeconds; - } - - @NonNull - @Override - public String getQuery() { - return query; - } - - @NonNull - @Override - public SimpleStatement setQuery(@NonNull String newQuery) { - return new DefaultSimpleStatement( - newQuery, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - 
routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public List getPositionalValues() { - return positionalValues; - } - - @NonNull - @Override - public SimpleStatement setPositionalValues(@NonNull List newPositionalValues) { - return new DefaultSimpleStatement( - query, - newPositionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getNamedValues() { - return namedValues; - } - - @NonNull - @Override - public SimpleStatement setNamedValuesWithIds(@NonNull Map newNamedValues) { - return new DefaultSimpleStatement( - query, - positionalValues, - newNamedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @NonNull - @Override - public SimpleStatement setExecutionProfileName(@Nullable String newConfigProfileName) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - newConfigProfileName, - (newConfigProfileName == null) ? 
executionProfile : null, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public SimpleStatement setExecutionProfile(@Nullable DriverExecutionProfile newProfile) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - (newProfile == null) ? executionProfileName : null, - newProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public SimpleStatement setKeyspace(@Nullable CqlIdentifier newKeyspace) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - newKeyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public CqlIdentifier getRoutingKeyspace() { - return routingKeyspace; - } - - @NonNull - @Override - public SimpleStatement setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeyspace) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - newRoutingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - 
@Override - public SimpleStatement setNode(@Nullable Node newNode) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - newNode, - nowInSeconds); - } - - @Nullable - @Override - public Node getNode() { - return node; - } - - @Nullable - @Override - public ByteBuffer getRoutingKey() { - return routingKey; - } - - @NonNull - @Override - public SimpleStatement setRoutingKey(@Nullable ByteBuffer newRoutingKey) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - newRoutingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public Token getRoutingToken() { - return routingToken; - } - - @NonNull - @Override - public SimpleStatement setRoutingToken(@Nullable Token newRoutingToken) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - newRoutingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @NonNull - @Override - public Map getCustomPayload() { - return customPayload; - } - - @NonNull - @Override - public SimpleStatement setCustomPayload(@NonNull Map newCustomPayload) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - newCustomPayload, - idempotent, - 
tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public Boolean isIdempotent() { - return idempotent; - } - - @NonNull - @Override - public SimpleStatement setIdempotent(@Nullable Boolean newIdempotence) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - newIdempotence, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public boolean isTracing() { - return tracing; - } - - @NonNull - @Override - public SimpleStatement setTracing(boolean newTracing) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - newTracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public long getQueryTimestamp() { - return timestamp; - } - - @NonNull - @Override - public SimpleStatement setQueryTimestamp(long newTimestamp) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - newTimestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public Duration getTimeout() { - return timeout; - } - - @NonNull - @Override - public SimpleStatement setTimeout(@Nullable Duration newTimeout) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - 
executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - newTimeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ByteBuffer getPagingState() { - return pagingState; - } - - @NonNull - @Override - public SimpleStatement setPagingState(@Nullable ByteBuffer newPagingState) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - newPagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public int getPageSize() { - return pageSize; - } - - @NonNull - @Override - public SimpleStatement setPageSize(int newPageSize) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - newPageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - @NonNull - @Override - public SimpleStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - newConsistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Nullable - @Override - public ConsistencyLevel getSerialConsistencyLevel() { - return 
serialConsistencyLevel; - } - - @NonNull - @Override - public SimpleStatement setSerialConsistencyLevel( - @Nullable ConsistencyLevel newSerialConsistencyLevel) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - newSerialConsistencyLevel, - timeout, - node, - nowInSeconds); - } - - @Override - public int getNowInSeconds() { - return nowInSeconds; - } - - @NonNull - @Override - public SimpleStatement setNowInSeconds(int newNowInSeconds) { - return new DefaultSimpleStatement( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - newNowInSeconds); - } - - public static Map wrapKeys(Map namedValues) { - NullAllowingImmutableMap.Builder builder = - NullAllowingImmutableMap.builder(); - for (Map.Entry entry : namedValues.entrySet()) { - builder.put(CqlIdentifier.fromCql(entry.getKey()), entry.getValue()); - } - return builder.build(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultSimpleStatement) { - DefaultSimpleStatement that = (DefaultSimpleStatement) other; - return this.query.equals(that.query) - && this.positionalValues.equals(that.positionalValues) - && this.namedValues.equals(that.namedValues) - && Objects.equals(this.executionProfileName, that.executionProfileName) - && Objects.equals(this.executionProfile, that.executionProfile) - && Objects.equals(this.keyspace, that.keyspace) - && Objects.equals(this.routingKeyspace, that.routingKeyspace) - && Objects.equals(this.routingKey, that.routingKey) - && 
Objects.equals(this.routingToken, that.routingToken) - && Objects.equals(this.customPayload, that.customPayload) - && Objects.equals(this.idempotent, that.idempotent) - && this.tracing == that.tracing - && this.timestamp == that.timestamp - && Objects.equals(this.pagingState, that.pagingState) - && this.pageSize == that.pageSize - && Objects.equals(this.consistencyLevel, that.consistencyLevel) - && Objects.equals(this.serialConsistencyLevel, that.serialConsistencyLevel) - && Objects.equals(this.timeout, that.timeout) - && Objects.equals(this.node, that.node) - && this.nowInSeconds == that.nowInSeconds; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - query, - positionalValues, - namedValues, - executionProfileName, - executionProfile, - keyspace, - routingKeyspace, - routingKey, - routingToken, - customPayload, - idempotent, - tracing, - timestamp, - pagingState, - pageSize, - consistencyLevel, - serialConsistencyLevel, - timeout, - node, - nowInSeconds); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java deleted file mode 100644 index 9bf7ff7c8ee..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Date; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTraceEvent implements TraceEvent { - - private final String activity; - private final long timestamp; - private final InetSocketAddress source; - private final int sourceElapsedMicros; - private final String threadName; - - public DefaultTraceEvent( - String activity, - long timestamp, - InetSocketAddress source, - int sourceElapsedMicros, - String threadName) { - this.activity = activity; - // Convert the UUID timestamp to an epoch timestamp - this.timestamp = (timestamp - 0x01b21dd213814000L) / 10000; - this.source = source; - this.sourceElapsedMicros = sourceElapsedMicros; - this.threadName = threadName; - } - - @Override - public String getActivity() { - return activity; - } - - @Override - public long getTimestamp() { - return timestamp; - } - - @Override - @Deprecated - public InetAddress getSource() { - return source.getAddress(); - } - - @Override - public InetSocketAddress getSourceAddress() { - return source; - } - - @Override - public int getSourceElapsedMicros() { - return sourceElapsedMicros; - } - - @Override - public String getThreadName() { - return threadName; - } - - @Override - public String toString() { - return String.format("%s on %s[%s] at %s", activity, source, threadName, new Date(timestamp)); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java deleted file mode 100644 index 53cfee98b3e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -/** - * The singleton that represents no column definitions (implemented as an enum which provides the - * serialization machinery for free). 
- */ -public enum EmptyColumnDefinitions implements ColumnDefinitions { - INSTANCE; - - @Override - public int size() { - return 0; - } - - @NonNull - @Override - public ColumnDefinition get(int i) { - throw new ArrayIndexOutOfBoundsException(); - } - - @Override - public boolean contains(@NonNull String name) { - return false; - } - - @Override - public boolean contains(@NonNull CqlIdentifier id) { - return false; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return Collections.emptyList(); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return -1; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return Collections.emptyList(); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return -1; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) {} - - @Override - public Iterator iterator() { - return Collections.emptyList().iterator(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java deleted file mode 100644 index 2115a127dc6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class MultiPageResultSet implements ResultSet { - - private final RowIterator iterator; - private final List executionInfos = new ArrayList<>(); - private ColumnDefinitions columnDefinitions; - - public MultiPageResultSet(@NonNull AsyncResultSet firstPage) { - assert firstPage.hasMorePages(); - this.iterator = new RowIterator(firstPage); - this.executionInfos.add(firstPage.getExecutionInfo()); - this.columnDefinitions = firstPage.getColumnDefinitions(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public List getExecutionInfos() { - return executionInfos; - } - - @Override - public boolean isFullyFetched() { - return iterator.isFullyFetched(); - } - - @Override - public int 
getAvailableWithoutFetching() { - return iterator.remaining(); - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean wasApplied() { - return iterator.wasApplied(); - } - - private class RowIterator extends CountingIterator { - private AsyncResultSet currentPage; - private Iterator currentRows; - - private RowIterator(AsyncResultSet firstPage) { - super(firstPage.remaining()); - this.currentPage = firstPage; - this.currentRows = firstPage.currentPage().iterator(); - } - - @Override - protected Row computeNext() { - maybeMoveToNextPage(); - return currentRows.hasNext() ? currentRows.next() : endOfData(); - } - - private void maybeMoveToNextPage() { - if (!currentRows.hasNext() && currentPage.hasMorePages()) { - BlockingOperation.checkNotDriverThread(); - AsyncResultSet nextPage = - CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); - currentPage = nextPage; - remaining += nextPage.remaining(); - currentRows = nextPage.currentPage().iterator(); - executionInfos.add(nextPage.getExecutionInfo()); - // The definitions can change from page to page if this result set was built from a bound - // 'SELECT *', and the schema was altered. - columnDefinitions = nextPage.getColumnDefinitions(); - } - } - - private boolean isFullyFetched() { - return !currentPage.hasMorePages(); - } - - private boolean wasApplied() { - return currentPage.wasApplied(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java deleted file mode 100644 index 742699d2c1e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.function.Consumer; -import net.jcip.annotations.NotThreadSafe; - -/** - * A Spliterator for {@link PagingIterable} instances that splits the stream in chunks of equal - * size. - * - * @param The element type of the underlying stream. - */ -@NotThreadSafe -public class PagingIterableSpliterator implements Spliterator { - - @NonNull - public static Builder builder(@NonNull PagingIterable iterable) { - return new Builder<>(iterable); - } - - /** The default chunk size for {@link PagingIterableSpliterator}. */ - public static final int DEFAULT_CHUNK_SIZE = 128; - - private final PagingIterable iterable; - private long estimatedSize; - private final int chunkSize; - private final int characteristics; - - /** - * Creates a new {@link PagingIterableSpliterator} for the given iterable, with unknown size and - * default chunk size ({@value #DEFAULT_CHUNK_SIZE}). 
- * - * @param iterable The {@link PagingIterable} to create a spliterator for. - */ - public PagingIterableSpliterator(@NonNull PagingIterable iterable) { - this(iterable, Long.MAX_VALUE, DEFAULT_CHUNK_SIZE); - } - - private PagingIterableSpliterator( - @NonNull PagingIterable iterable, long estimatedSize, int chunkSize) { - this.iterable = Objects.requireNonNull(iterable, "iterable cannot be null"); - this.estimatedSize = estimatedSize; - this.chunkSize = chunkSize; - if (estimatedSize < Long.MAX_VALUE) { - characteristics = - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED; - } else { - characteristics = Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL; - } - } - - @Override - public boolean tryAdvance(Consumer action) { - Objects.requireNonNull(action, "action cannot be null"); - ElementT row = iterable.one(); - if (row == null) { - return false; - } - action.accept(row); - return true; - } - - @Override - @Nullable - public Spliterator trySplit() { - if (estimatedSize != Long.MAX_VALUE && estimatedSize <= chunkSize) { - // There is no point in splitting if the number of remaining elements is below the chunk size - return null; - } - ElementT row = iterable.one(); - if (row == null) { - return null; - } - Object[] array = new Object[chunkSize]; - int i = 0; - do { - array[i++] = row; - if (i < chunkSize) { - row = iterable.one(); - } else { - break; - } - } while (row != null); - if (estimatedSize != Long.MAX_VALUE) { - estimatedSize -= i; - } - // Splits will also report SIZED and SUBSIZED as well. 
- return Spliterators.spliterator(array, 0, i, characteristics()); - } - - @Override - public void forEachRemaining(Consumer action) { - iterable.iterator().forEachRemaining(action); - } - - @Override - public long estimateSize() { - return estimatedSize; - } - - @Override - public int characteristics() { - return characteristics; - } - - public static class Builder { - - private final PagingIterable iterable; - private long estimatedSize = Long.MAX_VALUE; - private int chunkSize = DEFAULT_CHUNK_SIZE; - - Builder(@NonNull PagingIterable iterable) { - this.iterable = iterable; - } - - @NonNull - public Builder withEstimatedSize(long estimatedSize) { - Preconditions.checkArgument(estimatedSize >= 0, "estimatedSize must be >= 0"); - this.estimatedSize = estimatedSize; - return this; - } - - @NonNull - public Builder withChunkSize(int chunkSize) { - Preconditions.checkArgument(chunkSize > 0, "chunkSize must be > 0"); - this.chunkSize = chunkSize; - return this; - } - - @NonNull - public PagingIterableSpliterator build() { - return new PagingIterableSpliterator<>(iterable, estimatedSize, chunkSize); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java deleted file mode 100644 index 7ea54aa3b0e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import io.netty.util.concurrent.EventExecutor; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class QueryTraceFetcher { - - private final UUID tracingId; - private final CqlSession session; - private final DriverExecutionProfile config; - private final int maxAttempts; - private final long intervalNanos; - private final EventExecutor scheduler; - private final CompletableFuture resultFuture = new CompletableFuture<>(); - - QueryTraceFetcher( - UUID tracingId, - CqlSession session, - InternalDriverContext context, - DriverExecutionProfile config) { - 
this.tracingId = tracingId; - this.session = session; - - String regularConsistency = config.getString(DefaultDriverOption.REQUEST_CONSISTENCY); - String traceConsistency = config.getString(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY); - this.config = - traceConsistency.equals(regularConsistency) - ? config - : config.withString(DefaultDriverOption.REQUEST_CONSISTENCY, traceConsistency); - - this.maxAttempts = config.getInt(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS); - this.intervalNanos = config.getDuration(DefaultDriverOption.REQUEST_TRACE_INTERVAL).toNanos(); - this.scheduler = context.getNettyOptions().adminEventExecutorGroup().next(); - - querySession(maxAttempts); - } - - CompletionStage fetch() { - return resultFuture; - } - - private void querySession(int remainingAttempts) { - session - .executeAsync( - SimpleStatement.builder("SELECT * FROM system_traces.sessions WHERE session_id = ?") - .addPositionalValue(tracingId) - .setExecutionProfile(config) - .build()) - .whenComplete( - (rs, error) -> { - if (error != null) { - resultFuture.completeExceptionally(error); - } else { - Row row = rs.one(); - if (row == null || row.isNull("duration") || row.isNull("started_at")) { - // Trace is incomplete => fail if last try, or schedule retry - if (remainingAttempts == 1) { - resultFuture.completeExceptionally( - new IllegalStateException( - String.format( - "Trace %s still not complete after %d attempts", - tracingId, maxAttempts))); - } else { - scheduler.schedule( - () -> querySession(remainingAttempts - 1), - intervalNanos, - TimeUnit.NANOSECONDS); - } - } else { - queryEvents(row, new ArrayList<>(), null); - } - } - }); - } - - private void queryEvents(Row sessionRow, List events, ByteBuffer pagingState) { - session - .executeAsync( - SimpleStatement.builder("SELECT * FROM system_traces.events WHERE session_id = ?") - .addPositionalValue(tracingId) - .setPagingState(pagingState) - .setExecutionProfile(config) - .build()) - .whenComplete( - (rs, error) -> { - 
if (error != null) { - resultFuture.completeExceptionally(error); - } else { - Iterables.addAll(events, rs.currentPage()); - ByteBuffer nextPagingState = rs.getExecutionInfo().getPagingState(); - if (nextPagingState == null) { - resultFuture.complete(buildTrace(sessionRow, events)); - } else { - queryEvents(sessionRow, events, nextPagingState); - } - } - }); - } - - private QueryTrace buildTrace(Row sessionRow, Iterable eventRows) { - ImmutableList.Builder eventsBuilder = ImmutableList.builder(); - for (Row eventRow : eventRows) { - UUID eventId = eventRow.getUuid("event_id"); - int sourcePort = 0; - if (eventRow.getColumnDefinitions().contains("source_port")) { - sourcePort = eventRow.getInt("source_port"); - } - eventsBuilder.add( - new DefaultTraceEvent( - eventRow.getString("activity"), - eventId == null ? -1 : eventId.timestamp(), - new InetSocketAddress(eventRow.getInetAddress("source"), sourcePort), - eventRow.getInt("source_elapsed"), - eventRow.getString("thread"))); - } - Instant startedAt = sessionRow.getInstant("started_at"); - int coordinatorPort = 0; - if (sessionRow.getColumnDefinitions().contains("coordinator_port")) { - coordinatorPort = sessionRow.getInt("coordinator_port"); - } - return new DefaultQueryTrace( - tracingId, - sessionRow.getString("request"), - sessionRow.getInt("duration"), - new InetSocketAddress(sessionRow.getInetAddress("coordinator"), coordinatorPort), - sessionRow.getMap("parameters", String.class, String.class), - startedAt == null ? 
-1 : startedAt.toEpochMilli(), - eventsBuilder.build()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java deleted file mode 100644 index eb15d92acc5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; - -public class ResultSets { - public static ResultSet newInstance(AsyncResultSet firstPage) { - return firstPage.hasMorePages() - ? 
new MultiPageResultSet(firstPage) - : new SinglePageResultSet(firstPage); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java deleted file mode 100644 index eb33da3f430..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.PagingIterableWrapper; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Function; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class SinglePageResultSet implements ResultSet { - private final AsyncResultSet onlyPage; - - public SinglePageResultSet(AsyncResultSet onlyPage) { - this.onlyPage = onlyPage; - assert !onlyPage.hasMorePages(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return onlyPage.getColumnDefinitions(); - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return onlyPage.getExecutionInfo(); - } - - @NonNull - @Override - public List getExecutionInfos() { - // Assuming this will be called 0 or 1 time, avoid creating the list if it's 0. 
- return ImmutableList.of(onlyPage.getExecutionInfo()); - } - - @Override - public boolean isFullyFetched() { - return true; - } - - @Override - public int getAvailableWithoutFetching() { - return onlyPage.remaining(); - } - - @NonNull - @Override - public Iterator iterator() { - return onlyPage.currentPage().iterator(); - } - - @NonNull - @Override - public Spliterator spliterator() { - return PagingIterableSpliterator.builder(this) - .withEstimatedSize(getAvailableWithoutFetching()) - .build(); - } - - @NonNull - @Override - public PagingIterable map( - Function elementMapper) { - return new PagingIterableWrapper<>(this, elementMapper, true); - } - - @Override - public boolean wasApplied() { - return onlyPage.wasApplied(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java deleted file mode 100644 index 77cfa759237..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.NotThreadSafe; - -/** - * Implementation note: contrary to most GettableBy* and SettableBy* implementations, this class is - * mutable. - */ -@NotThreadSafe -public class DefaultTupleValue implements TupleValue, Serializable { - - private static final long serialVersionUID = 1; - private final TupleType type; - private final ByteBuffer[] values; - - public DefaultTupleValue(@NonNull TupleType type) { - this(type, new ByteBuffer[type.getComponentTypes().size()]); - } - - public DefaultTupleValue(@NonNull TupleType type, @NonNull Object... 
values) { - this( - type, - ValuesHelper.encodeValues( - values, - type.getComponentTypes(), - type.getAttachmentPoint().getCodecRegistry(), - type.getAttachmentPoint().getProtocolVersion())); - } - - private DefaultTupleValue(TupleType type, ByteBuffer[] values) { - Preconditions.checkNotNull(type); - this.type = type; - this.values = values; - } - - @NonNull - @Override - public TupleType getType() { - return type; - } - - @Override - public int size() { - return values.length; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @NonNull - @Override - public TupleValue setBytesUnsafe(int i, @Nullable ByteBuffer v) { - values[i] = v; - return this; - } - - @NonNull - @Override - public DataType getType(int i) { - return type.getComponentTypes().get(i); - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return type.getAttachmentPoint().getCodecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return type.getAttachmentPoint().getProtocolVersion(); - } - - /** - * @serialData The type of the tuple, followed by an array of byte arrays representing the values - * (null values are represented by {@code null}). 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof TupleValue)) { - return false; - } - TupleValue that = (TupleValue) o; - - if (!type.equals(that.getType())) { - return false; - } - - for (int i = 0; i < values.length; i++) { - DataType innerThisType = type.getComponentTypes().get(i); - DataType innerThatType = that.getType().getComponentTypes().get(i); - if (!innerThisType.equals(innerThatType)) { - return false; - } - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.getBytesUnsafe(i), this.protocolVersion()); - Object thatValue = - that.codecRegistry() - .codecFor(innerThatType) - .decode(that.getBytesUnsafe(i), that.protocolVersion()); - if (!Objects.equals(thisValue, thatValue)) { - return false; - } - } - return true; - } - - @Override - public int hashCode() { - - int result = type.hashCode(); - - for (int i = 0; i < values.length; i++) { - DataType innerThisType = type.getComponentTypes().get(i); - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.values[i], this.protocolVersion()); - if (thisValue != null) { - result = 31 * result + thisValue.hashCode(); - } - } - - return result; - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final TupleType type; - private final byte[][] values; - - SerializationProxy(DefaultTupleValue tuple) { - this.type = tuple.type; - this.values = new byte[tuple.values.length][]; - for (int i = 0; i < tuple.values.length; i++) { - ByteBuffer buffer = tuple.values[i]; - this.values[i] = (buffer == null) ? 
null : Bytes.getArray(buffer); - } - } - - private Object readResolve() { - ByteBuffer[] buffers = new ByteBuffer[this.values.length]; - for (int i = 0; i < this.values.length; i++) { - byte[] value = this.values[i]; - buffers[i] = (value == null) ? null : ByteBuffer.wrap(value); - } - return new DefaultTupleValue(this.type, buffers); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java deleted file mode 100644 index c9bf986fcc8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.NotThreadSafe; - -/** - * Implementation note: contrary to most GettableBy* and SettableBy* implementations, this class is - * mutable. - */ -@NotThreadSafe -public class DefaultUdtValue implements UdtValue, Serializable { - - private static final long serialVersionUID = 1; - - private final UserDefinedType type; - private final ByteBuffer[] values; - - public DefaultUdtValue(@NonNull UserDefinedType type) { - this(type, new ByteBuffer[type.getFieldTypes().size()]); - } - - public DefaultUdtValue(@NonNull UserDefinedType type, @NonNull Object... 
values) { - this( - type, - ValuesHelper.encodeValues( - values, - type.getFieldTypes(), - type.getAttachmentPoint().getCodecRegistry(), - type.getAttachmentPoint().getProtocolVersion())); - } - - private DefaultUdtValue(UserDefinedType type, ByteBuffer[] values) { - Preconditions.checkNotNull(type); - this.type = type; - this.values = values; - } - - @NonNull - @Override - public UserDefinedType getType() { - return type; - } - - @Override - public int size() { - return values.length; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - List indices = type.allIndicesOf(id); - if (indices.isEmpty()) { - throw new IllegalArgumentException(id + " is not a field in this UDT"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - int indexOf = type.firstIndexOf(id); - if (indexOf == -1) { - throw new IllegalArgumentException(id + " is not a field in this UDT"); - } - return indexOf; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - List indices = type.allIndicesOf(name); - if (indices.isEmpty()) { - throw new IllegalArgumentException(name + " is not a field in this UDT"); - } - return indices; - } - - @Override - public int firstIndexOf(@NonNull String name) { - int indexOf = type.firstIndexOf(name); - if (indexOf == -1) { - throw new IllegalArgumentException(name + " is not a field in this UDT"); - } - return indexOf; - } - - @NonNull - @Override - public DataType getType(int i) { - return type.getFieldTypes().get(i); - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return values[i]; - } - - @NonNull - @Override - public UdtValue setBytesUnsafe(int i, @Nullable ByteBuffer v) { - values[i] = v; - return this; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return type.getAttachmentPoint().getCodecRegistry(); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return 
type.getAttachmentPoint().getProtocolVersion(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof UdtValue)) { - return false; - } - UdtValue that = (UdtValue) o; - - if (!type.equals(that.getType())) { - return false; - } - - for (int i = 0; i < values.length; i++) { - - DataType innerThisType = type.getFieldTypes().get(i); - DataType innerThatType = that.getType().getFieldTypes().get(i); - - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.getBytesUnsafe(i), this.protocolVersion()); - Object thatValue = - that.codecRegistry() - .codecFor(innerThatType) - .decode(that.getBytesUnsafe(i), that.protocolVersion()); - - if (!Objects.equals(thisValue, thatValue)) { - return false; - } - } - return true; - } - - @Override - public int hashCode() { - int result = type.hashCode(); - for (int i = 0; i < values.length; i++) { - DataType innerThisType = type.getFieldTypes().get(i); - Object thisValue = - this.codecRegistry() - .codecFor(innerThisType) - .decode(this.values[i], this.protocolVersion()); - if (thisValue != null) { - result = 31 * result + thisValue.hashCode(); - } - } - return result; - } - - /** - * @serialData The type of the tuple, followed by an array of byte arrays representing the values - * (null values are represented by {@code null}). 
- */ - private Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) - throws InvalidObjectException { - // Should never be called since we serialized a proxy - throw new InvalidObjectException("Proxy required"); - } - - private static class SerializationProxy implements Serializable { - - private static final long serialVersionUID = 1; - - private final UserDefinedType type; - private final byte[][] values; - - SerializationProxy(DefaultUdtValue udt) { - this.type = udt.type; - this.values = new byte[udt.values.length][]; - for (int i = 0; i < udt.values.length; i++) { - ByteBuffer buffer = udt.values[i]; - this.values[i] = (buffer == null) ? null : Bytes.getArray(buffer); - } - } - - private Object readResolve() { - ByteBuffer[] buffers = new ByteBuffer[this.values.length]; - for (int i = 0; i < this.values.length; i++) { - byte[] value = this.values[i]; - buffers[i] = (value == null) ? null : ByteBuffer.wrap(value); - } - return new DefaultUdtValue(this.type, buffers); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java deleted file mode 100644 index d35c164eb84..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import net.jcip.annotations.Immutable; - -/** - * Indexes an ordered list of identifiers. 
- * - * @see GettableByName - * @see GettableById - */ -@Immutable -public class IdentifierIndex { - - private final ListMultimap byId; - private final ListMultimap byCaseSensitiveName; - private final ListMultimap byCaseInsensitiveName; - - public IdentifierIndex(List ids) { - ImmutableListMultimap.Builder byId = ImmutableListMultimap.builder(); - ImmutableListMultimap.Builder byCaseSensitiveName = - ImmutableListMultimap.builder(); - ImmutableListMultimap.Builder byCaseInsensitiveName = - ImmutableListMultimap.builder(); - - int i = 0; - for (CqlIdentifier id : ids) { - byId.put(id, i); - byCaseSensitiveName.put(id.asInternal(), i); - byCaseInsensitiveName.put(id.asInternal().toLowerCase(Locale.ROOT), i); - i += 1; - } - - this.byId = byId.build(); - this.byCaseSensitiveName = byCaseSensitiveName.build(); - this.byCaseInsensitiveName = byCaseInsensitiveName.build(); - } - - /** - * Returns all occurrences of a given name, given the matching rules described in {@link - * AccessibleByName}. - */ - public List allIndicesOf(String name) { - return Strings.isDoubleQuoted(name) - ? byCaseSensitiveName.get(Strings.unDoubleQuote(name)) - : byCaseInsensitiveName.get(name.toLowerCase(Locale.ROOT)); - } - - /** - * Returns the first occurrence of a given name, given the matching rules described in {@link - * AccessibleByName}, or -1 if it's not in the list. - */ - public int firstIndexOf(String name) { - Iterator indices = allIndicesOf(name).iterator(); - return indices.hasNext() ? indices.next() : -1; - } - - /** Returns all occurrences of a given identifier. */ - public List allIndicesOf(CqlIdentifier id) { - return byId.get(id); - } - - /** Returns the first occurrence of a given identifier, or -1 if it's not in the list. */ - public int firstIndexOf(CqlIdentifier id) { - Iterator indices = allIndicesOf(id).iterator(); - return indices.hasNext() ? 
indices.next() : -1; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java deleted file mode 100644 index 24490ca2509..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import java.nio.ByteBuffer; -import java.util.List; - -public class ValuesHelper { - - public static ByteBuffer[] encodeValues( - Object[] values, - List fieldTypes, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - Preconditions.checkArgument( - values.length <= fieldTypes.size(), - "Too many values (expected %s, got %s)", - fieldTypes.size(), - values.length); - - ByteBuffer[] encodedValues = new ByteBuffer[fieldTypes.size()]; - for (int i = 0; i < values.length; i++) { - Object value = values[i]; - ByteBuffer encodedValue; - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - encodedValue = - TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - encodedValue = - TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - encodedValue = - TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - TypeCodec 
codec = - (value == null) - ? codecRegistry.codecFor(fieldTypes.get(i)) - : codecRegistry.codecFor(fieldTypes.get(i), value); - encodedValue = codec.encode(value, protocolVersion); - } - encodedValues[i] = encodedValue; - } - return encodedValues; - } - - public static ByteBuffer[] encodePreparedValues( - Object[] values, - ColumnDefinitions variableDefinitions, - CodecRegistry codecRegistry, - ProtocolVersion protocolVersion) { - - // Almost same as encodeValues, but we can't reuse because of variableDefinitions. Rebuilding a - // list of datatypes is not worth it, so duplicate the code. - - Preconditions.checkArgument( - values.length <= variableDefinitions.size(), - "Too many variables (expected %s, got %s)", - variableDefinitions.size(), - values.length); - - ByteBuffer[] encodedValues = new ByteBuffer[variableDefinitions.size()]; - int i; - for (i = 0; i < values.length; i++) { - Object value = values[i]; - ByteBuffer encodedValue; - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - encodedValue = - TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - encodedValue = - TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - encodedValue = - TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - TypeCodec codec = - (value == null) - ? 
codecRegistry.codecFor(variableDefinitions.get(i).getType()) - : codecRegistry.codecFor(variableDefinitions.get(i).getType(), value); - encodedValue = codec.encode(value, protocolVersion); - } - encodedValues[i] = encodedValue; - } - for (; i < encodedValues.length; i++) { - encodedValues[i] = ProtocolConstants.UNSET_VALUE; - } - return encodedValues; - } - - public static ByteBuffer encodeToDefaultCqlMapping( - Object value, CodecRegistry codecRegistry, ProtocolVersion protocolVersion) { - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - return TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - return TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - return TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - return codecRegistry.codecFor(value).encode(value, protocolVersion); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java deleted file mode 100644 index a02a5eb3148..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java +++ /dev/null @@ -1,498 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.DefaultNodeDistanceEvaluatorHelper; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.OptionalLocalDcHelper; -import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.DcAgnosticNodeSet; -import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.MultiDcNodeSet; -import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.NodeSet; -import 
com.datastax.oss.driver.internal.core.loadbalancing.nodeset.SingleDcNodeSet; -import com.datastax.oss.driver.internal.core.util.ArrayUtils; -import com.datastax.oss.driver.internal.core.util.collection.CompositeQueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.LazyQueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.QueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.base.Predicates; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.IntUnaryOperator; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A basic implementation of {@link LoadBalancingPolicy} that can serve as a building block for more - * advanced use cases. - * - *

To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   basic.load-balancing-policy {
- *     class = BasicLoadBalancingPolicy
- *     local-datacenter = datacenter1 # optional
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Local datacenter: This implementation will only define a local datacenter if it is - * explicitly set either through configuration or programmatically; if the local datacenter is - * unspecified, this implementation will effectively act as a datacenter-agnostic load balancing - * policy and will consider all nodes in the cluster when creating query plans, regardless of their - * datacenter. - * - *

Query plan: This implementation prioritizes replica nodes over non-replica ones; if - * more than one replica is available, the replicas will be shuffled. Non-replica nodes will be - * included in a round-robin fashion. If the local datacenter is defined (see above), query plans - * will only include local nodes, never remote ones; if it is unspecified however, query plans may - * contain nodes from different datacenters. - * - *

This class is not recommended for normal users who should always prefer {@link - * DefaultLoadBalancingPolicy}. - */ -@ThreadSafe -public class BasicLoadBalancingPolicy implements LoadBalancingPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(BasicLoadBalancingPolicy.class); - - protected static final IntUnaryOperator INCREMENT = i -> (i == Integer.MAX_VALUE) ? 0 : i + 1; - private static final Object[] EMPTY_NODES = new Object[0]; - - @NonNull protected final InternalDriverContext context; - @NonNull protected final DriverExecutionProfile profile; - @NonNull protected final String logPrefix; - - protected final AtomicInteger roundRobinAmount = new AtomicInteger(); - - private final int maxNodesPerRemoteDc; - private final boolean allowDcFailoverForLocalCl; - private final ConsistencyLevel defaultConsistencyLevel; - - // private because they should be set in init() and never be modified after - private volatile DistanceReporter distanceReporter; - private volatile NodeDistanceEvaluator nodeDistanceEvaluator; - private volatile String localDc; - private volatile NodeSet liveNodes; - private final LinkedHashSet preferredRemoteDcs; - - public BasicLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - this.context = (InternalDriverContext) context; - profile = context.getConfig().getProfile(profileName); - logPrefix = context.getSessionName() + "|" + profileName; - maxNodesPerRemoteDc = - profile.getInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC); - allowDcFailoverForLocalCl = - profile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS); - defaultConsistencyLevel = - this.context - .getConsistencyLevelRegistry() - .nameToLevel(profile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); - - preferredRemoteDcs = - new LinkedHashSet<>( - profile.getStringList( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)); - } - - /** - * 
Returns the local datacenter name, if known; empty otherwise. - * - *

When this method returns null, then datacenter awareness is completely disabled. All - * non-ignored nodes will be considered "local" regardless of their actual datacenters, and will - * have equal chances of being selected for query plans. - * - *

After the policy is {@linkplain #init(Map, DistanceReporter) initialized} this method will - * return the local datacenter that was discovered by calling {@link #discoverLocalDc(Map)}. - * Before initialization, this method always returns null. - */ - @Nullable - public String getLocalDatacenter() { - return localDc; - } - - @NonNull - @Override - public Map getStartupConfiguration() { - ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); - if (localDc != null) { - builder.put("localDc", localDc); - } else { - // Local data center may not be discovered prior to connection pool initialization. - // In such scenario, return configured local data center name. - // Note that when using DC inferring load balancing policy, startup configuration - // may not show local DC name, because it will be discovered only once control connection - // is established and datacenter of contact points known. - Optional configuredDc = - new OptionalLocalDcHelper(context, profile, logPrefix).configuredLocalDc(); - configuredDc.ifPresent(d -> builder.put("localDc", d)); - } - if (!preferredRemoteDcs.isEmpty()) { - builder.put("preferredRemoteDcs", preferredRemoteDcs); - } - if (allowDcFailoverForLocalCl) { - builder.put("allowDcFailoverForLocalCl", allowDcFailoverForLocalCl); - } - if (maxNodesPerRemoteDc > 0) { - builder.put("maxNodesPerRemoteDc", maxNodesPerRemoteDc); - } - return ImmutableMap.of(BasicLoadBalancingPolicy.class.getSimpleName(), builder.build()); - } - - /** @return The nodes currently considered as live. */ - protected NodeSet getLiveNodes() { - return liveNodes; - } - - @Override - public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { - this.distanceReporter = distanceReporter; - localDc = discoverLocalDc(nodes).orElse(null); - nodeDistanceEvaluator = createNodeDistanceEvaluator(localDc, nodes); - liveNodes = - localDc == null - ? new DcAgnosticNodeSet() - : maxNodesPerRemoteDc <= 0 ? 
new SingleDcNodeSet(localDc) : new MultiDcNodeSet(); - for (Node node : nodes.values()) { - NodeDistance distance = computeNodeDistance(node); - distanceReporter.setDistance(node, distance); - if (distance != NodeDistance.IGNORED && node.getState() != NodeState.DOWN) { - // This includes state == UNKNOWN. If the node turns out to be unreachable, this will be - // detected when we try to open a pool to it, it will get marked down and this will be - // signaled back to this policy, which will then remove it from the live set. - liveNodes.add(node); - } - } - } - - /** - * Returns the local datacenter, if it can be discovered, or returns {@link Optional#empty empty} - * otherwise. - * - *

This method is called only once, during {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialization}. - * - *

Implementors may choose to throw {@link IllegalStateException} instead of returning {@link - * Optional#empty empty}, if they require a local datacenter to be defined in order to operate - * properly. - * - *

If this method returns empty, then datacenter awareness will be completely disabled. All - * non-ignored nodes will be considered "local" regardless of their actual datacenters, and will - * have equal chances of being selected for query plans. - * - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was initialized. This argument is provided in case - * implementors need to inspect the cluster topology to discover the local datacenter. - * @return The local datacenter, or {@link Optional#empty empty} if none found. - * @throws IllegalStateException if the local datacenter could not be discovered, and this policy - * cannot operate without it. - */ - @NonNull - protected Optional discoverLocalDc(@NonNull Map nodes) { - return new OptionalLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); - } - - /** - * Creates a new node distance evaluator to use with this policy. - * - *

This method is called only once, during {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialization}, and only after local datacenter - * discovery has been attempted. - * - * @param localDc The local datacenter that was just discovered, or null if none found. - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was initialized. This argument is provided in case - * implementors need to inspect the cluster topology to create the evaluator. - * @return the distance evaluator to use. - */ - @NonNull - protected NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes) { - return new DefaultNodeDistanceEvaluatorHelper(context, profile, logPrefix) - .createNodeDistanceEvaluator(localDc, nodes); - } - - @NonNull - @Override - public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { - // Take a snapshot since the set is concurrent: - Object[] currentNodes = liveNodes.dc(localDc).toArray(); - - Set allReplicas = getReplicas(request, session); - int replicaCount = 0; // in currentNodes - - if (!allReplicas.isEmpty()) { - // Move replicas to the beginning - for (int i = 0; i < currentNodes.length; i++) { - Node node = (Node) currentNodes[i]; - if (allReplicas.contains(node)) { - ArrayUtils.bubbleUp(currentNodes, i, replicaCount); - replicaCount += 1; - } - } - - if (replicaCount > 1) { - shuffleHead(currentNodes, replicaCount); - } - } - - LOG.trace("[{}] Prioritizing {} local replicas", logPrefix, replicaCount); - - // Round-robin the remaining nodes - ArrayUtils.rotate( - currentNodes, - replicaCount, - currentNodes.length - replicaCount, - roundRobinAmount.getAndUpdate(INCREMENT)); - - QueryPlan plan = currentNodes.length == 0 ? 
QueryPlan.EMPTY : new SimpleQueryPlan(currentNodes); - return maybeAddDcFailover(request, plan); - } - - @NonNull - protected Set getReplicas(@Nullable Request request, @Nullable Session session) { - if (request == null || session == null) { - return Collections.emptySet(); - } - - Optional maybeTokenMap = context.getMetadataManager().getMetadata().getTokenMap(); - if (!maybeTokenMap.isPresent()) { - return Collections.emptySet(); - } - - // Note: we're on the hot path and the getXxx methods are potentially more than simple getters, - // so we only call each method when strictly necessary (which is why the code below looks a bit - // weird). - CqlIdentifier keyspace; - Token token; - ByteBuffer key; - try { - keyspace = request.getKeyspace(); - if (keyspace == null) { - keyspace = request.getRoutingKeyspace(); - } - if (keyspace == null && session.getKeyspace().isPresent()) { - keyspace = session.getKeyspace().get(); - } - if (keyspace == null) { - return Collections.emptySet(); - } - - token = request.getRoutingToken(); - key = (token == null) ? request.getRoutingKey() : null; - if (token == null && key == null) { - return Collections.emptySet(); - } - } catch (Exception e) { - // Protect against poorly-implemented Request instances - LOG.error("Unexpected error while trying to compute query plan", e); - return Collections.emptySet(); - } - - TokenMap tokenMap = maybeTokenMap.get(); - return token != null - ? 
tokenMap.getReplicas(keyspace, token) - : tokenMap.getReplicas(keyspace, key); - } - - @NonNull - protected Queue maybeAddDcFailover(@Nullable Request request, @NonNull Queue local) { - if (maxNodesPerRemoteDc <= 0 || localDc == null) { - return local; - } - if (!allowDcFailoverForLocalCl && request instanceof Statement) { - Statement statement = (Statement) request; - ConsistencyLevel consistency = statement.getConsistencyLevel(); - if (consistency == null) { - consistency = defaultConsistencyLevel; - } - if (consistency.isDcLocal()) { - return local; - } - } - if (preferredRemoteDcs.isEmpty()) { - return new CompositeQueryPlan(local, buildRemoteQueryPlanAll()); - } - return new CompositeQueryPlan(local, buildRemoteQueryPlanPreferred()); - } - - private QueryPlan buildRemoteQueryPlanAll() { - - return new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - - Object[] remoteNodes = - liveNodes.dcs().stream() - .filter(Predicates.not(Predicates.equalTo(localDc))) - .flatMap(dc -> liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc)) - .toArray(); - if (remoteNodes.length == 0) { - return EMPTY_NODES; - } - shuffleHead(remoteNodes, remoteNodes.length); - return remoteNodes; - } - }; - } - - private QueryPlan buildRemoteQueryPlanPreferred() { - - Set dcs = liveNodes.dcs(); - List orderedDcs = Lists.newArrayListWithCapacity(dcs.size()); - orderedDcs.addAll(preferredRemoteDcs); - orderedDcs.addAll(Sets.difference(dcs, preferredRemoteDcs)); - - QueryPlan[] queryPlans = - orderedDcs.stream() - .filter(Predicates.not(Predicates.equalTo(localDc))) - .map( - (dc) -> { - return new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - Object[] rv = liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc).toArray(); - if (rv.length == 0) { - return EMPTY_NODES; - } - shuffleHead(rv, rv.length); - return rv; - } - }; - }) - .toArray(QueryPlan[]::new); - - return new CompositeQueryPlan(queryPlans); - } - - /** Exposed as a protected method so that 
it can be accessed by tests */ - protected void shuffleHead(Object[] currentNodes, int headLength) { - ArrayUtils.shuffleHead(currentNodes, headLength); - } - - @Override - public void onAdd(@NonNull Node node) { - NodeDistance distance = computeNodeDistance(node); - // Setting to a non-ignored distance triggers the session to open a pool, which will in turn - // set the node UP when the first channel gets opened, then #onUp will be called, and the - // node will be eventually added to the live set. - distanceReporter.setDistance(node, distance); - LOG.debug("[{}] {} was added, setting distance to {}", logPrefix, node, distance); - } - - @Override - public void onUp(@NonNull Node node) { - NodeDistance distance = computeNodeDistance(node); - if (node.getDistance() != distance) { - distanceReporter.setDistance(node, distance); - } - if (distance != NodeDistance.IGNORED && liveNodes.add(node)) { - LOG.debug("[{}] {} came back UP, added to live set", logPrefix, node); - } - } - - @Override - public void onDown(@NonNull Node node) { - if (liveNodes.remove(node)) { - LOG.debug("[{}] {} went DOWN, removed from live set", logPrefix, node); - } - } - - @Override - public void onRemove(@NonNull Node node) { - if (liveNodes.remove(node)) { - LOG.debug("[{}] {} was removed, removed from live set", logPrefix, node); - } - } - - /** - * Computes the distance of the given node. - * - *

This method is called during {@linkplain #init(Map, DistanceReporter) initialization}, when - * a node {@linkplain #onAdd(Node) is added}, and when a node {@linkplain #onUp(Node) is back UP}. - */ - protected NodeDistance computeNodeDistance(@NonNull Node node) { - // We interrogate the custom evaluator every time since it could be dynamic - // and change its verdict between two invocations of this method. - NodeDistance distance = nodeDistanceEvaluator.evaluateDistance(node, localDc); - if (distance != null) { - return distance; - } - // no local DC defined: all nodes are considered LOCAL. - if (localDc == null) { - return NodeDistance.LOCAL; - } - // otherwise, the node is LOCAL if its datacenter is the local datacenter. - if (Objects.equals(node.getDatacenter(), localDc)) { - return NodeDistance.LOCAL; - } - // otherwise, the node will be either REMOTE or IGNORED, depending - // on how many remote nodes we accept per DC. - if (maxNodesPerRemoteDc > 0) { - Object[] remoteNodes = liveNodes.dc(node.getDatacenter()).toArray(); - for (int i = 0; i < maxNodesPerRemoteDc; i++) { - if (i == remoteNodes.length) { - // there is still room for one more REMOTE node in this DC - return NodeDistance.REMOTE; - } else if (remoteNodes[i] == node) { - return NodeDistance.REMOTE; - } - } - } - return NodeDistance.IGNORED; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java deleted file mode 100644 index 1d978091c9d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.InferringLocalDcHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -/** - * An implementation of {@link LoadBalancingPolicy} that infers the local datacenter from the - * contact points, if no datacenter was provided neither through configuration nor programmatically. - * - *

To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   basic.load-balancing-policy {
- *     class = DcInferringLoadBalancingPolicy
- *     local-datacenter = datacenter1 # optional
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Local datacenter: This implementation requires a local datacenter to be defined, - * otherwise it will throw an {@link IllegalStateException}. A local datacenter can be supplied - * either: - * - *

    - *
  1. Programmatically with {@link - * com.datastax.oss.driver.api.core.session.SessionBuilder#withLocalDatacenter(String) - * SessionBuilder#withLocalDatacenter(String)}; - *
  2. Through configuration, by defining the option {@link - * DefaultDriverOption#LOAD_BALANCING_LOCAL_DATACENTER - * basic.load-balancing-policy.local-datacenter}; - *
  3. Or implicitly: in this case this implementation will infer the local datacenter from the - * provided contact points, if and only if they are all located in the same datacenter. - *
- * - *

Query plan: see {@link DefaultLoadBalancingPolicy} for details on the computation of - * query plans. - * - *

This class is not recommended for normal users who should always prefer {@link - * DefaultLoadBalancingPolicy}. - */ -@ThreadSafe -public class DcInferringLoadBalancingPolicy extends DefaultLoadBalancingPolicy { - - public DcInferringLoadBalancingPolicy( - @NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - } - - @NonNull - @Override - protected Optional discoverLocalDc(@NonNull Map nodes) { - return new InferringLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java deleted file mode 100644 index 8e1c1fe5039..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java +++ /dev/null @@ -1,363 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.MINUTES; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.loadbalancing.helper.MandatoryLocalDcHelper; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.ArrayUtils; -import com.datastax.oss.driver.internal.core.util.collection.QueryPlan; -import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.BitSet; -import java.util.Map; -import java.util.Optional; -import java.util.OptionalLong; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.atomic.AtomicLongArray; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default load balancing policy implementation. - * - *

To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   basic.load-balancing-policy {
- *     class = DefaultLoadBalancingPolicy
- *     local-datacenter = datacenter1
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Local datacenter: This implementation requires a local datacenter to be defined, - * otherwise it will throw an {@link IllegalStateException}. A local datacenter can be supplied - * either: - * - *

    - *
  1. Programmatically with {@link - * com.datastax.oss.driver.api.core.session.SessionBuilder#withLocalDatacenter(String) - * SessionBuilder#withLocalDatacenter(String)}; - *
  2. Through configuration, by defining the option {@link - * DefaultDriverOption#LOAD_BALANCING_LOCAL_DATACENTER - * basic.load-balancing-policy.local-datacenter}; - *
  3. Or implicitly, if and only if no explicit contact points were provided: in this case this - * implementation will infer the local datacenter from the implicit contact point (localhost). - *
- * - *

Query plan: This implementation prioritizes replica nodes over non-replica ones; if - * more than one replica is available, the replicas will be shuffled; if more than 2 replicas are - * available, they will be ordered from most healthy to least healthy ("Power of 2 choices" or busy - * node avoidance algorithm). Non-replica nodes will be included in a round-robin fashion. If the - * local datacenter is defined (see above), query plans will only include local nodes, never remote - * ones; if it is unspecified however, query plans may contain nodes from different datacenters. - */ -@ThreadSafe -public class DefaultLoadBalancingPolicy extends BasicLoadBalancingPolicy implements RequestTracker { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultLoadBalancingPolicy.class); - - private static final long NEWLY_UP_INTERVAL_NANOS = MINUTES.toNanos(1); - private static final int MAX_IN_FLIGHT_THRESHOLD = 10; - private static final long RESPONSE_COUNT_RESET_INTERVAL_NANOS = MILLISECONDS.toNanos(200); - - protected final ConcurrentMap responseTimes; - protected final Map upTimes = new ConcurrentHashMap<>(); - private final boolean avoidSlowReplicas; - - public DefaultLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - super(context, profileName); - this.avoidSlowReplicas = - profile.getBoolean(DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true); - this.responseTimes = new MapMaker().weakKeys().makeMap(); - } - - @NonNull - @Override - public Optional getRequestTracker() { - if (avoidSlowReplicas) { - return Optional.of(this); - } else { - return Optional.empty(); - } - } - - @NonNull - @Override - protected Optional discoverLocalDc(@NonNull Map nodes) { - return new MandatoryLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); - } - - @NonNull - @Override - public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { - if (!avoidSlowReplicas) { - return 
super.newQueryPlan(request, session); - } - - // Take a snapshot since the set is concurrent: - Object[] currentNodes = getLiveNodes().dc(getLocalDatacenter()).toArray(); - - Set allReplicas = getReplicas(request, session); - int replicaCount = 0; // in currentNodes - - if (!allReplicas.isEmpty()) { - - // Move replicas to the beginning of the plan - for (int i = 0; i < currentNodes.length; i++) { - Node node = (Node) currentNodes[i]; - if (allReplicas.contains(node)) { - ArrayUtils.bubbleUp(currentNodes, i, replicaCount); - replicaCount++; - } - } - - if (replicaCount > 1) { - - shuffleHead(currentNodes, replicaCount); - - if (replicaCount > 2) { - - assert session != null; - - // Test replicas health - Node newestUpReplica = null; - BitSet unhealthyReplicas = null; // bit mask storing indices of unhealthy replicas - long mostRecentUpTimeNanos = -1; - long now = nanoTime(); - for (int i = 0; i < replicaCount; i++) { - Node node = (Node) currentNodes[i]; - assert node != null; - Long upTimeNanos = upTimes.get(node); - if (upTimeNanos != null - && now - upTimeNanos - NEWLY_UP_INTERVAL_NANOS < 0 - && upTimeNanos - mostRecentUpTimeNanos > 0) { - newestUpReplica = node; - mostRecentUpTimeNanos = upTimeNanos; - } - if (newestUpReplica == null && isUnhealthy(node, session, now)) { - if (unhealthyReplicas == null) { - unhealthyReplicas = new BitSet(replicaCount); - } - unhealthyReplicas.set(i); - } - } - - // When: - // - there isn't any newly UP replica and - // - there is one or more unhealthy replicas and - // - there is a majority of healthy replicas - int unhealthyReplicasCount = - unhealthyReplicas == null ? 0 : unhealthyReplicas.cardinality(); - if (newestUpReplica == null - && unhealthyReplicasCount > 0 - && unhealthyReplicasCount < (replicaCount / 2.0)) { - - // Reorder the unhealthy replicas to the back of the list - // Start from the back of the replicas, then move backwards; - // stop once all unhealthy replicas are moved to the back. 
- int counter = 0; - for (int i = replicaCount - 1; i >= 0 && counter < unhealthyReplicasCount; i--) { - if (unhealthyReplicas.get(i)) { - ArrayUtils.bubbleDown(currentNodes, i, replicaCount - 1 - counter); - counter++; - } - } - } - - // When: - // - there is a newly UP replica and - // - the replica in first or second position is the most recent replica marked as UP and - // - dice roll 1d4 != 1 - else if ((newestUpReplica == currentNodes[0] || newestUpReplica == currentNodes[1]) - && diceRoll1d4() != 1) { - - // Send it to the back of the replicas - ArrayUtils.bubbleDown( - currentNodes, newestUpReplica == currentNodes[0] ? 0 : 1, replicaCount - 1); - } - - // Reorder the first two replicas in the shuffled list based on the number of - // in-flight requests - if (getInFlight((Node) currentNodes[0], session) - > getInFlight((Node) currentNodes[1], session)) { - ArrayUtils.swap(currentNodes, 0, 1); - } - } - } - } - - LOG.trace("[{}] Prioritizing {} local replicas", logPrefix, replicaCount); - - // Round-robin the remaining nodes - ArrayUtils.rotate( - currentNodes, - replicaCount, - currentNodes.length - replicaCount, - roundRobinAmount.getAndUpdate(INCREMENT)); - - QueryPlan plan = currentNodes.length == 0 ? 
QueryPlan.EMPTY : new SimpleQueryPlan(currentNodes); - return maybeAddDcFailover(request, plan); - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - updateResponseTimes(node); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - updateResponseTimes(node); - } - - /** Exposed as a protected method so that it can be accessed by tests */ - protected long nanoTime() { - return System.nanoTime(); - } - - /** Exposed as a protected method so that it can be accessed by tests */ - protected int diceRoll1d4() { - return ThreadLocalRandom.current().nextInt(4); - } - - protected boolean isUnhealthy(@NonNull Node node, @NonNull Session session, long now) { - return isBusy(node, session) && isResponseRateInsufficient(node, now); - } - - protected boolean isBusy(@NonNull Node node, @NonNull Session session) { - return getInFlight(node, session) >= MAX_IN_FLIGHT_THRESHOLD; - } - - protected boolean isResponseRateInsufficient(@NonNull Node node, long now) { - NodeResponseRateSample sample = responseTimes.get(node); - return !(sample == null || sample.hasSufficientResponses(now)); - } - - /** - * Synchronously updates the response times for the given node. It is synchronous because the - * {@link #DefaultLoadBalancingPolicy(com.datastax.oss.driver.api.core.context.DriverContext, - * java.lang.String) CacheLoader.load} assigned is synchronous. - * - * @param node The node to update. - */ - protected void updateResponseTimes(@NonNull Node node) { - this.responseTimes.compute(node, (k, v) -> v == null ? 
new NodeResponseRateSample() : v.next()); - } - - protected int getInFlight(@NonNull Node node, @NonNull Session session) { - // The cast will always succeed because there's no way to replace the internal session impl - ChannelPool pool = ((DefaultSession) session).getPools().get(node); - // Note: getInFlight() includes orphaned ids, which is what we want as we need to account - // for requests that were cancelled or timed out (since the node is likely to still be - // processing them). - return (pool == null) ? 0 : pool.getInFlight(); - } - - protected class NodeResponseRateSample { - - @VisibleForTesting protected final long oldest; - @VisibleForTesting protected final OptionalLong newest; - - private NodeResponseRateSample() { - long now = nanoTime(); - this.oldest = now; - this.newest = OptionalLong.empty(); - } - - private NodeResponseRateSample(long oldestSample) { - this(oldestSample, nanoTime()); - } - - private NodeResponseRateSample(long oldestSample, long newestSample) { - this.oldest = oldestSample; - this.newest = OptionalLong.of(newestSample); - } - - @VisibleForTesting - protected NodeResponseRateSample(AtomicLongArray times) { - assert times.length() >= 1; - this.oldest = times.get(0); - this.newest = (times.length() > 1) ? OptionalLong.of(times.get(1)) : OptionalLong.empty(); - } - - // Our newest sample becomes the oldest in the next generation - private NodeResponseRateSample next() { - return new NodeResponseRateSample(this.getNewestValidSample(), nanoTime()); - } - - // If we have a pair of values return the newest, otherwise we have just one value... so just - // return it - private long getNewestValidSample() { - return this.newest.orElse(this.oldest); - } - - // response rate is considered insufficient when less than 2 responses were obtained in - // the past interval delimited by RESPONSE_COUNT_RESET_INTERVAL_NANOS. 
- private boolean hasSufficientResponses(long now) { - // If we only have one sample it's an automatic failure - if (!this.newest.isPresent()) return true; - long threshold = now - RESPONSE_COUNT_RESET_INTERVAL_NANOS; - return this.oldest - threshold >= 0; - } - } - - @NonNull - @Override - public Map getStartupConfiguration() { - Map parent = super.getStartupConfiguration(); - return ImmutableMap.of( - DefaultLoadBalancingPolicy.class.getSimpleName(), - parent.get(BasicLoadBalancingPolicy.class.getSimpleName())); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java deleted file mode 100644 index 537497b83c8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.Reflection; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.UUID; -import java.util.function.Predicate; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A {@link NodeDistanceEvaluatorHelper} implementation that fetches the user-supplied evaluator, if - * any, from the programmatic configuration API, or else, from the driver configuration. If no - * user-supplied evaluator can be retrieved, a dummy evaluator will be used which always evaluates - * null distances. 
- */ -@ThreadSafe -public class DefaultNodeDistanceEvaluatorHelper implements NodeDistanceEvaluatorHelper { - - private static final Logger LOG = - LoggerFactory.getLogger(DefaultNodeDistanceEvaluatorHelper.class); - - @NonNull protected final InternalDriverContext context; - @NonNull protected final DriverExecutionProfile profile; - @NonNull protected final String logPrefix; - - public DefaultNodeDistanceEvaluatorHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - this.context = context; - this.profile = profile; - this.logPrefix = logPrefix; - } - - @NonNull - @Override - public NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes) { - NodeDistanceEvaluator nodeDistanceEvaluatorFromConfig = nodeDistanceEvaluatorFromConfig(); - return (node, dc) -> { - NodeDistance distance = nodeDistanceEvaluatorFromConfig.evaluateDistance(node, dc); - if (distance != null) { - LOG.debug("[{}] Evaluator assigned distance {} to node {}", logPrefix, distance, node); - } else { - LOG.debug("[{}] Evaluator did not assign a distance to node {}", logPrefix, node); - } - return distance; - }; - } - - @NonNull - protected NodeDistanceEvaluator nodeDistanceEvaluatorFromConfig() { - NodeDistanceEvaluator evaluator = context.getNodeDistanceEvaluator(profile.getName()); - if (evaluator != null) { - LOG.debug("[{}] Node distance evaluator set programmatically", logPrefix); - } else { - evaluator = - Reflection.buildFromConfig( - context, - profile.getName(), - DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS, - NodeDistanceEvaluator.class) - .orElse(null); - if (evaluator != null) { - LOG.debug("[{}] Node distance evaluator set from configuration", logPrefix); - } else { - @SuppressWarnings({"unchecked", "deprecation"}) - Predicate nodeFilterFromConfig = - Reflection.buildFromConfig( - context, - profile.getName(), - DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, 
- Predicate.class) - .orElse(null); - if (nodeFilterFromConfig != null) { - evaluator = new NodeFilterToDistanceEvaluatorAdapter(nodeFilterFromConfig); - LOG.debug( - "[{}] Node distance evaluator set from deprecated node filter configuration", - logPrefix); - } - } - } - if (evaluator == null) { - evaluator = PASS_THROUGH_DISTANCE_EVALUATOR; - } - return evaluator; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java deleted file mode 100644 index 8608b855e8d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import static com.datastax.oss.driver.internal.core.time.Clock.LOG; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -/** - * An implementation of {@link LocalDcHelper} that fetches the user-supplied datacenter, if any, - * from the programmatic configuration API, or else, from the driver configuration. If no local - * datacenter is explicitly defined, this implementation infers the local datacenter from the - * contact points: if all contact points share the same datacenter, that datacenter is returned. If - * the contact points are from different datacenters, or if no contact points reported any - * datacenter, an {@link IllegalStateException} is thrown. - */ -@ThreadSafe -public class InferringLocalDcHelper extends OptionalLocalDcHelper { - - public InferringLocalDcHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - super(context, profile, logPrefix); - } - - /** @return The local datacenter; always present. 
*/ - @NonNull - @Override - public Optional discoverLocalDc(@NonNull Map nodes) { - Optional optionalLocalDc = super.discoverLocalDc(nodes); - if (optionalLocalDc.isPresent()) { - return optionalLocalDc; - } - Set datacenters = new HashSet<>(); - Set contactPoints = context.getMetadataManager().getContactPoints(); - for (Node node : contactPoints) { - String datacenter = node.getDatacenter(); - if (datacenter != null) { - datacenters.add(datacenter); - } - } - if (datacenters.size() == 1) { - String localDc = datacenters.iterator().next(); - LOG.info("[{}] Inferred local DC from contact points: {}", logPrefix, localDc); - return Optional.of(localDc); - } - if (datacenters.isEmpty()) { - throw new IllegalStateException( - "The local DC could not be inferred from contact points, please set it explicitly (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)"); - } - throw new IllegalStateException( - String.format( - "No local DC was provided, but the contact points are from different DCs: %s; " - + "please set the local DC explicitly (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)", - formatNodesAndDcs(contactPoints))); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java deleted file mode 100644 index 183c7f90dec..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@FunctionalInterface -@ThreadSafe -public interface LocalDcHelper { - - /** - * Returns the local datacenter, if it can be discovered, or returns {@link Optional#empty empty} - * otherwise. - * - *

Implementors may choose to throw {@link IllegalStateException} instead of returning {@link - * Optional#empty empty}, if they require a local datacenter to be defined in order to operate - * properly. - * - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialized}. This argument is provided in case - * implementors need to inspect the cluster topology to discover the local datacenter. - * @return The local datacenter, or {@link Optional#empty empty} if none found. - * @throws IllegalStateException if the local datacenter could not be discovered, and this policy - * cannot operate without it. - */ - @NonNull - Optional discoverLocalDc(@NonNull Map nodes); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java deleted file mode 100644 index 9a0e9a2d4ce..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * An implementation of {@link LocalDcHelper} that fetches the user-supplied datacenter, if any, - * from the programmatic configuration API, or else, from the driver configuration. If no local - * datacenter is explicitly defined, this implementation will consider two distinct situations: - * - *

    - *
  1. If no explicit contact points were provided, this implementation will infer the local - * datacenter from the implicit contact point (localhost). - *
  2. If explicit contact points were provided however, this implementation will throw {@link - * IllegalStateException}. - *
- */ -@ThreadSafe -public class MandatoryLocalDcHelper extends OptionalLocalDcHelper { - - private static final Logger LOG = LoggerFactory.getLogger(MandatoryLocalDcHelper.class); - - public MandatoryLocalDcHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - super(context, profile, logPrefix); - } - - /** @return The local datacenter; always present. */ - @NonNull - @Override - public Optional discoverLocalDc(@NonNull Map nodes) { - Optional optionalLocalDc = super.discoverLocalDc(nodes); - if (optionalLocalDc.isPresent()) { - return optionalLocalDc; - } - Set contactPoints = context.getMetadataManager().getContactPoints(); - if (context.getMetadataManager().wasImplicitContactPoint()) { - // We only allow automatic inference of the local DC in this specific case - assert contactPoints.size() == 1; - Node contactPoint = contactPoints.iterator().next(); - String localDc = contactPoint.getDatacenter(); - if (localDc != null) { - LOG.debug( - "[{}] Local DC set from implicit contact point {}: {}", - logPrefix, - contactPoint, - localDc); - return Optional.of(localDc); - } else { - throw new IllegalStateException( - "The local DC could not be inferred from implicit contact point, please set it explicitly (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)"); - } - } else { - throw new IllegalStateException( - "Since you provided explicit contact points, the local DC must be explicitly set (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter). " - + "Current contact points are: " - + formatNodesAndDcs(contactPoints) - + ". 
Current DCs in this cluster are: " - + formatDcs(nodes.values())); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java deleted file mode 100644 index 61e094b318a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@FunctionalInterface -@ThreadSafe -public interface NodeDistanceEvaluatorHelper { - - NodeDistanceEvaluator PASS_THROUGH_DISTANCE_EVALUATOR = (node, localDc) -> null; - - /** - * Creates a new node distance evaluator. - * - * @param localDc The local datacenter, or null if none defined. - * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) - * when the load balancing policy was {@linkplain LoadBalancingPolicy#init(Map, - * LoadBalancingPolicy.DistanceReporter) initialized}. This argument is provided in case - * implementors need to inspect the cluster topology to create the node distance evaluator. - * @return the node distance evaluator to use. - */ - @NonNull - NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java deleted file mode 100644 index 902018fb7d4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.function.Predicate; - -public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator { - - private final Predicate nodeFilter; - - public NodeFilterToDistanceEvaluatorAdapter(@NonNull Predicate nodeFilter) { - this.nodeFilter = nodeFilter; - } - - @Nullable - @Override - public NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc) { - return nodeFilter.test(node) ? null : NodeDistance.IGNORED; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java deleted file mode 100644 index c6143f3fa16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.helper; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * An implementation of {@link LocalDcHelper} that fetches the local datacenter from the - * programmatic configuration API, or else, from the driver configuration. If no user-supplied - * datacenter can be retrieved, it returns {@link Optional#empty empty}. 
- */ -@ThreadSafe -public class OptionalLocalDcHelper implements LocalDcHelper { - - private static final Logger LOG = LoggerFactory.getLogger(OptionalLocalDcHelper.class); - - @NonNull protected final InternalDriverContext context; - @NonNull protected final DriverExecutionProfile profile; - @NonNull protected final String logPrefix; - - public OptionalLocalDcHelper( - @NonNull InternalDriverContext context, - @NonNull DriverExecutionProfile profile, - @NonNull String logPrefix) { - this.context = context; - this.profile = profile; - this.logPrefix = logPrefix; - } - - /** - * @return The local datacenter from the programmatic configuration API, or from the driver - * configuration; {@link Optional#empty empty} if none found. - */ - @Override - @NonNull - public Optional discoverLocalDc(@NonNull Map nodes) { - Optional localDc = configuredLocalDc(); - if (localDc.isPresent()) { - checkLocalDatacenterCompatibility( - localDc.get(), context.getMetadataManager().getContactPoints()); - } else { - LOG.debug("[{}] Local DC not set, DC awareness will be disabled", logPrefix); - } - return localDc; - } - - /** - * Checks if the contact points are compatible with the local datacenter specified either through - * configuration, or programmatically. - * - *

The default implementation logs a warning when a contact point reports a datacenter - * different from the local one, and only for the default profile. - * - * @param localDc The local datacenter, as specified in the config, or programmatically. - * @param contactPoints The contact points provided when creating the session. - */ - protected void checkLocalDatacenterCompatibility( - @NonNull String localDc, Set contactPoints) { - if (profile.getName().equals(DriverExecutionProfile.DEFAULT_NAME)) { - Set badContactPoints = new LinkedHashSet<>(); - for (Node node : contactPoints) { - if (!Objects.equals(localDc, node.getDatacenter())) { - badContactPoints.add(node); - } - } - if (!badContactPoints.isEmpty()) { - LOG.warn( - "[{}] You specified {} as the local DC, but some contact points are from a different DC: {}; " - + "please provide the correct local DC, or check your contact points", - logPrefix, - localDc, - formatNodesAndDcs(badContactPoints)); - } - } - } - - /** - * Formats the given nodes as a string detailing each contact point and its datacenter, for - * informational purposes. - */ - @NonNull - protected String formatNodesAndDcs(Iterable nodes) { - List l = new ArrayList<>(); - for (Node node : nodes) { - l.add(node + "=" + node.getDatacenter()); - } - return String.join(", ", l); - } - - /** - * Formats the given nodes as a string detailing each distinct datacenter, for informational - * purposes. - */ - @NonNull - protected String formatDcs(Iterable nodes) { - List l = new ArrayList<>(); - for (Node node : nodes) { - if (node.getDatacenter() != null) { - l.add(node.getDatacenter()); - } - } - return String.join(", ", new TreeSet<>(l)); - } - - /** @return Local data center set programmatically or from configuration file. 
*/ - @NonNull - public Optional configuredLocalDc() { - String localDc = context.getLocalDatacenter(profile.getName()); - if (localDc != null) { - LOG.debug("[{}] Local DC set programmatically: {}", logPrefix, localDc); - return Optional.of(localDc); - } else if (profile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { - localDc = profile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER); - LOG.debug("[{}] Local DC set from configuration: {}", logPrefix, localDc); - return Optional.of(localDc); - } - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java deleted file mode 100644 index 2a6e79023de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DcAgnosticNodeSet implements NodeSet { - - private final Set nodes = new CopyOnWriteArraySet<>(); - - @Override - public boolean add(@NonNull Node node) { - return nodes.add(node); - } - - @Override - public boolean remove(@NonNull Node node) { - return nodes.remove(node); - } - - @Override - @NonNull - public Set dc(@Nullable String dc) { - return nodes; - } - - @Override - public Set dcs() { - return Collections.emptySet(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java deleted file mode 100644 index 37f02bec878..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.atomic.AtomicBoolean; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class MultiDcNodeSet implements NodeSet { - - private static final String UNKNOWN_DC = ""; - - private final Map> nodes = new ConcurrentHashMap<>(); - - @Override - public boolean add(@NonNull Node node) { - AtomicBoolean added = new AtomicBoolean(); - nodes.compute( - getMapKey(node), - (key, current) -> { - if (current == null) { - // We use CopyOnWriteArraySet because we need - // 1) to preserve insertion order, and - // 2) a "snapshot"-style toArray() implementation - current = new CopyOnWriteArraySet<>(); - } - if (current.add(node)) { - added.set(true); - } - return current; - }); - return added.get(); - } - - @Override - public boolean remove(@NonNull Node node) { - AtomicBoolean removed = new AtomicBoolean(); - nodes.compute( - getMapKey(node), - (key, current) -> { - if (current != null) { - if (current.remove(node)) { - removed.set(true); - } - } - return current; - }); - return removed.get(); - } - - @Override - @NonNull - public Set dc(@Nullable String dc) { - return nodes.getOrDefault(getMapKey(dc), Collections.emptySet()); - } - - @Override - public Set dcs() { - return nodes.keySet(); - } - - @NonNull - private String getMapKey(@NonNull Node node) { - return getMapKey(node.getDatacenter()); - } - - @NonNull - private String getMapKey(@Nullable String dc) { - return dc == null ? 
UNKNOWN_DC : dc; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java deleted file mode 100644 index 66460e16a7c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -/** - * A thread-safe abstraction around a map of nodes per datacenter, to facilitate node management by - * load balancing policies. - */ -@ThreadSafe -public interface NodeSet { - - /** - * Adds the given node to this set. - * - *

If this set was initialized with datacenter awareness, the node will be added to its - * datacenter's specific set; otherwise, the node is added to a general set containing all nodes - * in the cluster. - * - * @param node The node to add. - * @return true if the node was added, false otherwise (because it was already present). - */ - boolean add(@NonNull Node node); - - /** - * Removes the node from the set. - * - * @param node The node to remove. - * @return true if the node was removed, false otherwise (because it was not present). - */ - boolean remove(@NonNull Node node); - - /** - * Returns the current nodes in the given datacenter. - * - *

If this set was initialized with datacenter awareness, the returned set will contain only - * nodes pertaining to the given datacenter; otherwise, the given datacenter name is ignored and - * the returned set will contain all nodes in the cluster. - * - * @param dc The datacenter name, or null if the datacenter name is not known, or irrelevant. - * @return the current nodes in the given datacenter. - */ - @NonNull - Set dc(@Nullable String dc); - - /** - * Returns the current datacenter names known to this set. If datacenter awareness has been - * disabled, this method returns an empty set. - */ - Set dcs(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java deleted file mode 100644 index 21c89d46927..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SingleDcNodeSet implements NodeSet { - - private final Set nodes = new CopyOnWriteArraySet<>(); - - private final String dc; - private final Set dcs; - - public SingleDcNodeSet(@NonNull String dc) { - this.dc = dc; - dcs = ImmutableSet.of(dc); - } - - @Override - public boolean add(@NonNull Node node) { - if (Objects.equals(node.getDatacenter(), dc)) { - return nodes.add(node); - } - return false; - } - - @Override - public boolean remove(@NonNull Node node) { - if (Objects.equals(node.getDatacenter(), dc)) { - return nodes.remove(node); - } - return false; - } - - @Override - @NonNull - public Set dc(@Nullable String dc) { - if (Objects.equals(this.dc, dc)) { - return nodes; - } - return Collections.emptySet(); - } - - @Override - public Set dcs() { - return dcs; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java deleted file mode 100644 index ac68b92fef2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class AddNodeRefresh extends NodesRefresh { - - @VisibleForTesting final NodeInfo newNodeInfo; - - AddNodeRefresh(NodeInfo newNodeInfo) { - this.newNodeInfo = newNodeInfo; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - Map oldNodes = oldMetadata.getNodes(); - Node existing = oldNodes.get(newNodeInfo.getHostId()); - if (existing == null) { - DefaultNode newNode = new DefaultNode(newNodeInfo.getEndPoint(), context); - copyInfos(newNodeInfo, newNode, context); - Map newNodes = - ImmutableMap.builder() - .putAll(oldNodes) - .put(newNode.getHostId(), newNode) - .build(); - return new Result( - oldMetadata.withNodes(newNodes, tokenMapEnabled, false, null, context), - ImmutableList.of(NodeStateEvent.added(newNode))); - } else { - // If a node is restarted after changing its 
broadcast RPC address, Cassandra considers that - // an addition, even though the host_id hasn't changed :( - // Update the existing instance and emit an UP event to trigger a pool reconnection. - if (!existing.getEndPoint().equals(newNodeInfo.getEndPoint())) { - copyInfos(newNodeInfo, ((DefaultNode) existing), context); - assert newNodeInfo.getBroadcastRpcAddress().isPresent(); // always for peer nodes - return new Result( - oldMetadata, - ImmutableList.of(TopologyEvent.suggestUp(newNodeInfo.getBroadcastRpcAddress().get()))); - } else { - return new Result(oldMetadata); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java deleted file mode 100644 index 021824a9b16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Objects; -import java.util.UUID; - -public class CloudTopologyMonitor extends DefaultTopologyMonitor { - - private final InetSocketAddress cloudProxyAddress; - - public CloudTopologyMonitor(InternalDriverContext context, InetSocketAddress cloudProxyAddress) { - super(context); - this.cloudProxyAddress = cloudProxyAddress; - } - - @NonNull - @Override - protected EndPoint buildNodeEndPoint( - @NonNull AdminRow row, - @Nullable InetSocketAddress broadcastRpcAddress, - @NonNull EndPoint localEndPoint) { - UUID hostId = Objects.requireNonNull(row.getUuid("host_id")); - return new SniEndPoint(cloudProxyAddress, hostId.toString()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java deleted file mode 100644 index 7ffbee8e4bb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.net.InetSocketAddress; -import java.util.Objects; - -public class DefaultEndPoint implements EndPoint, Serializable { - - private static final long serialVersionUID = 1; - - private final InetSocketAddress address; - private final String metricPrefix; - - public DefaultEndPoint(InetSocketAddress address) { - this.address = Objects.requireNonNull(address, "address can't be null"); - this.metricPrefix = buildMetricPrefix(address); - } - - @NonNull - @Override - public InetSocketAddress resolve() { - return address; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DefaultEndPoint) { - InetSocketAddress thisAddress = this.address; - InetSocketAddress thatAddress = ((DefaultEndPoint) other).address; - // If only one of the addresses is unresolved, resolve the other. Otherwise (both resolved or - // both unresolved), compare as-is. 
- if (thisAddress.isUnresolved() && !thatAddress.isUnresolved()) { - thisAddress = new InetSocketAddress(thisAddress.getHostName(), thisAddress.getPort()); - } else if (thatAddress.isUnresolved() && !thisAddress.isUnresolved()) { - thatAddress = new InetSocketAddress(thatAddress.getHostName(), thatAddress.getPort()); - } - return thisAddress.equals(thatAddress); - } else { - return false; - } - } - - @Override - public int hashCode() { - return address.hashCode(); - } - - @Override - public String toString() { - return address.toString(); - } - - @NonNull - @Override - public String asMetricPrefix() { - return metricPrefix; - } - - private static String buildMetricPrefix(InetSocketAddress address) { - String hostString = address.getHostString(); - if (hostString == null) { - throw new IllegalArgumentException( - "Could not extract a host string from provided address " + address); - } - // Append the port since Cassandra 4 supports nodes with different ports - return hostString.replace('.', '_') + ':' + address.getPort(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java deleted file mode 100644 index 38f7e4a093e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap; -import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is immutable, so that metadata changes are atomic for the client. Every mutation - * operation must return a new instance, that will replace the existing one in {@link - * MetadataManager}'s volatile field. 
- */ -@Immutable -public class DefaultMetadata implements Metadata { - private static final Logger LOG = LoggerFactory.getLogger(DefaultMetadata.class); - public static DefaultMetadata EMPTY = - new DefaultMetadata(Collections.emptyMap(), Collections.emptyMap(), null, null); - - protected final Map nodes; - protected final Map keyspaces; - protected final TokenMap tokenMap; - protected final String clusterName; - - protected DefaultMetadata( - Map nodes, - Map keyspaces, - TokenMap tokenMap, - String clusterName) { - this.nodes = nodes; - this.keyspaces = keyspaces; - this.tokenMap = tokenMap; - this.clusterName = clusterName; - } - - @NonNull - @Override - public Map getNodes() { - return nodes; - } - - @NonNull - @Override - public Map getKeyspaces() { - return keyspaces; - } - - @NonNull - @Override - public Optional getTokenMap() { - return Optional.ofNullable(tokenMap); - } - - @NonNull - @Override - public Optional getClusterName() { - return Optional.ofNullable(clusterName); - } - - /** - * Refreshes the current metadata with the given list of nodes. - * - * @param tokenMapEnabled whether to rebuild the token map or not; if this is {@code false} the - * current token map will be copied into the new metadata without being recomputed. - * @param tokensChanged whether we observed a change of tokens for at least one node. This will - * require a full rebuild of the token map. - * @param tokenFactory only needed for the initial refresh, afterwards the existing one in the - * token map is used. - * @return the new metadata. - */ - public DefaultMetadata withNodes( - Map newNodes, - boolean tokenMapEnabled, - boolean tokensChanged, - TokenFactory tokenFactory, - InternalDriverContext context) { - - // Force a rebuild if at least one node has different tokens, or there are new or removed nodes. 
- boolean forceFullRebuild = tokensChanged || !newNodes.equals(nodes); - - return new DefaultMetadata( - ImmutableMap.copyOf(newNodes), - this.keyspaces, - rebuildTokenMap( - newNodes, keyspaces, tokenMapEnabled, forceFullRebuild, tokenFactory, context), - context.getChannelFactory().getClusterName()); - } - - public DefaultMetadata withSchema( - Map newKeyspaces, - boolean tokenMapEnabled, - InternalDriverContext context) { - return new DefaultMetadata( - this.nodes, - ImmutableMap.copyOf(newKeyspaces), - rebuildTokenMap(nodes, newKeyspaces, tokenMapEnabled, false, null, context), - context.getChannelFactory().getClusterName()); - } - - @Nullable - protected TokenMap rebuildTokenMap( - Map newNodes, - Map newKeyspaces, - boolean tokenMapEnabled, - boolean forceFullRebuild, - TokenFactory tokenFactory, - InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - ReplicationStrategyFactory replicationStrategyFactory = context.getReplicationStrategyFactory(); - - if (!tokenMapEnabled) { - LOG.debug("[{}] Token map is disabled, skipping", logPrefix); - return this.tokenMap; - } - long start = System.nanoTime(); - try { - DefaultTokenMap oldTokenMap = (DefaultTokenMap) this.tokenMap; - if (oldTokenMap == null) { - // Initial build, we need the token factory - if (tokenFactory == null) { - LOG.debug( - "[{}] Building initial token map but the token factory is missing, skipping", - logPrefix); - return null; - } else { - LOG.debug("[{}] Building initial token map", logPrefix); - return DefaultTokenMap.build( - newNodes.values(), - newKeyspaces.values(), - tokenFactory, - replicationStrategyFactory, - logPrefix); - } - } else if (forceFullRebuild) { - LOG.debug( - "[{}] Updating token map but some nodes/tokens have changed, full rebuild", logPrefix); - return DefaultTokenMap.build( - newNodes.values(), - newKeyspaces.values(), - oldTokenMap.getTokenFactory(), - replicationStrategyFactory, - logPrefix); - } else { - LOG.debug("[{}] Refreshing token 
map (only schema has changed)", logPrefix); - return oldTokenMap.refresh( - newNodes.values(), newKeyspaces.values(), replicationStrategyFactory); - } - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing token map, keeping previous version", - logPrefix, - t); - return this.tokenMap; - } finally { - LOG.debug("[{}] Rebuilding token map took {}", logPrefix, NanoTime.formatTimeSince(start)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java deleted file mode 100644 index 28f9e2de81c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -/** - * Implementation note: all the mutable state in this class is read concurrently, but only mutated - * from {@link MetadataManager}'s admin thread. 
- */ -@ThreadSafe -public class DefaultNode implements Node, Serializable { - - private static final long serialVersionUID = 1; - - private volatile EndPoint endPoint; - // A deserialized node is not attached to a session anymore, so we don't need to retain this - private transient volatile NodeMetricUpdater metricUpdater; - - volatile InetSocketAddress broadcastRpcAddress; - volatile InetSocketAddress broadcastAddress; - volatile InetSocketAddress listenAddress; - volatile String datacenter; - volatile String rack; - volatile Version cassandraVersion; - // Keep a copy of the raw tokens, to detect if they have changed when we refresh the node - volatile Set rawTokens; - volatile Map extras; - volatile UUID hostId; - volatile UUID schemaVersion; - - // These 4 fields are read concurrently, but only mutated on NodeStateManager's admin thread - volatile NodeState state; - volatile int openConnections; - volatile int reconnections; - volatile long upSinceMillis; - - volatile NodeDistance distance; - - public DefaultNode(EndPoint endPoint, InternalDriverContext context) { - this.endPoint = endPoint; - this.state = NodeState.UNKNOWN; - this.distance = NodeDistance.IGNORED; - this.rawTokens = Collections.emptySet(); - this.extras = Collections.emptyMap(); - // We leak a reference to a partially constructed object (this), but in practice this won't be a - // problem because the node updater only needs the connect address to initialize. 
- this.metricUpdater = context.getMetricsFactory().newNodeUpdater(this); - this.upSinceMillis = -1; - } - - @NonNull - @Override - public EndPoint getEndPoint() { - return endPoint; - } - - public void setEndPoint(@NonNull EndPoint newEndPoint, @NonNull InternalDriverContext context) { - if (!newEndPoint.equals(endPoint)) { - endPoint = newEndPoint; - - // The endpoint is also used to build metric names, so make sure they get updated - NodeMetricUpdater previousMetricUpdater = metricUpdater; - if (!(previousMetricUpdater instanceof NoopNodeMetricUpdater)) { - metricUpdater = context.getMetricsFactory().newNodeUpdater(this); - } - } - } - - @NonNull - @Override - public Optional getBroadcastRpcAddress() { - return Optional.ofNullable(broadcastRpcAddress); - } - - @NonNull - @Override - public Optional getBroadcastAddress() { - return Optional.ofNullable(broadcastAddress); - } - - @NonNull - @Override - public Optional getListenAddress() { - return Optional.ofNullable(listenAddress); - } - - @Nullable - @Override - public String getDatacenter() { - return datacenter; - } - - @Nullable - @Override - public String getRack() { - return rack; - } - - @Nullable - @Override - public Version getCassandraVersion() { - return cassandraVersion; - } - - @Nullable - @Override - public UUID getHostId() { - return hostId; - } - - @Nullable - @Override - public UUID getSchemaVersion() { - return schemaVersion; - } - - @NonNull - @Override - public Map getExtras() { - return extras; - } - - @NonNull - @Override - public NodeState getState() { - return state; - } - - @Override - public long getUpSinceMillis() { - return upSinceMillis; - } - - @Override - public int getOpenConnections() { - return openConnections; - } - - @Override - public boolean isReconnecting() { - return reconnections > 0; - } - - @NonNull - @Override - public NodeDistance getDistance() { - return distance; - } - - public NodeMetricUpdater getMetricUpdater() { - return metricUpdater; - } - - @Override - public 
String toString() { - // Include the hash code because this class uses reference equality - return String.format( - "Node(endPoint=%s, hostId=%s, hashCode=%x)", getEndPoint(), getHostId(), hashCode()); - } - - /** Note: deliberately not exposed by the public interface. */ - public Set getRawTokens() { - return rawTokens; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java deleted file mode 100644 index 8908f0be078..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.internal.core.metadata;

import com.datastax.oss.driver.api.core.metadata.EndPoint;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import net.jcip.annotations.Immutable;
import net.jcip.annotations.NotThreadSafe;

/**
 * Default immutable implementation of {@link NodeInfo}.
 *
 * <p>Instances are created through the fluent {@link Builder} obtained from {@link #builder()};
 * once built, the info never changes. Nullable addresses are surfaced through {@link Optional};
 * the token set and extras map default to empty collections when never set on the builder.
 */
@Immutable
public class DefaultNodeInfo implements NodeInfo {
  /** Returns a new, empty builder. */
  public static Builder builder() {
    return new Builder();
  }

  private final EndPoint endPoint;
  private final InetSocketAddress broadcastRpcAddress;
  private final InetSocketAddress broadcastAddress;
  private final InetSocketAddress listenAddress;
  private final String datacenter;
  private final String rack;
  private final String cassandraVersion;
  private final String partitioner;
  private final Set<String> tokens;
  private final Map<String, Object> extras;
  private final UUID hostId;
  private final UUID schemaVersion;

  private DefaultNodeInfo(Builder builder) {
    this.endPoint = builder.endPoint;
    this.broadcastRpcAddress = builder.broadcastRpcAddress;
    this.broadcastAddress = builder.broadcastAddress;
    this.listenAddress = builder.listenAddress;
    this.datacenter = builder.datacenter;
    this.rack = builder.rack;
    this.cassandraVersion = builder.cassandraVersion;
    this.partitioner = builder.partitioner;
    // Normalize null collections to empty so getters never return null.
    this.tokens = (builder.tokens == null) ? Collections.emptySet() : builder.tokens;
    this.hostId = builder.hostId;
    this.schemaVersion = builder.schemaVersion;
    this.extras = (builder.extras == null) ? Collections.emptyMap() : builder.extras;
  }

  @NonNull
  @Override
  public EndPoint getEndPoint() {
    return endPoint;
  }

  @NonNull
  @Override
  public Optional<InetSocketAddress> getBroadcastRpcAddress() {
    return Optional.ofNullable(broadcastRpcAddress);
  }

  @NonNull
  @Override
  public Optional<InetSocketAddress> getBroadcastAddress() {
    return Optional.ofNullable(broadcastAddress);
  }

  @NonNull
  @Override
  public Optional<InetSocketAddress> getListenAddress() {
    return Optional.ofNullable(listenAddress);
  }

  @Override
  public String getDatacenter() {
    return datacenter;
  }

  @Override
  public String getRack() {
    return rack;
  }

  @Override
  public String getCassandraVersion() {
    return cassandraVersion;
  }

  @Override
  public String getPartitioner() {
    return partitioner;
  }

  @Override
  public Set<String> getTokens() {
    return tokens;
  }

  @Override
  public Map<String, Object> getExtras() {
    return extras;
  }

  @NonNull
  @Override
  public UUID getHostId() {
    return hostId;
  }

  @Override
  public UUID getSchemaVersion() {
    return schemaVersion;
  }

  /** Mutable builder for {@link DefaultNodeInfo}; not safe for concurrent use. */
  @NotThreadSafe
  public static class Builder {
    private EndPoint endPoint;
    private InetSocketAddress broadcastRpcAddress;
    private InetSocketAddress broadcastAddress;
    private InetSocketAddress listenAddress;
    private String datacenter;
    private String rack;
    private String cassandraVersion;
    private String partitioner;
    private Set<String> tokens;
    private Map<String, Object> extras;
    private UUID hostId;
    private UUID schemaVersion;

    public Builder withEndPoint(@NonNull EndPoint endPoint) {
      this.endPoint = endPoint;
      return this;
    }

    public Builder withBroadcastRpcAddress(@Nullable InetSocketAddress address) {
      this.broadcastRpcAddress = address;
      return this;
    }

    public Builder withBroadcastAddress(@Nullable InetSocketAddress address) {
      this.broadcastAddress = address;
      return this;
    }

    public Builder withListenAddress(@Nullable InetSocketAddress address) {
      this.listenAddress = address;
      return this;
    }

    public Builder withDatacenter(@Nullable String datacenter) {
      this.datacenter = datacenter;
      return this;
    }

    public Builder withRack(@Nullable String rack) {
      this.rack = rack;
      return this;
    }

    public Builder withCassandraVersion(@Nullable String cassandraVersion) {
      this.cassandraVersion = cassandraVersion;
      return this;
    }

    public Builder withPartitioner(@Nullable String partitioner) {
      this.partitioner = partitioner;
      return this;
    }

    public Builder withTokens(@Nullable Set<String> tokens) {
      this.tokens = tokens;
      return this;
    }

    public Builder withHostId(@NonNull UUID hostId) {
      this.hostId = hostId;
      return this;
    }

    public Builder withSchemaVersion(@Nullable UUID schemaVersion) {
      this.schemaVersion = schemaVersion;
      return this;
    }

    /**
     * Records an extra, vendor-specific property. Null values are silently discarded; the backing
     * map is created lazily on the first non-null value.
     */
    public Builder withExtra(@NonNull String key, @Nullable Object value) {
      if (value != null) {
        if (this.extras == null) {
          this.extras = new HashMap<>();
        }
        this.extras.put(key, value);
      }
      return this;
    }

    public DefaultNodeInfo build() {
      return new DefaultNodeInfo(this);
    }
  }
}
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; 
-import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default topology monitor, based on {@link ControlConnection}. - * - *

Note that event processing is implemented directly in the control connection, not here. - */ -@ThreadSafe -public class DefaultTopologyMonitor implements TopologyMonitor { - private static final Logger LOG = LoggerFactory.getLogger(DefaultTopologyMonitor.class); - - // Assume topology queries never need paging - private static final int INFINITE_PAGE_SIZE = -1; - - // A few system.peers columns which get special handling below - private static final String NATIVE_PORT = "native_port"; - private static final String NATIVE_TRANSPORT_PORT = "native_transport_port"; - - private final String logPrefix; - private final InternalDriverContext context; - private final ControlConnection controlConnection; - private final Duration timeout; - private final boolean reconnectOnInit; - private final CompletableFuture closeFuture; - - @VisibleForTesting volatile boolean isSchemaV2; - @VisibleForTesting volatile int port = -1; - - public DefaultTopologyMonitor(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.context = context; - this.controlConnection = context.getControlConnection(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.timeout = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); - this.reconnectOnInit = config.getBoolean(DefaultDriverOption.RECONNECT_ON_INIT); - this.closeFuture = new CompletableFuture<>(); - // Set this to true initially, after the first refreshNodes is called this will either stay true - // or be set to false; - this.isSchemaV2 = true; - } - - @Override - public CompletionStage init() { - if (closeFuture.isDone()) { - return CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - return controlConnection.init(true, reconnectOnInit, true); - } - - @Override - public CompletionStage initFuture() { - return controlConnection.initFuture(); - } - - @Override - public CompletionStage> refreshNode(Node node) { - if (closeFuture.isDone()) { - return 
CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - LOG.debug("[{}] Refreshing info for {}", logPrefix, node); - DriverChannel channel = controlConnection.channel(); - EndPoint localEndPoint = channel.getEndPoint(); - if (node.getEndPoint().equals(channel.getEndPoint())) { - // refreshNode is called for nodes that just came up. If the control node just came up, it - // means the control connection just reconnected, which means we did a full node refresh. So - // we don't need to process this call. - LOG.debug("[{}] Ignoring refresh of control node", logPrefix); - return CompletableFuture.completedFuture(Optional.empty()); - } else if (node.getBroadcastAddress().isPresent()) { - CompletionStage query; - if (isSchemaV2) { - query = - query( - channel, - "SELECT * FROM " - + getPeerTableName() - + " WHERE peer = :address and peer_port = :port", - ImmutableMap.of( - "address", - node.getBroadcastAddress().get().getAddress(), - "port", - node.getBroadcastAddress().get().getPort())); - } else { - query = - query( - channel, - "SELECT * FROM " + getPeerTableName() + " WHERE peer = :address", - ImmutableMap.of("address", node.getBroadcastAddress().get().getAddress())); - } - return query.thenApply(result -> firstPeerRowAsNodeInfo(result, localEndPoint)); - } else { - return query(channel, "SELECT * FROM " + getPeerTableName()) - .thenApply(result -> findInPeers(result, node.getHostId(), localEndPoint)); - } - } - - @Override - public CompletionStage> getNewNodeInfo(InetSocketAddress broadcastRpcAddress) { - if (closeFuture.isDone()) { - return CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - LOG.debug("[{}] Fetching info for new node {}", logPrefix, broadcastRpcAddress); - DriverChannel channel = controlConnection.channel(); - EndPoint localEndPoint = channel.getEndPoint(); - return query(channel, "SELECT * FROM " + getPeerTableName()) - .thenApply(result -> findInPeers(result, broadcastRpcAddress, localEndPoint)); - } - - 
@Override - public CompletionStage> refreshNodeList() { - if (closeFuture.isDone()) { - return CompletableFutures.failedFuture(new IllegalStateException("closed")); - } - LOG.debug("[{}] Refreshing node list", logPrefix); - DriverChannel channel = controlConnection.channel(); - EndPoint localEndPoint = channel.getEndPoint(); - - savePort(channel); - - CompletionStage localQuery = query(channel, "SELECT * FROM system.local"); - CompletionStage peersV2Query = query(channel, "SELECT * FROM system.peers_v2"); - CompletableFuture peersQuery = new CompletableFuture<>(); - - peersV2Query.whenComplete( - (r, t) -> { - if (t != null) { - // If system.peers_v2 does not exist, downgrade to system.peers - if (t instanceof UnexpectedResponseException - && ((UnexpectedResponseException) t).message instanceof Error) { - Error error = (Error) ((UnexpectedResponseException) t).message; - if (error.code == ProtocolConstants.ErrorCode.INVALID - // Also downgrade on server error with a specific error message (DSE 6.0.0 to - // 6.0.2 with search enabled) - || (error.code == ProtocolConstants.ErrorCode.SERVER_ERROR - && error.message.contains("Unknown keyspace/cf pair (system.peers_v2)"))) { - this.isSchemaV2 = false; // We should not attempt this query in the future. 
- CompletableFutures.completeFrom( - query(channel, "SELECT * FROM system.peers"), peersQuery); - return; - } - } - peersQuery.completeExceptionally(t); - } else { - peersQuery.complete(r); - } - }); - - return localQuery.thenCombine( - peersQuery, - (controlNodeResult, peersResult) -> { - List nodeInfos = new ArrayList<>(); - AdminRow localRow = controlNodeResult.iterator().next(); - InetSocketAddress localBroadcastRpcAddress = - getBroadcastRpcAddress(localRow, localEndPoint); - nodeInfos.add(nodeInfoBuilder(localRow, localBroadcastRpcAddress, localEndPoint).build()); - for (AdminRow peerRow : peersResult) { - if (isPeerValid(peerRow)) { - InetSocketAddress peerBroadcastRpcAddress = - getBroadcastRpcAddress(peerRow, localEndPoint); - if (peerBroadcastRpcAddress != null) { - NodeInfo nodeInfo = - nodeInfoBuilder(peerRow, peerBroadcastRpcAddress, localEndPoint).build(); - nodeInfos.add(nodeInfo); - } - } - } - return nodeInfos; - }); - } - - @Override - public CompletionStage checkSchemaAgreement() { - if (closeFuture.isDone()) { - return CompletableFuture.completedFuture(true); - } - DriverChannel channel = controlConnection.channel(); - return new SchemaAgreementChecker(channel, context, logPrefix).run(); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - closeFuture.complete(null); - return closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return closeAsync(); - } - - @VisibleForTesting - protected CompletionStage query( - DriverChannel channel, String queryString, Map parameters) { - AdminRequestHandler handler; - try { - handler = - AdminRequestHandler.query( - channel, queryString, parameters, timeout, INFINITE_PAGE_SIZE, logPrefix); - } catch (Exception e) { - return CompletableFutures.failedFuture(e); - } - return handler.start(); - } - - private CompletionStage query(DriverChannel channel, String 
queryString) { - return query(channel, queryString, Collections.emptyMap()); - } - - private String getPeerTableName() { - return isSchemaV2 ? "system.peers_v2" : "system.peers"; - } - - private Optional firstPeerRowAsNodeInfo(AdminResult result, EndPoint localEndPoint) { - Iterator iterator = result.iterator(); - if (iterator.hasNext()) { - AdminRow row = iterator.next(); - if (isPeerValid(row)) { - return Optional.ofNullable(getBroadcastRpcAddress(row, localEndPoint)) - .map( - broadcastRpcAddress -> - nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); - } - } - return Optional.empty(); - } - - /** - * Creates a {@link DefaultNodeInfo.Builder} instance from the given row. - * - * @param broadcastRpcAddress this is a parameter only because we already have it when we come - * from {@link #findInPeers(AdminResult, InetSocketAddress, EndPoint)}. Callers that don't - * already have it can use {@link #getBroadcastRpcAddress}. For the control host, this can be - * null; if this node is a peer however, this cannot be null, since we use that address to - * create the node's endpoint. Callers can use {@link #isPeerValid(AdminRow)} to check that - * before calling this method. - * @param localEndPoint the control node endpoint that was used to query the node's system tables. - * This is a parameter because it would be racy to call {@code - * controlConnection.channel().getEndPoint()} from within this method, as the control - * connection may have changed its channel since. So this parameter must be provided by the - * caller. 
- */ - @NonNull - protected DefaultNodeInfo.Builder nodeInfoBuilder( - @NonNull AdminRow row, - @Nullable InetSocketAddress broadcastRpcAddress, - @NonNull EndPoint localEndPoint) { - - EndPoint endPoint = buildNodeEndPoint(row, broadcastRpcAddress, localEndPoint); - - // in system.local - InetAddress broadcastInetAddress = row.getInetAddress("broadcast_address"); - if (broadcastInetAddress == null) { - // in system.peers or system.peers_v2 - broadcastInetAddress = row.getInetAddress("peer"); - } - - Integer broadcastPort = 0; - if (row.contains("broadcast_port")) { - // system.local for Cassandra >= 4.0 - broadcastPort = row.getInteger("broadcast_port"); - } else if (row.contains("peer_port")) { - // system.peers_v2 - broadcastPort = row.getInteger("peer_port"); - } - - InetSocketAddress broadcastAddress = null; - if (broadcastInetAddress != null && broadcastPort != null) { - broadcastAddress = new InetSocketAddress(broadcastInetAddress, broadcastPort); - } - - // in system.local only, and only for Cassandra versions >= 2.0.17, 2.1.8, 2.2.0 rc2; - // not present in system.peers nor system.peers_v2 - InetAddress listenInetAddress = row.getInetAddress("listen_address"); - - // in system.local only, and only for Cassandra >= 4.0 - Integer listenPort = 0; - if (row.contains("listen_port")) { - listenPort = row.getInteger("listen_port"); - } - - InetSocketAddress listenAddress = null; - if (listenInetAddress != null && listenPort != null) { - listenAddress = new InetSocketAddress(listenInetAddress, listenPort); - } - - DefaultNodeInfo.Builder builder = - DefaultNodeInfo.builder() - .withEndPoint(endPoint) - .withBroadcastRpcAddress(broadcastRpcAddress) - .withBroadcastAddress(broadcastAddress) - .withListenAddress(listenAddress) - .withDatacenter(row.getString("data_center")) - .withRack(row.getString("rack")) - .withCassandraVersion(row.getString("release_version")) - .withTokens(row.getSetOfString("tokens")) - .withPartitioner(row.getString("partitioner")) - 
.withHostId(Objects.requireNonNull(row.getUuid("host_id"))) - .withSchemaVersion(row.getUuid("schema_version")); - - // Handle DSE-specific columns, if present - String rawVersion = row.getString("dse_version"); - if (rawVersion != null) { - builder.withExtra(DseNodeProperties.DSE_VERSION, Version.parse(rawVersion)); - } - - ImmutableSet.Builder workloadsBuilder = ImmutableSet.builder(); - Boolean legacyGraph = row.getBoolean("graph"); // DSE 5.0 - if (legacyGraph != null && legacyGraph) { - workloadsBuilder.add("Graph"); - } - String legacyWorkload = row.getString("workload"); // DSE 5.0 (other than graph) - if (legacyWorkload != null) { - workloadsBuilder.add(legacyWorkload); - } - Set modernWorkloads = row.getSetOfString("workloads"); // DSE 5.1+ - if (modernWorkloads != null) { - workloadsBuilder.addAll(modernWorkloads); - } - ImmutableSet workloads = workloadsBuilder.build(); - if (!workloads.isEmpty()) { - builder.withExtra(DseNodeProperties.DSE_WORKLOADS, workloads); - } - - // Note: withExtra discards null values - builder - .withExtra(DseNodeProperties.SERVER_ID, row.getString("server_id")) - .withExtra(DseNodeProperties.NATIVE_TRANSPORT_PORT, row.getInteger("native_transport_port")) - .withExtra( - DseNodeProperties.NATIVE_TRANSPORT_PORT_SSL, - row.getInteger("native_transport_port_ssl")) - .withExtra(DseNodeProperties.STORAGE_PORT, row.getInteger("storage_port")) - .withExtra(DseNodeProperties.STORAGE_PORT_SSL, row.getInteger("storage_port_ssl")) - .withExtra(DseNodeProperties.JMX_PORT, row.getInteger("jmx_port")); - - return builder; - } - - /** - * Builds the node's endpoint from the given row. - * - * @param broadcastRpcAddress this is a parameter only because we already have it when we come - * from {@link #findInPeers(AdminResult, InetSocketAddress, EndPoint)}. Callers that don't - * already have it can use {@link #getBroadcastRpcAddress}. 
For the control host, this can be - * null; if this node is a peer however, this cannot be null, since we use that address to - * create the node's endpoint. Callers can use {@link #isPeerValid(AdminRow)} to check that - * before calling this method. - * @param localEndPoint the control node endpoint that was used to query the node's system tables. - * This is a parameter because it would be racy to call {@code - * controlConnection.channel().getEndPoint()} from within this method, as the control - * connection may have changed its channel since. So this parameter must be provided by the - * caller. - */ - @NonNull - protected EndPoint buildNodeEndPoint( - @NonNull AdminRow row, - @Nullable InetSocketAddress broadcastRpcAddress, - @NonNull EndPoint localEndPoint) { - boolean peer = row.contains("peer"); - if (peer) { - // If this node is a peer, its broadcast RPC address must be present. - Objects.requireNonNull( - broadcastRpcAddress, "broadcastRpcAddress cannot be null for a peer row"); - // Deployments that use a custom EndPoint implementation will need their own TopologyMonitor. - // One simple approach is to extend this class and override this method. - return new DefaultEndPoint(context.getAddressTranslator().translate(broadcastRpcAddress)); - } else { - // Don't rely on system.local.rpc_address for the control node, because it mistakenly - // reports the normal RPC address instead of the broadcast one (CASSANDRA-11181). We - // already know the endpoint anyway since we've just used it to query. - return localEndPoint; - } - } - - // Called when a new node is being added; the peers table is keyed by broadcast_address, - // but the received event only contains broadcast_rpc_address, so - // we have to traverse the whole table and check the rows one by one. 
- private Optional findInPeers( - AdminResult result, InetSocketAddress broadcastRpcAddressToFind, EndPoint localEndPoint) { - for (AdminRow row : result) { - InetSocketAddress broadcastRpcAddress = getBroadcastRpcAddress(row, localEndPoint); - if (broadcastRpcAddress != null - && broadcastRpcAddress.equals(broadcastRpcAddressToFind) - && isPeerValid(row)) { - return Optional.of(nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); - } - } - LOG.debug("[{}] Could not find any peer row matching {}", logPrefix, broadcastRpcAddressToFind); - return Optional.empty(); - } - - // Called when refreshing an existing node, and we don't know its broadcast address; in this - // case we attempt a search by host id and have to traverse the whole table and check the rows one - // by one. - private Optional findInPeers( - AdminResult result, UUID hostIdToFind, EndPoint localEndPoint) { - for (AdminRow row : result) { - UUID hostId = row.getUuid("host_id"); - if (hostId != null && hostId.equals(hostIdToFind) && isPeerValid(row)) { - return Optional.ofNullable(getBroadcastRpcAddress(row, localEndPoint)) - .map( - broadcastRpcAddress -> - nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); - } - } - LOG.debug("[{}] Could not find any peer row matching {}", logPrefix, hostIdToFind); - return Optional.empty(); - } - - // Current versions of Cassandra (3.11 at the time of writing), require the same port for all - // nodes. As a consequence, the port is not stored in system tables. - // We save it the first time we get a control connection channel. - private void savePort(DriverChannel channel) { - if (port < 0) { - SocketAddress address = channel.getEndPoint().resolve(); - if (address instanceof InetSocketAddress) { - port = ((InetSocketAddress) address).getPort(); - } - } - } - - /** - * Determines the broadcast RPC address of the node represented by the given row. 
- * - * @param row The row to inspect; can represent either a local (control) node or a peer node. - * @param localEndPoint the control node endpoint that was used to query the node's system tables. - * This is a parameter because it would be racy to call {@code - * controlConnection.channel().getEndPoint()} from within this method, as the control - * connection may have changed its channel since. So this parameter must be provided by the - * caller. - * @return the broadcast RPC address of the node, if it could be determined; or {@code null} - * otherwise. - */ - @Nullable - protected InetSocketAddress getBroadcastRpcAddress( - @NonNull AdminRow row, @NonNull EndPoint localEndPoint) { - - InetAddress broadcastRpcInetAddress = null; - Iterator addrCandidates = - Iterators.forArray( - // in system.peers_v2 (Cassandra >= 4.0) - "native_address", - // DSE 6.8 introduced native_transport_address and native_transport_port for the - // listen address. - "native_transport_address", - // in system.peers or system.local - "rpc_address"); - - while (broadcastRpcInetAddress == null && addrCandidates.hasNext()) - broadcastRpcInetAddress = row.getInetAddress(addrCandidates.next()); - // This could only happen if system tables are corrupted, but handle gracefully - if (broadcastRpcInetAddress == null) { - LOG.warn( - "[{}] Unable to determine broadcast RPC IP address, returning null. " - + "This is likely due to a misconfiguration or invalid system tables. " - + "Please validate the contents of system.local and/or {}.", - logPrefix, - getPeerTableName()); - return null; - } - - Integer broadcastRpcPort = null; - Iterator portCandidates = - Iterators.forArray( - // in system.peers_v2 (Cassandra >= 4.0) - NATIVE_PORT, - // DSE 6.8 introduced native_transport_address and native_transport_port for the - // listen address. 
- NATIVE_TRANSPORT_PORT, - // system.local for Cassandra >= 4.0 - "rpc_port"); - - while ((broadcastRpcPort == null || broadcastRpcPort == 0) && portCandidates.hasNext()) { - - String colName = portCandidates.next(); - broadcastRpcPort = row.getInteger(colName); - // Support override for SSL port (if enabled) in DSE - if (NATIVE_TRANSPORT_PORT.equals(colName) && context.getSslEngineFactory().isPresent()) { - - String sslColName = colName + "_ssl"; - broadcastRpcPort = row.getInteger(sslColName); - } - } - // use the default port if no port information was found in the row; - // note that in rare situations, the default port might not be known, in which case we - // report zero, as advertised in the javadocs of Node and NodeInfo. - if (broadcastRpcPort == null || broadcastRpcPort == 0) { - - LOG.warn( - "[{}] Unable to determine broadcast RPC port. " - + "Trying to fall back to port used by the control connection.", - logPrefix); - broadcastRpcPort = port == -1 ? 0 : port; - } - - InetSocketAddress broadcastRpcAddress = - new InetSocketAddress(broadcastRpcInetAddress, broadcastRpcPort); - if (row.contains("peer") && broadcastRpcAddress.equals(localEndPoint.resolve())) { - // JAVA-2303: if the peer is actually the control node, ignore that peer as it is likely - // a misconfiguration problem. - LOG.warn( - "[{}] Control node {} has an entry for itself in {}: this entry will be ignored. " - + "This is likely due to a misconfiguration; please verify your rpc_address " - + "configuration in cassandra.yaml on all nodes in your cluster.", - logPrefix, - localEndPoint, - getPeerTableName()); - return null; - } - - return broadcastRpcAddress; - } - - /** - * Returns {@code true} if the given peer row is valid, and {@code false} otherwise. - * - *

This method must at least ensure that the row contains enough information to extract the - * node's broadcast RPC address and host ID; otherwise the driver may not work properly. - */ - protected boolean isPeerValid(AdminRow peerRow) { - if (PeerRowValidator.isValid(peerRow)) { - return true; - } else { - LOG.warn( - "[{}] Found invalid row in {} for peer: {}. " - + "This is likely a gossip or snitch issue, this node will be ignored.", - logPrefix, - getPeerTableName(), - peerRow.getInetAddress("peer")); - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java deleted file mode 100644 index 5d58727484c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.internal.core.metadata;

import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
import java.util.Objects;
import net.jcip.annotations.Immutable;

/**
 * Indicates that the load balancing policy has assigned a new distance to a host.
 *
 * <p>This is informational only: firing this event manually does not change the distance.
 */
@Immutable
public class DistanceEvent {
  /** The newly assigned distance. */
  public final NodeDistance distance;
  /** The node whose distance changed. */
  public final DefaultNode node;

  public DistanceEvent(NodeDistance distance, DefaultNode node) {
    this.distance = distance;
    this.node = node;
  }

  @Override
  public boolean equals(Object other) {
    // Guard clauses: identity first, then type, then field-by-field comparison.
    if (other == this) {
      return true;
    }
    if (!(other instanceof DistanceEvent)) {
      return false;
    }
    DistanceEvent that = (DistanceEvent) other;
    // distance is an enum-like singleton, so reference equality is intended here.
    return this.distance == that.distance && Objects.equals(this.node, that.node);
  }

  @Override
  public int hashCode() {
    return Objects.hash(this.distance, this.node);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("DistanceEvent(");
    sb.append(distance).append(", ").append(node).append(")");
    return sb.toString();
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datastax.oss.driver.internal.core.metadata;

import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap;
import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory;
import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry;
import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
import com.datastax.oss.driver.shaded.guava.common.collect.Sets;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import net.jcip.annotations.ThreadSafe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Reconciles the driver's metadata with a freshly queried node list: adds new nodes, removes nodes
 * that disappeared, updates the rest, and triggers a token map rebuild when tokens changed.
 */
@ThreadSafe
class FullNodeListRefresh extends NodesRefresh {

  private static final Logger LOG = LoggerFactory.getLogger(FullNodeListRefresh.class);

  @VisibleForTesting final Iterable<NodeInfo> nodeInfos;

  FullNodeListRefresh(Iterable<NodeInfo> nodeInfos) {
    this.nodeInfos = nodeInfos;
  }

  @Override
  public Result compute(
      DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) {

    String logPrefix = context.getSessionName();
    TokenFactoryRegistry tokenFactoryRegistry = context.getTokenFactoryRegistry();

    Map<UUID, Node> oldNodes = oldMetadata.getNodes();

    // Nodes that appear in nodeInfos but not in the old metadata, keyed by host id.
    Map<UUID, Node> added = new HashMap<>();
    // Host ids seen so far, to detect duplicates and compute removals.
    Set<UUID> seen = new HashSet<>();

    TokenFactory tokenFactory =
        oldMetadata.getTokenMap().map(m -> ((DefaultTokenMap) m).getTokenFactory()).orElse(null);
    boolean tokensChanged = false;

    for (NodeInfo nodeInfo : nodeInfos) {
      UUID id = nodeInfo.getHostId();
      if (seen.contains(id)) {
        LOG.warn(
            "[{}] Found duplicate entries with host_id {} in system.peers, "
                + "keeping only the first one",
            logPrefix,
            id);
      } else {
        seen.add(id);
        DefaultNode node = (DefaultNode) oldNodes.get(id);
        if (node == null) {
          node = new DefaultNode(nodeInfo.getEndPoint(), context);
          LOG.debug("[{}] Adding new node {}", logPrefix, node);
          added.put(id, node);
        }
        if (tokenFactory == null && nodeInfo.getPartitioner() != null) {
          tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner());
        }
        tokensChanged |= copyInfos(nodeInfo, node, context);
      }
    }

    // Old nodes whose host id no longer appears in the refreshed list.
    Set<UUID> removed = Sets.difference(oldNodes.keySet(), seen);

    if (added.isEmpty() && removed.isEmpty()) { // The list didn't change
      if (!oldMetadata.getTokenMap().isPresent() && tokenFactory != null) {
        // First time we found out what the partitioner is => set the token factory and trigger a
        // token map rebuild:
        return new Result(
            oldMetadata.withNodes(
                oldMetadata.getNodes(), tokenMapEnabled, true, tokenFactory, context));
      } else {
        // No need to create a new metadata instance
        return new Result(oldMetadata);
      }
    } else {
      ImmutableMap.Builder<UUID, Node> newNodesBuilder = ImmutableMap.builder();
      ImmutableList.Builder<Object> eventsBuilder = ImmutableList.builder();

      newNodesBuilder.putAll(added);
      for (Map.Entry<UUID, Node> entry : oldNodes.entrySet()) {
        if (!removed.contains(entry.getKey())) {
          newNodesBuilder.put(entry.getKey(), entry.getValue());
        }
      }

      for (Node node : added.values()) {
        eventsBuilder.add(NodeStateEvent.added((DefaultNode) node));
      }
      for (UUID id : removed) {
        Node node = oldNodes.get(id);
        eventsBuilder.add(NodeStateEvent.removed((DefaultNode) node));
      }

      return new Result(
          oldMetadata.withNodes(
              newNodesBuilder.build(), tokenMapEnabled, tokensChanged, tokenFactory, context),
          eventsBuilder.build());
    }
  }
}
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java deleted file mode 100644 index 517bfca27fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The first node list refresh: contact points are not in the metadata yet, we need to copy them - * over. - */ -@ThreadSafe -class InitialNodeListRefresh extends NodesRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(InitialNodeListRefresh.class); - - @VisibleForTesting final Iterable nodeInfos; - @VisibleForTesting final Set contactPoints; - - InitialNodeListRefresh(Iterable nodeInfos, Set contactPoints) { - this.nodeInfos = nodeInfos; - this.contactPoints = contactPoints; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - TokenFactoryRegistry tokenFactoryRegistry = context.getTokenFactoryRegistry(); - - // Since this is the first refresh, and we've stored contact points separately until now, the - // metadata is empty. 
- assert oldMetadata == DefaultMetadata.EMPTY; - TokenFactory tokenFactory = null; - - Map newNodes = new HashMap<>(); - // Contact point nodes don't have host ID as well as other info yet, so we fill them with node - // info found on first match by endpoint - Set matchedContactPoints = new HashSet<>(); - List addedNodes = new ArrayList<>(); - - for (NodeInfo nodeInfo : nodeInfos) { - UUID hostId = nodeInfo.getHostId(); - if (newNodes.containsKey(hostId)) { - LOG.warn( - "[{}] Found duplicate entries with host_id {} in system.peers, " - + "keeping only the first one {}", - logPrefix, - hostId, - newNodes.get(hostId)); - } else { - EndPoint endPoint = nodeInfo.getEndPoint(); - DefaultNode contactPointNode = findContactPointNode(endPoint); - DefaultNode node; - if (contactPointNode == null || matchedContactPoints.contains(endPoint)) { - node = new DefaultNode(endPoint, context); - addedNodes.add(node); - LOG.debug("[{}] Adding new node {}", logPrefix, node); - } else { - matchedContactPoints.add(contactPointNode.getEndPoint()); - node = contactPointNode; - LOG.debug("[{}] Copying contact point {}", logPrefix, node); - } - if (tokenMapEnabled && tokenFactory == null && nodeInfo.getPartitioner() != null) { - tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); - } - copyInfos(nodeInfo, node, context); - newNodes.put(hostId, node); - } - } - - ImmutableList.Builder eventsBuilder = ImmutableList.builder(); - for (DefaultNode addedNode : addedNodes) { - eventsBuilder.add(NodeStateEvent.added(addedNode)); - } - for (DefaultNode contactPoint : contactPoints) { - if (!matchedContactPoints.contains(contactPoint.getEndPoint())) { - eventsBuilder.add(NodeStateEvent.removed(contactPoint)); - } - } - - return new Result( - oldMetadata.withNodes( - ImmutableMap.copyOf(newNodes), tokenMapEnabled, true, tokenFactory, context), - eventsBuilder.build()); - } - - private DefaultNode findContactPointNode(EndPoint endPoint) { - for (DefaultNode node : 
contactPoints) { - if (node.getEndPoint().equals(endPoint)) { - return node; - } - } - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java deleted file mode 100644 index 5c8473a3b67..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.WeakHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Wraps the user-provided LBPs for internal use. This serves multiple purposes: - * - *
    - *
  • help enforce the guarantee that init is called exactly once, and before any other method. - *
  • handle the early stages of initialization (before first actual connect), where the LBPs are - * not ready yet. - *
  • handle incoming node state events from the outside world and propagate them to the - * policies. - *
  • process distance decisions from the policies and propagate them to the outside world. - *
- */ -@ThreadSafe -public class LoadBalancingPolicyWrapper implements AutoCloseable { - - private static final Logger LOG = LoggerFactory.getLogger(LoadBalancingPolicyWrapper.class); - - private enum State { - BEFORE_INIT, - DURING_INIT, - RUNNING, - CLOSING - } - - private final InternalDriverContext context; - private final Set policies; - private final Map policiesPerProfile; - private final Map reporters; - - private final Lock distancesLock = new ReentrantLock(); - - // Remember which distance each policy reported for each node. We assume that distance events will - // be rare, so don't try to be too clever, a global lock should suffice. - @GuardedBy("distancesLock") - private final Map> distances; - - private final String logPrefix; - private final ReplayingEventFilter eventFilter = - new ReplayingEventFilter<>(this::processNodeStateEvent); - private final AtomicReference stateRef = new AtomicReference<>(State.BEFORE_INIT); - - public LoadBalancingPolicyWrapper( - @NonNull InternalDriverContext context, - @NonNull Map policiesPerProfile) { - this.context = context; - - this.policiesPerProfile = policiesPerProfile; - ImmutableMap.Builder reportersBuilder = - ImmutableMap.builder(); - // ImmutableMap.values does not remove duplicates, do it now so that we won't invoke a policy - // more than once if it's associated with multiple profiles - for (LoadBalancingPolicy policy : ImmutableSet.copyOf(policiesPerProfile.values())) { - reportersBuilder.put(policy, new SinglePolicyDistanceReporter(policy)); - } - this.reporters = reportersBuilder.build(); - // Just an alias to make the rest of the code more readable - this.policies = reporters.keySet(); - - this.distances = new WeakHashMap<>(); - - this.logPrefix = context.getSessionName(); - context.getEventBus().register(NodeStateEvent.class, this::onNodeStateEvent); - } - - public void init() { - if (stateRef.compareAndSet(State.BEFORE_INIT, State.DURING_INIT)) { - LOG.debug("[{}] Initializing policies", logPrefix); - 
// State events can happen concurrently with init, so we must record them and replay once the - // policy is initialized. - eventFilter.start(); - MetadataManager metadataManager = context.getMetadataManager(); - Metadata metadata = metadataManager.getMetadata(); - for (LoadBalancingPolicy policy : policies) { - policy.init(metadata.getNodes(), reporters.get(policy)); - } - if (stateRef.compareAndSet(State.DURING_INIT, State.RUNNING)) { - eventFilter.markReady(); - } else { // closed during init - assert stateRef.get() == State.CLOSING; - for (LoadBalancingPolicy policy : policies) { - policy.close(); - } - } - } - } - - /** - * Note: we could infer the profile name from the request again in this method, but since that's - * already done in request processors, pass the value directly. - * - * @see LoadBalancingPolicy#newQueryPlan(Request, Session) - */ - @NonNull - public Queue newQueryPlan( - @Nullable Request request, @NonNull String executionProfileName, @Nullable Session session) { - switch (stateRef.get()) { - case BEFORE_INIT: - case DURING_INIT: - // The contact points are not stored in the metadata yet: - List nodes = new ArrayList<>(context.getMetadataManager().getContactPoints()); - Collections.shuffle(nodes); - return new ConcurrentLinkedQueue<>(nodes); - case RUNNING: - LoadBalancingPolicy policy = policiesPerProfile.get(executionProfileName); - if (policy == null) { - policy = policiesPerProfile.get(DriverExecutionProfile.DEFAULT_NAME); - } - return policy.newQueryPlan(request, session); - default: - return new ConcurrentLinkedQueue<>(); - } - } - - @NonNull - public Queue newQueryPlan() { - return newQueryPlan(null, DriverExecutionProfile.DEFAULT_NAME, null); - } - - // when it comes in from the outside - private void onNodeStateEvent(NodeStateEvent event) { - eventFilter.accept(event); - } - - // once it has gone through the filter - private void processNodeStateEvent(NodeStateEvent event) { - DefaultNode node = event.node; - switch (stateRef.get()) 
{ - case BEFORE_INIT: - case DURING_INIT: - throw new AssertionError("Filter should not be marked ready until LBP init"); - case CLOSING: - return; // ignore - case RUNNING: - for (LoadBalancingPolicy policy : policies) { - if (event.newState == NodeState.UP) { - policy.onUp(node); - } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { - policy.onDown(node); - } else if (event.newState == NodeState.UNKNOWN) { - policy.onAdd(node); - } else if (event.newState == null) { - policy.onRemove(node); - } else { - LOG.warn("[{}] Unsupported event: {}", logPrefix, event); - } - } - break; - } - } - - @Override - public void close() { - State old; - while (true) { - old = stateRef.get(); - if (old == State.CLOSING) { - return; // already closed - } else if (stateRef.compareAndSet(old, State.CLOSING)) { - break; - } - } - // If BEFORE_INIT, no need to close because they were never initialized - // If DURING_INIT, this will be handled in init() - if (old == State.RUNNING) { - for (LoadBalancingPolicy policy : policies) { - policy.close(); - } - } - } - - // An individual distance reporter for one of the policies. The results are aggregated across all - // policies, the smallest distance for each node is used. 
- private class SinglePolicyDistanceReporter implements LoadBalancingPolicy.DistanceReporter { - - private final LoadBalancingPolicy policy; - - private SinglePolicyDistanceReporter(LoadBalancingPolicy policy) { - this.policy = policy; - } - - @Override - public void setDistance(@NonNull Node node, @NonNull NodeDistance suggestedDistance) { - LOG.debug( - "[{}] {} suggested {} to {}, checking what other policies said", - logPrefix, - policy, - node, - suggestedDistance); - distancesLock.lock(); - try { - Map distancesForNode = - distances.computeIfAbsent(node, (n) -> new HashMap<>()); - distancesForNode.put(policy, suggestedDistance); - NodeDistance newDistance = aggregate(distancesForNode); - LOG.debug("[{}] Shortest distance across all policies is {}", logPrefix, newDistance); - - // There is a small race condition here (check-then-act on a volatile field). However this - // would only happen if external code changes the distance, which is unlikely (and - // dangerous). - // The driver internals only ever set the distance here, and we're protected by the lock. 
- NodeDistance oldDistance = node.getDistance(); - if (!oldDistance.equals(newDistance)) { - LOG.debug("[{}] {} was {}, changing to {}", logPrefix, node, oldDistance, newDistance); - DefaultNode defaultNode = (DefaultNode) node; - defaultNode.distance = newDistance; - context.getEventBus().fire(new DistanceEvent(newDistance, defaultNode)); - } else { - LOG.debug("[{}] {} was already {}, ignoring", logPrefix, node, oldDistance); - } - } finally { - distancesLock.unlock(); - } - } - - private NodeDistance aggregate(Map distances) { - NodeDistance minimum = NodeDistance.IGNORED; - for (NodeDistance candidate : distances.values()) { - if (candidate.compareTo(minimum) < 0) { - minimum = candidate; - } - } - return minimum; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java deleted file mode 100644 index efb04bde5e1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.Debouncer; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; - -/** Holds the immutable instance of the {@link Metadata}, and handles requests to update it. */ -@ThreadSafe -public class MetadataManager implements AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(MetadataManager.class); - - static final EndPoint DEFAULT_CONTACT_POINT = - new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042)); - - private final InternalDriverContext context; - private final String logPrefix; - private final EventExecutor adminExecutor; - private final DriverExecutionProfile config; - private final SingleThreaded singleThreaded; - private final ControlConnection controlConnection; - - private volatile DefaultMetadata metadata; // only updated from adminExecutor - private volatile boolean schemaEnabledInConfig; - private volatile List refreshedKeyspaces; - private volatile KeyspaceFilter keyspaceFilter; - private volatile Boolean schemaEnabledProgrammatically; - private volatile boolean tokenMapEnabled; - private volatile Set contactPoints; - private volatile boolean wasImplicitContactPoint; - - public MetadataManager(InternalDriverContext context) { - this(context, DefaultMetadata.EMPTY); - } - - protected MetadataManager(InternalDriverContext context, DefaultMetadata initialMetadata) { - this.context = context; - this.metadata = initialMetadata; - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.config = context.getConfig().getDefaultProfile(); - this.singleThreaded = new SingleThreaded(context, config); - this.controlConnection = context.getControlConnection(); - this.schemaEnabledInConfig = config.getBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED); - this.refreshedKeyspaces = - config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); - this.tokenMapEnabled = 
config.getBoolean(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED); - - context.getEventBus().register(ConfigChangeEvent.class, this::onConfigChanged); - } - - private void onConfigChanged(@SuppressWarnings("unused") ConfigChangeEvent event) { - boolean schemaEnabledBefore = isSchemaEnabled(); - boolean tokenMapEnabledBefore = tokenMapEnabled; - List keyspacesBefore = this.refreshedKeyspaces; - - this.schemaEnabledInConfig = config.getBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED); - this.refreshedKeyspaces = - config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); - this.tokenMapEnabled = config.getBoolean(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED); - - if ((!schemaEnabledBefore - || !keyspacesBefore.equals(refreshedKeyspaces) - || (!tokenMapEnabledBefore && tokenMapEnabled)) - && isSchemaEnabled()) { - refreshSchema(null, false, true) - .whenComplete( - (metadata, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema after it was re-enabled " - + "in the configuration, keeping previous version", - logPrefix, - error); - } - }); - } - } - - public Metadata getMetadata() { - return this.metadata; - } - - public void addContactPoints(Set providedContactPoints) { - // Convert the EndPoints to Nodes, but we can't put them into the Metadata yet, because we - // don't know their host_id. So store them in a volatile field instead, they will get copied - // during the first node refresh. 
- ImmutableSet.Builder contactPointsBuilder = ImmutableSet.builder(); - if (providedContactPoints == null || providedContactPoints.isEmpty()) { - LOG.info( - "[{}] No contact points provided, defaulting to {}", logPrefix, DEFAULT_CONTACT_POINT); - this.wasImplicitContactPoint = true; - contactPointsBuilder.add(new DefaultNode(DEFAULT_CONTACT_POINT, context)); - } else { - for (EndPoint endPoint : providedContactPoints) { - contactPointsBuilder.add(new DefaultNode(endPoint, context)); - } - } - this.contactPoints = contactPointsBuilder.build(); - LOG.debug("[{}] Adding initial contact points {}", logPrefix, contactPoints); - } - - /** - * The contact points that were used by the driver to initialize. If none were provided - * explicitly, this will be the default (127.0.0.1:9042). - * - * @see #wasImplicitContactPoint() - */ - public Set getContactPoints() { - return contactPoints; - } - - /** Whether the default contact point was used (because none were provided explicitly). */ - public boolean wasImplicitContactPoint() { - return wasImplicitContactPoint; - } - - public CompletionStage refreshNodes() { - return context - .getTopologyMonitor() - .refreshNodeList() - .thenApplyAsync(singleThreaded::refreshNodes, adminExecutor); - } - - public CompletionStage refreshNode(Node node) { - return context - .getTopologyMonitor() - .refreshNode(node) - .thenApplyAsync( - maybeInfo -> { - if (maybeInfo.isPresent()) { - boolean tokensChanged = - NodesRefresh.copyInfos(maybeInfo.get(), (DefaultNode) node, context); - if (tokensChanged) { - apply(new TokensChangedRefresh()); - } - } else { - LOG.debug( - "[{}] Topology monitor did not return any info for the refresh of {}, skipping", - logPrefix, - node); - } - return null; - }, - adminExecutor); - } - - public void addNode(InetSocketAddress broadcastRpcAddress) { - context - .getTopologyMonitor() - .getNewNodeInfo(broadcastRpcAddress) - .whenCompleteAsync( - (info, error) -> { - if (error != null) { - LOG.debug( - "[{}] Error 
refreshing node info for {}, " - + "this will be retried on the next full refresh", - logPrefix, - broadcastRpcAddress, - error); - } else { - singleThreaded.addNode(broadcastRpcAddress, info.orElse(null)); - } - }, - adminExecutor); - } - - public void removeNode(InetSocketAddress broadcastRpcAddress) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.removeNode(broadcastRpcAddress)); - } - - /** - * @param keyspace if this refresh was triggered by an event, that event's keyspace, otherwise - * null (this is only used to discard the event if it targets a keyspace that we're ignoring) - * @param evenIfDisabled force the refresh even if schema is currently disabled (used for user - * request) - * @param flushNow bypass the debouncer and force an immediate refresh (used to avoid a delay at - * startup) - */ - public CompletionStage refreshSchema( - String keyspace, boolean evenIfDisabled, boolean flushNow) { - CompletableFuture future = new CompletableFuture<>(); - RunOrSchedule.on( - adminExecutor, - () -> singleThreaded.refreshSchema(keyspace, evenIfDisabled, flushNow, future)); - return future; - } - - public static class RefreshSchemaResult { - private final Metadata metadata; - private final boolean isSchemaInAgreement; - - public RefreshSchemaResult(Metadata metadata, boolean isSchemaInAgreement) { - this.metadata = metadata; - this.isSchemaInAgreement = isSchemaInAgreement; - } - - public RefreshSchemaResult(Metadata metadata) { - this( - metadata, - // This constructor is used in corner cases where agreement doesn't matter - true); - } - - public Metadata getMetadata() { - return metadata; - } - - public boolean isSchemaInAgreement() { - return isSchemaInAgreement; - } - } - - public boolean isSchemaEnabled() { - return (schemaEnabledProgrammatically != null) - ? 
schemaEnabledProgrammatically - : schemaEnabledInConfig; - } - - public CompletionStage setSchemaEnabled(Boolean newValue) { - boolean wasEnabledBefore = isSchemaEnabled(); - schemaEnabledProgrammatically = newValue; - if (!wasEnabledBefore && isSchemaEnabled()) { - return refreshSchema(null, false, true).thenApply(RefreshSchemaResult::getMetadata); - } else { - return CompletableFuture.completedFuture(metadata); - } - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return this.closeAsync(); - } - - private class SingleThreaded { - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private final CompletableFuture firstSchemaRefreshFuture = new CompletableFuture<>(); - private final Debouncer< - CompletableFuture, CompletableFuture> - schemaRefreshDebouncer; - private final SchemaQueriesFactory schemaQueriesFactory; - private final SchemaParserFactory schemaParserFactory; - - // We don't allow concurrent schema refreshes. If one is already running, the next one is queued - // (and the ones after that are merged with the queued one). 
- private CompletableFuture currentSchemaRefresh; - private CompletableFuture queuedSchemaRefresh; - - private boolean didFirstNodeListRefresh; - - private SingleThreaded(InternalDriverContext context, DriverExecutionProfile config) { - this.schemaRefreshDebouncer = - new Debouncer<>( - logPrefix + "|metadata debouncer", - adminExecutor, - this::coalesceSchemaRequests, - this::startSchemaRequest, - config.getDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW), - config.getInt(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS)); - this.schemaQueriesFactory = context.getSchemaQueriesFactory(); - this.schemaParserFactory = context.getSchemaParserFactory(); - } - - private Void refreshNodes(Iterable nodeInfos) { - MetadataRefresh refresh = - didFirstNodeListRefresh - ? new FullNodeListRefresh(nodeInfos) - : new InitialNodeListRefresh(nodeInfos, contactPoints); - didFirstNodeListRefresh = true; - return apply(refresh); - } - - private void addNode(InetSocketAddress address, NodeInfo info) { - try { - if (info != null) { - if (!address.equals(info.getBroadcastRpcAddress().orElse(null))) { - // This would be a bug in the TopologyMonitor, protect against it - LOG.warn( - "[{}] Received a request to add a node for broadcast RPC address {}, " - + "but the provided info reports {}, ignoring it", - logPrefix, - address, - info.getBroadcastAddress()); - } else { - apply(new AddNodeRefresh(info)); - } - } else { - LOG.debug( - "[{}] Ignoring node addition for {} because the " - + "topology monitor didn't return any information", - logPrefix, - address); - } - } catch (Throwable t) { - LOG.warn("[" + logPrefix + "] Unexpected exception while handling added node", logPrefix); - } - } - - private void removeNode(InetSocketAddress broadcastRpcAddress) { - apply(new RemoveNodeRefresh(broadcastRpcAddress)); - } - - private void refreshSchema( - String keyspace, - boolean evenIfDisabled, - boolean flushNow, - CompletableFuture future) { - - if (!didFirstNodeListRefresh) { - // This 
happen if the control connection receives a schema event during init. We can't - // refresh yet because we don't know the nodes' versions, simply ignore. - future.complete(new RefreshSchemaResult(metadata)); - return; - } - - // If this is an event, make sure it's not targeting a keyspace that we're ignoring. - boolean isRefreshedKeyspace = keyspace == null || keyspaceFilter.includes(keyspace); - - if (isRefreshedKeyspace && (evenIfDisabled || isSchemaEnabled())) { - acceptSchemaRequest(future, flushNow); - } else { - future.complete(new RefreshSchemaResult(metadata)); - singleThreaded.firstSchemaRefreshFuture.complete(null); - } - } - - // An external component has requested a schema refresh, feed it to the debouncer. - private void acceptSchemaRequest( - CompletableFuture future, boolean flushNow) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - future.complete(new RefreshSchemaResult(metadata)); - } else { - schemaRefreshDebouncer.receive(future); - if (flushNow) { - schemaRefreshDebouncer.flushNow(); - } - } - } - - // Multiple requests have arrived within the debouncer window, coalesce them. - private CompletableFuture coalesceSchemaRequests( - List> futures) { - assert adminExecutor.inEventLoop(); - assert !futures.isEmpty(); - // Keep only one, but ensure that the discarded ones will still be completed when we're done - CompletableFuture result = null; - for (CompletableFuture future : futures) { - if (result == null) { - result = future; - } else { - CompletableFutures.completeFrom(result, future); - } - } - return result; - } - - // The debouncer has flushed, start the actual work. 
- private void startSchemaRequest(CompletableFuture refreshFuture) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - refreshFuture.complete(new RefreshSchemaResult(metadata)); - return; - } - if (currentSchemaRefresh == null) { - currentSchemaRefresh = refreshFuture; - LOG.debug("[{}] Starting schema refresh", logPrefix); - initControlConnectionForSchema() - .thenCompose(v -> context.getTopologyMonitor().checkSchemaAgreement()) - .whenComplete( - (schemaInAgreement, agreementError) -> { - if (agreementError != null) { - refreshFuture.completeExceptionally(agreementError); - } else { - try { - schemaQueriesFactory - .newInstance() - .execute() - .thenApplyAsync(this::parseAndApplySchemaRows, adminExecutor) - .whenComplete( - (newMetadata, metadataError) -> { - if (metadataError != null) { - refreshFuture.completeExceptionally(metadataError); - } else { - refreshFuture.complete( - new RefreshSchemaResult(newMetadata, schemaInAgreement)); - } - - firstSchemaRefreshFuture.complete(null); - - currentSchemaRefresh = null; - // If another refresh was enqueued during this one, run it now - if (queuedSchemaRefresh != null) { - CompletableFuture tmp = - this.queuedSchemaRefresh; - this.queuedSchemaRefresh = null; - startSchemaRequest(tmp); - } - }); - } catch (Throwable t) { - LOG.debug("[{}] Exception getting new metadata", logPrefix, t); - refreshFuture.completeExceptionally(t); - } - } - }); - } else if (queuedSchemaRefresh == null) { - queuedSchemaRefresh = refreshFuture; // wait for our turn - } else { - CompletableFutures.completeFrom( - queuedSchemaRefresh, refreshFuture); // join the queued request - } - } - - // To query schema tables, we need the control connection. - // Normally that the topology monitor has already initialized it to query node tables. But if a - // custom topology monitor is in place, it might not use the control connection at all. 
- private CompletionStage initControlConnectionForSchema() { - if (firstSchemaRefreshFuture.isDone()) { - // We tried to refresh the schema before, so we know we called init already. Don't call it - // again since that is cheaper. - return firstSchemaRefreshFuture; - } else { - // Trigger init (a no-op if the topology monitor already done so) - return controlConnection.init(false, true, false); - } - } - - private Metadata parseAndApplySchemaRows(SchemaRows schemaRows) { - assert adminExecutor.inEventLoop(); - SchemaRefresh schemaRefresh = schemaParserFactory.newInstance(schemaRows).parse(); - long start = System.nanoTime(); - apply(schemaRefresh); - LOG.debug("[{}] Applying schema refresh took {}", logPrefix, NanoTime.formatTimeSince(start)); - return metadata; - } - - private void close() { - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Closing", logPrefix); - // The current schema refresh should fail when its channel gets closed. - if (queuedSchemaRefresh != null) { - queuedSchemaRefresh.completeExceptionally(new IllegalStateException("Cluster is closed")); - } - closeFuture.complete(null); - } - } - - @VisibleForTesting - Void apply(MetadataRefresh refresh) { - assert adminExecutor.inEventLoop(); - MetadataRefresh.Result result = refresh.compute(metadata, tokenMapEnabled, context); - metadata = result.newMetadata; - boolean isFirstSchemaRefresh = - refresh instanceof SchemaRefresh && !singleThreaded.firstSchemaRefreshFuture.isDone(); - if (!singleThreaded.closeWasCalled && !isFirstSchemaRefresh) { - for (Object event : result.events) { - context.getEventBus().fire(event); - } - } - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java deleted file mode 100644 index fc31f317622..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.Collections; -import java.util.List; - -/** - * Any update to the driver's metadata. It produces a new metadata instance, and may also trigger - * events. - * - *

This is modelled as a separate type for modularity, and because we can't send the events while - * we are doing the refresh (by contract, the new copy of the metadata needs to be visible before - * the events are sent). This also makes unit testing very easy. - * - *

This is only instantiated and called from {@link MetadataManager}'s admin thread, therefore - * implementations don't need to be thread-safe. - * - * @see Session#getMetadata() - */ -public interface MetadataRefresh { - - Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context); - - class Result { - public final DefaultMetadata newMetadata; - public final List events; - - public Result(DefaultMetadata newMetadata, List events) { - this.newMetadata = newMetadata; - this.events = events; - } - - public Result(DefaultMetadata newMetadata) { - this(newMetadata, Collections.emptyList()); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java deleted file mode 100644 index 8ee6d04bbae..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Combines multiple node state listeners into a single one. - * - *

Any exception thrown by a child listener is caught and logged. - */ -@ThreadSafe -public class MultiplexingNodeStateListener implements NodeStateListener { - - private static final Logger LOG = LoggerFactory.getLogger(MultiplexingNodeStateListener.class); - - private final List listeners = new CopyOnWriteArrayList<>(); - - public MultiplexingNodeStateListener() {} - - public MultiplexingNodeStateListener(NodeStateListener... listeners) { - this(Arrays.asList(listeners)); - } - - public MultiplexingNodeStateListener(Collection listeners) { - addListeners(listeners); - } - - private void addListeners(Collection source) { - for (NodeStateListener listener : source) { - addListener(listener); - } - } - - private void addListener(NodeStateListener toAdd) { - Objects.requireNonNull(toAdd, "listener cannot be null"); - if (toAdd instanceof MultiplexingNodeStateListener) { - addListeners(((MultiplexingNodeStateListener) toAdd).listeners); - } else { - listeners.add(toAdd); - } - } - - public void register(@NonNull NodeStateListener listener) { - addListener(listener); - } - - @Override - public void onAdd(@NonNull Node node) { - invokeListeners(listener -> listener.onAdd(node), "onAdd"); - } - - @Override - public void onUp(@NonNull Node node) { - invokeListeners(listener -> listener.onUp(node), "onUp"); - } - - @Override - public void onDown(@NonNull Node node) { - invokeListeners(listener -> listener.onDown(node), "onDown"); - } - - @Override - public void onRemove(@NonNull Node node) { - invokeListeners(listener -> listener.onRemove(node), "onRemove"); - } - - @Override - public void onSessionReady(@NonNull Session session) { - invokeListeners(listener -> listener.onSessionReady(session), "onSessionReady"); - } - - @Override - public void close() throws Exception { - for (NodeStateListener listener : listeners) { - try { - listener.close(); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "Unexpected error while closing node state listener {}.", listener, 
e); - } - } - } - - private void invokeListeners(@NonNull Consumer action, String event) { - for (NodeStateListener listener : listeners) { - try { - action.accept(listener); - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "Unexpected error while notifying node state listener {} of an {} event.", - listener, - event, - e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java deleted file mode 100644 index 6a9651d8376..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; - -/** - * Information about a node, returned by the {@link TopologyMonitor}. - * - *

This information will be copied to the corresponding {@link Node} in the metadata. - */ -public interface NodeInfo { - - /** - * The endpoint that the driver will use to connect to the node. - * - *

This information is required; the driver will not function properly if this method returns - * {@code null}. - */ - @NonNull - EndPoint getEndPoint(); - - /** - * The node's broadcast RPC address and port. That is, the address that clients are supposed to - * use to communicate with that node. - * - *

This is currently only used to match broadcast RPC addresses received in status events - * coming in on the control connection. The driver does not use this value to actually connect to - * the node, but rather uses {@link #getEndPoint()}. - * - * @see Node#getBroadcastRpcAddress() - */ - @NonNull - Optional getBroadcastRpcAddress(); - - /** - * The node's broadcast address and port. That is, the address that other nodes use to communicate - * with that node. - * - *

This is only used by the default topology monitor, so if you are writing a custom one and - * don't need this information, you can leave it empty. - */ - @NonNull - Optional getBroadcastAddress(); - - /** - * The node's listen address and port. That is, the address that the Cassandra process binds to. - * - *

This is currently not used anywhere in the driver. If you write a custom topology monitor - * and don't need this information, you can leave it empty. - */ - @NonNull - Optional getListenAddress(); - - /** - * The data center that this node belongs to, according to the Cassandra snitch. - * - *

This is used by some {@link LoadBalancingPolicy} implementations to compute the {@link - * NodeDistance}. - */ - @Nullable - String getDatacenter(); - - /** - * The rack that this node belongs to, according to the Cassandra snitch. - * - *

This is used by some {@link LoadBalancingPolicy} implementations to compute the {@link - * NodeDistance}. - */ - @Nullable - String getRack(); - - /** - * The Cassandra version that this node runs. - * - *

This is used when parsing the schema (schema tables sometimes change from one version to the - * next, even if the protocol version stays the same). If this is null, schema parsing will use - * the lowest version for the current protocol version, which might lead to inaccuracies. - */ - @Nullable - String getCassandraVersion(); - - /** - * The fully-qualifier name of the partitioner class that distributes data across the nodes, as it - * appears in {@code system.local.partitioner}. - * - *

This is used to compute the driver-side token metadata (in particular, token-aware routing - * relies on this information). It is only really needed for the first node of the initial node - * list refresh (but it doesn't hurt to always include it if possible). If it is absent, {@link - * Metadata#getTokenMap()} will remain empty. - */ - @Nullable - String getPartitioner(); - - /** - * The tokens that this node owns on the ring. - * - *

This is used to compute the driver-side token metadata (in particular, token-aware routing - * relies on this information). If you're not using token metadata in any way, you may return an - * empty set here. - */ - @Nullable - Set getTokens(); - - /** - * An additional map of free-form properties, that can be used by custom implementations. They - * will be copied as-is into {@link Node#getExtras()}. - * - *

This is not required; if you don't have anything specific to report here, it can be null or - * empty. - */ - @Nullable - Map getExtras(); - - /** - * The host ID that is assigned to this host by cassandra. The driver uses this to uniquely - * identify a node. - * - *

This information is required; the driver will not function properly if this method returns - * {@code null}. - */ - @NonNull - UUID getHostId(); - - /** - * The current version that is associated with the node's schema. - * - *

This is not required; the driver reports it in {@link Node#getSchemaVersion()}, but for - * informational purposes only. It is not used anywhere internally (schema agreement is checked - * with {@link TopologyMonitor#checkSchemaAgreement()}, which by default queries system tables - * directly, not this field). - */ - @Nullable - UUID getSchemaVersion(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java deleted file mode 100644 index 2f5c3c1d230..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * The transition of a node from one {@linkplain NodeState state} to another. - * - *

For simplicity, this is also used to represent a node addition ({@code oldState=null, - * newState=UNKNOWN}) or removal ({@code oldState=newState=null}). - */ -@Immutable -public class NodeStateEvent { - public static NodeStateEvent changed(NodeState oldState, NodeState newState, DefaultNode node) { - Preconditions.checkNotNull(oldState); - Preconditions.checkNotNull(newState); - return new NodeStateEvent(oldState, newState, node); - } - - public static NodeStateEvent added(DefaultNode node) { - return new NodeStateEvent(null, NodeState.UNKNOWN, node); - } - - public static NodeStateEvent removed(DefaultNode node) { - return new NodeStateEvent(null, null, node); - } - - /** The state before the change, or {@code null} if this is an addition or a removal. */ - public final NodeState oldState; - - /** - * The state after the change ({@link NodeState#UNKNOWN} if the node was just added), or {@code - * null} if this is a removal. - */ - public final NodeState newState; - - public final DefaultNode node; - - private NodeStateEvent(NodeState oldState, NodeState newState, DefaultNode node) { - this.node = node; - this.oldState = oldState; - this.newState = newState; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof NodeStateEvent) { - NodeStateEvent that = (NodeStateEvent) other; - return this.oldState == that.oldState - && this.newState == that.newState - && Objects.equals(this.node, that.node); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(oldState, newState, node); - } - - @Override - public String toString() { - return "NodeStateEvent(" + oldState + "=>" + newState + ", " + node + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java deleted file mode 100644 index c8a52e4fa00..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.Debouncer; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import 
java.net.InetSocketAddress; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains the state of the Cassandra nodes, based on the events received from other components of - * the driver. - * - *

See {@link NodeState} and {@link TopologyEvent} for a description of the state change rules. - */ -@ThreadSafe -public class NodeStateManager implements AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(NodeStateManager.class); - - private final EventExecutor adminExecutor; - private final SingleThreaded singleThreaded; - private final String logPrefix; - - public NodeStateManager(InternalDriverContext context) { - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.singleThreaded = new SingleThreaded(context); - this.logPrefix = context.getSessionName(); - } - - /** - * Indicates when the driver initialization is complete (that is, we have performed the first node - * list refresh and are about to initialize the load balancing policy). - */ - public void markInitialized() { - RunOrSchedule.on(adminExecutor, singleThreaded::markInitialized); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return closeAsync(); - } - - private class SingleThreaded { - - private final MetadataManager metadataManager; - private final EventBus eventBus; - private final Debouncer> topologyEventDebouncer; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean isInitialized = false; - private boolean closeWasCalled; - - private SingleThreaded(InternalDriverContext context) { - this.metadataManager = context.getMetadataManager(); - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.topologyEventDebouncer = - new Debouncer<>( - logPrefix + "|topology debouncer", - adminExecutor, - this::coalesceTopologyEvents, - this::flushTopologyEvents, - 
config.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW), - config.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)); - - this.eventBus = context.getEventBus(); - this.eventBus.register( - ChannelEvent.class, RunOrSchedule.on(adminExecutor, this::onChannelEvent)); - this.eventBus.register( - TopologyEvent.class, RunOrSchedule.on(adminExecutor, this::onTopologyEvent)); - // Note: this component exists for the whole life of the driver instance, so don't worry about - // unregistering the listeners. - } - - private void markInitialized() { - assert adminExecutor.inEventLoop(); - isInitialized = true; - } - - // Updates to DefaultNode's volatile fields are confined to the admin thread - @SuppressWarnings({"NonAtomicVolatileUpdate", "NonAtomicOperationOnVolatileField"}) - private void onChannelEvent(ChannelEvent event) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - LOG.debug("[{}] Processing {}", logPrefix, event); - DefaultNode node = (DefaultNode) event.node; - assert node != null; - switch (event.type) { - case OPENED: - node.openConnections += 1; - if (node.state == NodeState.DOWN || node.state == NodeState.UNKNOWN) { - setState(node, NodeState.UP, "a new connection was opened to it"); - } - break; - case CLOSED: - node.openConnections -= 1; - if (node.openConnections == 0 && node.reconnections > 0) { - setState(node, NodeState.DOWN, "it was reconnecting and lost its last connection"); - } - break; - case RECONNECTION_STARTED: - node.reconnections += 1; - if (node.openConnections == 0) { - setState(node, NodeState.DOWN, "it has no connections and started reconnecting"); - } - break; - case RECONNECTION_STOPPED: - node.reconnections -= 1; - break; - case CONTROL_CONNECTION_FAILED: - // Special case for init, where this means that a contact point is down. In other - // situations that information is not really useful, we rely on - // openConnections/reconnections instead. 
- if (!isInitialized) { - setState(node, NodeState.DOWN, "it was tried as a contact point but failed"); - } - break; - } - } - - private void onDebouncedTopologyEvent(TopologyEvent event) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - LOG.debug("[{}] Processing {}", logPrefix, event); - Optional maybeNode = metadataManager.getMetadata().findNode(event.broadcastRpcAddress); - switch (event.type) { - case SUGGEST_UP: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - if (node.state == NodeState.FORCED_DOWN) { - LOG.debug("[{}] Not setting {} UP because it is FORCED_DOWN", logPrefix, node); - } else if (node.distance == NodeDistance.IGNORED) { - setState(node, NodeState.UP, "it is IGNORED and an UP topology event was received"); - } - } else { - LOG.debug( - "[{}] Received UP event for unknown node {}, refreshing node list", - logPrefix, - event.broadcastRpcAddress); - metadataManager.refreshNodes(); - } - break; - case SUGGEST_DOWN: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - if (node.openConnections > 0) { - LOG.debug( - "[{}] Not setting {} DOWN because it still has active connections", - logPrefix, - node); - } else if (node.state == NodeState.FORCED_DOWN) { - LOG.debug("[{}] Not setting {} DOWN because it is FORCED_DOWN", logPrefix, node); - } else if (node.distance == NodeDistance.IGNORED) { - setState( - node, NodeState.DOWN, "it is IGNORED and a DOWN topology event was received"); - } - } else { - LOG.debug( - "[{}] Received DOWN event for unknown node {}, ignoring it", - logPrefix, - event.broadcastRpcAddress); - } - break; - case FORCE_UP: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - setState(node, NodeState.UP, "a FORCE_UP topology event was received"); - } else { - LOG.debug( - "[{}] Received FORCE_UP event for unknown node {}, adding it", - logPrefix, - event.broadcastRpcAddress); - 
metadataManager.addNode(event.broadcastRpcAddress); - } - break; - case FORCE_DOWN: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - setState(node, NodeState.FORCED_DOWN, "a FORCE_DOWN topology event was received"); - } else { - LOG.debug( - "[{}] Received FORCE_DOWN event for unknown node {}, ignoring it", - logPrefix, - event.broadcastRpcAddress); - } - break; - case SUGGEST_ADDED: - if (maybeNode.isPresent()) { - DefaultNode node = (DefaultNode) maybeNode.get(); - LOG.debug( - "[{}] Received ADDED event for {} but it is already in our metadata, ignoring", - logPrefix, - node); - } else { - metadataManager.addNode(event.broadcastRpcAddress); - } - break; - case SUGGEST_REMOVED: - if (maybeNode.isPresent()) { - metadataManager.removeNode(event.broadcastRpcAddress); - } else { - LOG.debug( - "[{}] Received REMOVED event for {} but it is not in our metadata, ignoring", - logPrefix, - event.broadcastRpcAddress); - } - break; - } - } - - // Called by the event bus, needs debouncing - private void onTopologyEvent(TopologyEvent event) { - assert adminExecutor.inEventLoop(); - topologyEventDebouncer.receive(event); - } - - // Called to process debounced events before flushing - private Collection coalesceTopologyEvents(List events) { - assert adminExecutor.inEventLoop(); - Collection result; - if (events.size() == 1) { - result = events; - } else { - // Keep the last FORCE* event for each node, or if there is none the last normal event - Map last = Maps.newHashMapWithExpectedSize(events.size()); - for (TopologyEvent event : events) { - if (event.isForceEvent() - || !last.containsKey(event.broadcastRpcAddress) - || !last.get(event.broadcastRpcAddress).isForceEvent()) { - last.put(event.broadcastRpcAddress, event); - } - } - result = last.values(); - } - LOG.debug("[{}] Coalesced topology events: {} => {}", logPrefix, events, result); - return result; - } - - // Called when the debouncer flushes - private void 
flushTopologyEvents(Collection events) { - assert adminExecutor.inEventLoop(); - for (TopologyEvent event : events) { - onDebouncedTopologyEvent(event); - } - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - topologyEventDebouncer.stop(); - closeFuture.complete(null); - } - - private void setState(DefaultNode node, NodeState newState, String reason) { - NodeState oldState = node.state; - if (oldState != newState) { - LOG.debug( - "[{}] Transitioning {} {}=>{} (because {})", - logPrefix, - node, - oldState, - newState, - reason); - node.state = newState; - if (newState == NodeState.UP) { - node.upSinceMillis = System.currentTimeMillis(); - } else { - node.upSinceMillis = -1; - } - // Fire the state change event, either immediately, or after a refresh if the node just came - // back up. - // If oldState == UNKNOWN, the node was just added, we already refreshed while processing - // the addition. - if (oldState == NodeState.UNKNOWN || newState != NodeState.UP) { - eventBus.fire(NodeStateEvent.changed(oldState, newState, node)); - } else { - metadataManager - .refreshNode(node) - .whenComplete( - (success, error) -> { - try { - if (error != null) { - Loggers.warnWithException( - LOG, "[{}] Error while refreshing info for {}", logPrefix, node, error); - } - // Fire the event whether the refresh succeeded or not - eventBus.fire(NodeStateEvent.changed(oldState, newState, node)); - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Unexpected exception", logPrefix, t); - } - }); - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java deleted file mode 100644 index befb55e3740..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -abstract class NodesRefresh implements MetadataRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(NodesRefresh.class); - - /** - * @return whether the node's token have changed as a result of this operation (unfortunately we - * mutate the tokens in-place, so there is no way to check this after the fact). 
- */ - protected static boolean copyInfos( - NodeInfo nodeInfo, DefaultNode node, InternalDriverContext context) { - - node.setEndPoint(nodeInfo.getEndPoint(), context); - node.broadcastRpcAddress = nodeInfo.getBroadcastRpcAddress().orElse(null); - node.broadcastAddress = nodeInfo.getBroadcastAddress().orElse(null); - node.listenAddress = nodeInfo.getListenAddress().orElse(null); - node.datacenter = nodeInfo.getDatacenter(); - node.rack = nodeInfo.getRack(); - node.hostId = Objects.requireNonNull(nodeInfo.getHostId()); - node.schemaVersion = nodeInfo.getSchemaVersion(); - String versionString = nodeInfo.getCassandraVersion(); - try { - node.cassandraVersion = Version.parse(versionString); - } catch (IllegalArgumentException e) { - LOG.warn( - "[{}] Error converting Cassandra version '{}' for {}", - context.getSessionName(), - versionString, - node.getEndPoint()); - } - boolean tokensChanged = !node.rawTokens.equals(nodeInfo.getTokens()); - if (tokensChanged) { - node.rawTokens = nodeInfo.getTokens(); - } - node.extras = - (nodeInfo.getExtras() == null) - ? Collections.emptyMap() - : ImmutableMap.copyOf(nodeInfo.getExtras()); - return tokensChanged; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java deleted file mode 100644 index b879e1f2104..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase; -import net.jcip.annotations.ThreadSafe; - -/** - * Default node state listener implementation with empty methods. This implementation is used when - * no listeners were registered, neither programmatically nor through the configuration. - */ -@ThreadSafe -public class NoopNodeStateListener extends NodeStateListenerBase { - - public NoopNodeStateListener(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java deleted file mode 100644 index 4782d72abbb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class PeerRowValidator { - - /** Returns {@code true} if the given peer row is valid, and {@code false} otherwise. */ - public static boolean isValid(@NonNull AdminRow peerRow) { - - boolean hasPeersRpcAddress = !peerRow.isNull("rpc_address"); - boolean hasPeersV2RpcAddress = - !peerRow.isNull("native_address") && !peerRow.isNull("native_port"); - boolean hasRpcAddress = hasPeersRpcAddress || hasPeersV2RpcAddress; - - return hasRpcAddress - && !peerRow.isNull("host_id") - && !peerRow.isNull("data_center") - && !peerRow.isNull("rack") - && !peerRow.isNull("tokens") - && !peerRow.isNull("schema_version"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java deleted file mode 100644 index 46de1989278..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class RemoveNodeRefresh extends NodesRefresh { - - private static final Logger LOG = LoggerFactory.getLogger(RemoveNodeRefresh.class); - - @VisibleForTesting final InetSocketAddress broadcastRpcAddressToRemove; - - RemoveNodeRefresh(InetSocketAddress broadcastRpcAddressToRemove) { - this.broadcastRpcAddressToRemove = broadcastRpcAddressToRemove; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - - String logPrefix = context.getSessionName(); - - Map oldNodes = oldMetadata.getNodes(); - - ImmutableMap.Builder newNodesBuilder = ImmutableMap.builder(); - Node removedNode = null; - for (Node node : oldNodes.values()) { - if (node.getBroadcastRpcAddress().isPresent() - && 
node.getBroadcastRpcAddress().get().equals(broadcastRpcAddressToRemove)) { - removedNode = node; - } else { - assert node.getHostId() != null; // nodes in metadata.getNodes() always have their id set - newNodesBuilder.put(node.getHostId(), node); - } - } - - if (removedNode == null) { - // This should never happen because we already check the event in NodeStateManager, but handle - // just in case. - LOG.debug("[{}] Couldn't find node {} to remove", logPrefix, broadcastRpcAddressToRemove); - return new Result(oldMetadata); - } else { - LOG.debug("[{}] Removing node {}", logPrefix, removedNode); - return new Result( - oldMetadata.withNodes(newNodesBuilder.build(), tokenMapEnabled, false, null, context), - ImmutableList.of(NodeStateEvent.removed((DefaultNode) removedNode))); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java deleted file mode 100644 index c5935dba4bb..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.time.Duration; -import java.util.Iterator; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -class SchemaAgreementChecker { - - private static final Logger LOG = LoggerFactory.getLogger(SchemaAgreementChecker.class); - private static final int INFINITE_PAGE_SIZE = -1; - @VisibleForTesting static final InetAddress BIND_ALL_ADDRESS; - - static { - try { - BIND_ALL_ADDRESS = InetAddress.getByAddress(new byte[4]); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - } - - private final DriverChannel channel; - private final InternalDriverContext context; - private final String logPrefix; - private final Duration queryTimeout; - private final long intervalNs; - private final long 
timeoutNs; - private final boolean warnOnFailure; - private final long start; - private final CompletableFuture result = new CompletableFuture<>(); - - SchemaAgreementChecker(DriverChannel channel, InternalDriverContext context, String logPrefix) { - this.channel = channel; - this.context = context; - this.logPrefix = logPrefix; - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.queryTimeout = config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); - this.intervalNs = - config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL).toNanos(); - this.timeoutNs = - config.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT).toNanos(); - this.warnOnFailure = config.getBoolean(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN); - this.start = System.nanoTime(); - } - - public CompletionStage run() { - LOG.debug("[{}] Checking schema agreement", logPrefix); - if (timeoutNs == 0) { - result.complete(false); - } else { - sendQueries(); - } - return result; - } - - private void sendQueries() { - long elapsedNs = System.nanoTime() - start; - if (elapsedNs > timeoutNs) { - String message = - String.format( - "[%s] Schema agreement not reached after %s", logPrefix, NanoTime.format(elapsedNs)); - if (warnOnFailure) { - LOG.warn(message); - } else { - LOG.debug(message); - } - result.complete(false); - } else { - CompletionStage localQuery = - query("SELECT schema_version FROM system.local WHERE key='local'"); - CompletionStage peersQuery = query("SELECT * FROM system.peers"); - - localQuery - .thenCombine(peersQuery, this::extractSchemaVersions) - .whenComplete(this::completeOrReschedule); - } - } - - private Set extractSchemaVersions(AdminResult controlNodeResult, AdminResult peersResult) { - // Gather the versions of all the nodes that are UP - ImmutableSet.Builder schemaVersions = ImmutableSet.builder(); - - // Control node (implicitly UP, we've just queried it) - Iterator iterator = 
controlNodeResult.iterator(); - if (iterator.hasNext()) { - AdminRow localRow = iterator.next(); - UUID schemaVersion = localRow.getUuid("schema_version"); - if (schemaVersion == null) { - LOG.warn( - "[{}] Missing schema_version for control node {}, " - + "excluding from schema agreement check", - logPrefix, - channel.getEndPoint()); - } else { - schemaVersions.add(schemaVersion); - } - } else { - LOG.warn( - "[{}] Missing system.local row for control node {}, " - + "excluding from schema agreement check", - logPrefix, - channel.getEndPoint()); - } - - Map nodes = context.getMetadataManager().getMetadata().getNodes(); - for (AdminRow peerRow : peersResult) { - if (isPeerValid(peerRow, nodes)) { - UUID schemaVersion = Objects.requireNonNull(peerRow.getUuid("schema_version")); - schemaVersions.add(schemaVersion); - } - } - return schemaVersions.build(); - } - - private void completeOrReschedule(Set uuids, Throwable error) { - if (error != null) { - LOG.debug( - "[{}] Error while checking schema agreement, completing now (false)", logPrefix, error); - result.complete(false); - } else if (uuids.size() == 1) { - LOG.debug( - "[{}] Schema agreement reached ({}), completing", logPrefix, uuids.iterator().next()); - result.complete(true); - } else { - LOG.debug( - "[{}] Schema agreement not reached yet ({}), rescheduling in {}", - logPrefix, - uuids, - NanoTime.format(intervalNs)); - channel - .eventLoop() - .schedule(this::sendQueries, intervalNs, TimeUnit.NANOSECONDS) - .addListener( - f -> { - if (!f.isSuccess()) { - LOG.debug( - "[{}] Error while rescheduling schema agreement, completing now (false)", - logPrefix, - f.cause()); - } - }); - } - } - - @VisibleForTesting - protected CompletionStage query(String queryString) { - return AdminRequestHandler.query( - channel, queryString, queryTimeout, INFINITE_PAGE_SIZE, logPrefix) - .start(); - } - - protected boolean isPeerValid(AdminRow peerRow, Map nodes) { - if (PeerRowValidator.isValid(peerRow)) { - UUID hostId = 
peerRow.getUuid("host_id"); - Node node = nodes.get(hostId); - if (node == null) { - LOG.warn("[{}] Unknown peer {}, excluding from schema agreement check", logPrefix, hostId); - return false; - } else if (node.getState() != NodeState.UP) { - LOG.debug("[{}] Peer {} is down, excluding from schema agreement check", logPrefix, hostId); - return false; - } - return true; - } else { - LOG.warn( - "[{}] Found invalid system.peers row for peer: {}, excluding from schema agreement check.", - logPrefix, - peerRow.getInetAddress("peer")); - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java deleted file mode 100644 index d1ab8eec98d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.Comparator; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; - -public class SniEndPoint implements EndPoint { - private static final AtomicInteger OFFSET = new AtomicInteger(); - - private final InetSocketAddress proxyAddress; - private final String serverName; - - /** - * @param proxyAddress the address of the proxy. If it is {@linkplain - * InetSocketAddress#isUnresolved() unresolved}, each call to {@link #resolve()} will - * re-resolve it, fetch all of its A-records, and if there are more than 1 pick one in a - * round-robin fashion. - * @param serverName the SNI server name. In the context of Cloud, this is the string - * representation of the host id. - */ - public SniEndPoint(InetSocketAddress proxyAddress, String serverName) { - this.proxyAddress = Objects.requireNonNull(proxyAddress, "SNI address cannot be null"); - this.serverName = Objects.requireNonNull(serverName, "SNI Server name cannot be null"); - } - - public String getServerName() { - return serverName; - } - - @NonNull - @Override - public InetSocketAddress resolve() { - try { - InetAddress[] aRecords = InetAddress.getAllByName(proxyAddress.getHostName()); - if (aRecords.length == 0) { - // Probably never happens, but the JDK docs don't explicitly say so - throw new IllegalArgumentException( - "Could not resolve proxy address " + proxyAddress.getHostName()); - } - // The order of the returned address is unspecified. Sort by IP to make sure we get a true - // round-robin - Arrays.sort(aRecords, IP_COMPARATOR); - int index = - (aRecords.length == 1) - ? 
0 - : OFFSET.getAndUpdate(x -> x == Integer.MAX_VALUE ? 0 : x + 1) % aRecords.length; - return new InetSocketAddress(aRecords[index], proxyAddress.getPort()); - } catch (UnknownHostException e) { - throw new IllegalArgumentException( - "Could not resolve proxy address " + proxyAddress.getHostName(), e); - } - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof SniEndPoint) { - SniEndPoint that = (SniEndPoint) other; - return this.proxyAddress.equals(that.proxyAddress) && this.serverName.equals(that.serverName); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(proxyAddress, serverName); - } - - @Override - public String toString() { - // Note that this uses the original proxy address, so if there are multiple A-records it won't - // show which one was selected. If that turns out to be a problem for debugging, we might need - // to store the result of resolve() in Connection and log that instead of the endpoint. 
- return proxyAddress.toString() + ":" + serverName; - } - - @NonNull - @Override - public String asMetricPrefix() { - String hostString = proxyAddress.getHostString(); - if (hostString == null) { - throw new IllegalArgumentException( - "Could not extract a host string from provided proxy address " + proxyAddress); - } - return hostString.replace('.', '_') + ':' + proxyAddress.getPort() + '_' + serverName; - } - - @SuppressWarnings("UnnecessaryLambda") - private static final Comparator IP_COMPARATOR = - (InetAddress address1, InetAddress address2) -> - UnsignedBytes.lexicographicalComparator() - .compare(address1.getAddress(), address2.getAddress()); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java deleted file mode 100644 index 6f60e9a790b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class TokensChangedRefresh implements MetadataRefresh { - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - return new Result( - oldMetadata.withNodes(oldMetadata.getNodes(), tokenMapEnabled, true, null, context)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java deleted file mode 100644 index c7ea8c93088..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.net.InetSocketAddress; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * An event emitted from the {@link TopologyMonitor}, indicating a change in the topology of the - * Cassandra cluster. - * - *

Internally, the driver uses this to handle {@code TOPOLOGY_CHANGE} and {@code STATUS_CHANGE} - * events received on the control connection; for historical reasons, those protocol events identify - * nodes by their (untranslated) {@linkplain Node#getBroadcastRpcAddress() broadcast RPC address}. - * - *

As shown by the names, most of these events are mere suggestions, that the driver might choose - * to ignore if they contradict other information it has about the nodes; see the documentation of - * each factory method for detailed explanations. - */ -@Immutable -public class TopologyEvent { - - public enum Type { - SUGGEST_UP, - SUGGEST_DOWN, - FORCE_UP, - FORCE_DOWN, - SUGGEST_ADDED, - SUGGEST_REMOVED, - } - - /** - * Suggests that a node is up. - * - *

    - *
  • if the node is currently ignored by the driver's load balancing policy, this is reflected - * in the driver metadata's corresponding {@link Node}, for information purposes only. - *
  • otherwise: - *
      - *
    • if the driver already had active connections to that node, this has no effect. - *
    • if the driver was currently reconnecting to the node, this causes the current - * {@link - * com.datastax.oss.driver.api.core.connection.ReconnectionPolicy.ReconnectionSchedule} - * to be reset, and the next reconnection attempt to happen immediately. - *
    - *
- */ - public static TopologyEvent suggestUp(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_UP, broadcastRpcAddress); - } - - /** - * Suggests that a node is down. - * - *
    - *
  • if the node is currently ignored by the driver's load balancing policy, this is reflected - * in the driver metadata's corresponding {@link Node}, for information purposes only. - *
  • otherwise, if the driver still has at least one active connection to that node, this is - * ignored. In other words, a functioning connection is considered a more reliable - * indication than a topology event. - *

    If you want to bypass that behavior and force the node down, use {@link - * #forceDown(InetSocketAddress)}. - *

- */ - public static TopologyEvent suggestDown(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_DOWN, broadcastRpcAddress); - } - - /** - * Forces the driver to set a node down. - * - *
    - *
  • if the node is currently ignored by the driver's load balancing policy, this is reflected - * in the driver metadata, for information purposes only. - *
  • otherwise, all active connections to the node are closed, and any active reconnection is - * cancelled. - *
- * - * In all cases, the driver will never try to reconnect to the node again. If you decide to - * reconnect to it later, use {@link #forceUp(InetSocketAddress)}. - * - *

This is intended for deployments that use a custom {@link TopologyMonitor} (for example if - * you do some kind of maintenance on a live node). This is also used internally by the driver - * when it detects an unrecoverable error, such as a node that does not support the current - * protocol version. - */ - public static TopologyEvent forceDown(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.FORCE_DOWN, broadcastRpcAddress); - } - - /** - * Cancels a previous {@link #forceDown(InetSocketAddress)} event for the node. - * - *

The node will be set back UP. If it is not ignored by the load balancing policy, a - * connection pool will be reopened. - */ - public static TopologyEvent forceUp(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.FORCE_UP, broadcastRpcAddress); - } - - /** - * Suggests that a new node was added in the cluster. - * - *

The driver will ignore this event if the node is already present in its metadata, or if - * information about the node can't be refreshed (i.e. {@link - * TopologyMonitor#getNewNodeInfo(InetSocketAddress)} fails). - */ - public static TopologyEvent suggestAdded(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_ADDED, broadcastRpcAddress); - } - - /** - * Suggests that a node was removed from the cluster. - * - *

The driver ignore this event if the node does not exist in its metadata. - */ - public static TopologyEvent suggestRemoved(InetSocketAddress broadcastRpcAddress) { - return new TopologyEvent(Type.SUGGEST_REMOVED, broadcastRpcAddress); - } - - public final Type type; - - /** - * Note that this is the untranslated broadcast RPC address, as it was received in the - * protocol event. - * - * @see Node#getBroadcastRpcAddress() - */ - public final InetSocketAddress broadcastRpcAddress; - - /** Builds a new instance (the static methods in this class are a preferred alternative). */ - public TopologyEvent(Type type, InetSocketAddress broadcastRpcAddress) { - this.type = type; - this.broadcastRpcAddress = broadcastRpcAddress; - } - - public boolean isForceEvent() { - return type == Type.FORCE_DOWN || type == Type.FORCE_UP; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TopologyEvent) { - TopologyEvent that = (TopologyEvent) other; - return this.type == that.type - && Objects.equals(this.broadcastRpcAddress, that.broadcastRpcAddress); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(this.type, this.broadcastRpcAddress); - } - - @Override - public String toString() { - return "TopologyEvent(" + type + ", " + broadcastRpcAddress + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java deleted file mode 100644 index e7741f11196..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.net.InetSocketAddress; -import java.util.Optional; -import java.util.concurrent.CompletionStage; - -/** - * Monitors the state of the Cassandra cluster. - * - *

It can either push {@link TopologyEvent topology events} to the rest of the driver (to do - * that, retrieve the {@link EventBus}) from the {@link InternalDriverContext}), or receive requests - * to refresh data about the nodes. - * - *

The default implementation uses the control connection: {@code TOPOLOGY_CHANGE} and {@code - * STATUS_CHANGE} events on the connection are converted into {@code TopologyEvent}s, and node - * refreshes are done with queries to system tables. If you prefer to rely on an external monitoring - * tool, this can be completely overridden. - */ -public interface TopologyMonitor extends AsyncAutoCloseable { - - /** - * Triggers the initialization of the monitor. - * - *

The completion of the future returned by this method marks the point when the driver - * considers itself "connected" to the cluster, and proceeds with the rest of the initialization: - * refreshing the list of nodes and the metadata, opening connection pools, etc. By then, the - * topology monitor should be ready to accept calls to its other methods; in particular, {@link - * #refreshNodeList()} will be called shortly after the completion of the future, to load the - * initial list of nodes to connect to. - * - *

If {@code advanced.reconnect-on-init = true} in the configuration, this method is - * responsible for handling reconnection. That is, if the initial attempt to "connect" to the - * cluster fails, it must schedule reattempts, and only complete the returned future when - * connection eventually succeeds. If the user cancels the returned future, then the reconnection - * attempts should stop. - * - *

If this method is called multiple times, it should trigger initialization only once, and - * return the same future on subsequent invocations. - */ - CompletionStage init(); - - /** - * The future returned by {@link #init()}. - * - *

Note that this method may be called before {@link #init()}; at that stage, the future should - * already exist, but be incomplete. - */ - CompletionStage initFuture(); - - /** - * Invoked when the driver needs to refresh the information about an existing node. This is called - * when the node was back and comes back up. - * - *

This will be invoked directly from a driver's internal thread; if the refresh involves - * blocking I/O or heavy computations, it should be scheduled on a separate thread. - * - * @param node the node to refresh. - * @return a future that completes with the information. If the monitor can't fulfill the request - * at this time, it should reply with {@link Optional#empty()}, and the driver will carry on - * with its current information. - */ - CompletionStage> refreshNode(Node node); - - /** - * Invoked when the driver needs to get information about a newly discovered node. - * - *

This will be invoked directly from a driver's internal thread; if the refresh involves - * blocking I/O or heavy computations, it should be scheduled on a separate thread. - * - * @param broadcastRpcAddress the node's broadcast RPC address,. - * @return a future that completes with the information. If the monitor doesn't know any node with - * this address, it should reply with {@link Optional#empty()}; the new node will be ignored. - * @see Node#getBroadcastRpcAddress() - */ - CompletionStage> getNewNodeInfo(InetSocketAddress broadcastRpcAddress); - - /** - * Invoked when the driver needs to refresh information about all the nodes. - * - *

This will be invoked directly from a driver's internal thread; if the refresh involves - * blocking I/O or heavy computations, it should be scheduled on a separate thread. - * - *

The driver calls this at initialization, and uses the result to initialize the {@link - * LoadBalancingPolicy}; successful initialization of the {@link Session} object depends on that - * initial call succeeding. - * - * @return a future that completes with the information. We assume that the full node list will - * always be returned in a single message (no paging). - */ - CompletionStage> refreshNodeList(); - - /** - * Checks whether the nodes in the cluster agree on a common schema version. - * - *

This should typically be implemented with a few retries and a timeout, as the schema can - * take a while to replicate across nodes. - */ - CompletionStage checkSchemaAgreement(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java deleted file mode 100644 index 669f925af65..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.Serializable; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class DefaultAggregateMetadata implements AggregateMetadata, Serializable { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultAggregateMetadata.class); - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final FunctionSignature signature; - @Nullable private final FunctionSignature finalFuncSignature; - @Nullable private final Object initCond; - @Nullable private final String formattedInitCond; - @NonNull private final DataType returnType; - @NonNull private final FunctionSignature stateFuncSignature; - @NonNull private final DataType stateType; - - public DefaultAggregateMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @Nullable FunctionSignature finalFuncSignature, - @Nullable Object initCond, - @NonNull DataType returnType, - @NonNull FunctionSignature stateFuncSignature, - @NonNull DataType stateType, - @NonNull TypeCodec stateTypeCodec) { - this.keyspace = keyspace; - this.signature = signature; - this.finalFuncSignature = finalFuncSignature; - this.initCond = initCond; - this.formattedInitCond = computeFormattedInitCond(initCond, stateTypeCodec); - this.returnType = returnType; - this.stateFuncSignature = stateFuncSignature; - this.stateType = 
stateType; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public FunctionSignature getSignature() { - return signature; - } - - @NonNull - @Override - public Optional getFinalFuncSignature() { - return Optional.ofNullable(finalFuncSignature); - } - - @NonNull - @Override - public Optional getInitCond() { - return Optional.ofNullable(initCond); - } - - @NonNull - @Override - public DataType getReturnType() { - return returnType; - } - - @NonNull - @Override - public FunctionSignature getStateFuncSignature() { - return stateFuncSignature; - } - - @NonNull - @Override - public DataType getStateType() { - return stateType; - } - - @NonNull - @Override - public Optional formatInitCond() { - return Optional.ofNullable(this.formattedInitCond); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof AggregateMetadata) { - AggregateMetadata that = (AggregateMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.signature, that.getSignature()) - && Objects.equals(this.finalFuncSignature, that.getFinalFuncSignature().orElse(null)) - && Objects.equals(this.initCond, that.getInitCond().orElse(null)) - && Objects.equals(this.returnType, that.getReturnType()) - && Objects.equals(this.stateFuncSignature, that.getStateFuncSignature()) - && Objects.equals(this.stateType, that.getStateType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, - signature, - finalFuncSignature, - initCond, - returnType, - stateFuncSignature, - stateType); - } - - @Override - public String toString() { - return "DefaultAggregateMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." 
- + signature - + ")"; - } - - @Nullable - private String computeFormattedInitCond( - @Nullable Object initCond, @NonNull TypeCodec stateTypeCodec) { - - if (initCond == null) { - return null; - } - try { - return stateTypeCodec.format(initCond); - } catch (Throwable t) { - LOG.warn( - String.format( - "Failed to format INITCOND for %s.%s, using toString instead", - keyspace.asInternal(), signature.getName().asInternal())); - return initCond.toString(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java deleted file mode 100644 index 3d0c6209880..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultColumnMetadata implements ColumnMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier parent; - @NonNull private final CqlIdentifier name; - @NonNull private final DataType dataType; - private final boolean isStatic; - - public DefaultColumnMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier parent, - @NonNull CqlIdentifier name, - @NonNull DataType dataType, - boolean isStatic) { - this.keyspace = keyspace; - this.parent = parent; - this.name = name; - this.dataType = dataType; - this.isStatic = isStatic; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getParent() { - return parent; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @NonNull - @Override - public DataType getType() { - return dataType; - } - - @Override - public boolean isStatic() { - return isStatic; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ColumnMetadata) { - ColumnMetadata that = (ColumnMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.parent, that.getParent()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.dataType, that.getType()) - && this.isStatic == that.isStatic(); - } else { - return false; - } - } - - @Override - public int hashCode() { - return 
Objects.hash(keyspace, parent, name, dataType, isStatic); - } - - @Override - public String toString() { - return "DefaultColumnMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + parent.asInternal() - + "." - + name.asInternal() - + " " - + dataType.asCql(true, false) - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java deleted file mode 100644 index 75b343d77b1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultFunctionMetadata implements FunctionMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final FunctionSignature signature; - @NonNull private final List parameterNames; - @NonNull private final String body; - private final boolean calledOnNullInput; - @NonNull private final String language; - @NonNull private final DataType returnType; - - public DefaultFunctionMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull FunctionSignature signature, - @NonNull List parameterNames, - @NonNull String body, - boolean calledOnNullInput, - @NonNull String language, - @NonNull DataType returnType) { - Preconditions.checkArgument( - signature.getParameterTypes().size() == parameterNames.size(), - "Number of parameter names should match number of types in the signature (got %s and %s)", - parameterNames.size(), - signature.getParameterTypes().size()); - this.keyspace = keyspace; - this.signature = signature; - this.parameterNames = parameterNames; - this.body = body; - this.calledOnNullInput = calledOnNullInput; - this.language = language; - this.returnType = returnType; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public FunctionSignature getSignature() { - return signature; - } - - @NonNull - @Override - public 
List getParameterNames() { - return parameterNames; - } - - @NonNull - @Override - public String getBody() { - return body; - } - - @Override - public boolean isCalledOnNullInput() { - return calledOnNullInput; - } - - @NonNull - @Override - public String getLanguage() { - return language; - } - - @NonNull - @Override - public DataType getReturnType() { - return returnType; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FunctionMetadata) { - FunctionMetadata that = (FunctionMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.signature, that.getSignature()) - && Objects.equals(this.parameterNames, that.getParameterNames()) - && Objects.equals(this.body, that.getBody()) - && this.calledOnNullInput == that.isCalledOnNullInput() - && Objects.equals(this.language, that.getLanguage()) - && Objects.equals(this.returnType, that.getReturnType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - keyspace, signature, parameterNames, body, calledOnNullInput, language, returnType); - } - - @Override - public String toString() { - return "DefaultFunctionMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + signature - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java deleted file mode 100644 index 8ff0263fcc8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultIndexMetadata implements IndexMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier keyspace; - @NonNull private final CqlIdentifier table; - @NonNull private final CqlIdentifier name; - @NonNull private final IndexKind kind; - @NonNull private final String target; - @NonNull private final Map options; - - public DefaultIndexMetadata( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier table, - @NonNull CqlIdentifier name, - @NonNull IndexKind kind, - @NonNull String target, - @NonNull Map options) { - this.keyspace = keyspace; - this.table = table; - this.name = name; - this.kind = kind; - this.target = target; - this.options = options; - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getTable() { - return table; - } - - @NonNull - @Override - public CqlIdentifier 
getName() { - return name; - } - - @NonNull - @Override - public IndexKind getKind() { - return kind; - } - - @NonNull - @Override - public String getTarget() { - return target; - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof IndexMetadata) { - IndexMetadata that = (IndexMetadata) other; - return Objects.equals(this.keyspace, that.getKeyspace()) - && Objects.equals(this.table, that.getTable()) - && Objects.equals(this.name, that.getName()) - && Objects.equals(this.kind, that.getKind()) - && Objects.equals(this.target, that.getTarget()) - && Objects.equals(this.options, that.getOptions()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(keyspace, table, name, kind, target, options); - } - - @Override - public String toString() { - return "DefaultIndexMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + table.asInternal() - + "." - + name.asInternal() - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java deleted file mode 100644 index 3d443dd8c16..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultKeyspaceMetadata implements KeyspaceMetadata, Serializable { - - private static final long serialVersionUID = 1; - - @NonNull private final CqlIdentifier name; - private final boolean durableWrites; - private final boolean virtual; - @NonNull private final Map replication; - @NonNull private final Map types; - @NonNull private final Map tables; - @NonNull private final Map views; - @NonNull private final Map functions; - @NonNull private final Map aggregates; - - public DefaultKeyspaceMetadata( - @NonNull CqlIdentifier name, - boolean durableWrites, - boolean virtual, - @NonNull Map replication, - @NonNull Map types, - @NonNull Map tables, - @NonNull Map views, - @NonNull Map functions, - @NonNull Map aggregates) { - this.name = 
name; - this.durableWrites = durableWrites; - this.virtual = virtual; - this.replication = replication; - this.types = types; - this.tables = tables; - this.views = views; - this.functions = functions; - this.aggregates = aggregates; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isDurableWrites() { - return durableWrites; - } - - @Override - public boolean isVirtual() { - return virtual; - } - - @NonNull - @Override - public Map getReplication() { - return replication; - } - - @NonNull - @Override - public Map getUserDefinedTypes() { - return types; - } - - @NonNull - @Override - public Map getTables() { - return tables; - } - - @NonNull - @Override - public Map getViews() { - return views; - } - - @NonNull - @Override - public Map getFunctions() { - return functions; - } - - @NonNull - @Override - public Map getAggregates() { - return aggregates; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof KeyspaceMetadata) { - KeyspaceMetadata that = (KeyspaceMetadata) other; - return Objects.equals(this.name, that.getName()) - && this.durableWrites == that.isDurableWrites() - && this.virtual == that.isVirtual() - && Objects.equals(this.replication, that.getReplication()) - && Objects.equals(this.types, that.getUserDefinedTypes()) - && Objects.equals(this.tables, that.getTables()) - && Objects.equals(this.views, that.getViews()) - && Objects.equals(this.functions, that.getFunctions()) - && Objects.equals(this.aggregates, that.getAggregates()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash( - name, durableWrites, virtual, replication, types, tables, views, functions, aggregates); - } - - @Override - public String toString() { - return "DefaultKeyspaceMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + name.asInternal() - + ")"; - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java deleted file mode 100644 index 4c339f89299..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
package com.datastax.oss.driver.internal.core.metadata.schema;

import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder;
import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import net.jcip.annotations.Immutable;

/**
 * Default, immutable implementation of {@link TableMetadata}: a simple value holder for the
 * metadata of one table, populated by the schema parser.
 *
 * <p>All collections are stored as-is; callers are expected to pass immutable instances.
 */
@Immutable
public class DefaultTableMetadata implements TableMetadata, Serializable {

  private static final long serialVersionUID = 1;

  @NonNull private final CqlIdentifier keyspace;
  @NonNull private final CqlIdentifier name;
  // null for virtual tables
  @Nullable private final UUID id;
  private final boolean compactStorage;
  private final boolean virtual;
  @NonNull private final List<ColumnMetadata> partitionKey;
  @NonNull private final Map<ColumnMetadata, ClusteringOrder> clusteringColumns;
  @NonNull private final Map<CqlIdentifier, ColumnMetadata> columns;
  @NonNull private final Map<CqlIdentifier, Object> options;
  @NonNull private final Map<CqlIdentifier, IndexMetadata> indexes;

  public DefaultTableMetadata(
      @NonNull CqlIdentifier keyspace,
      @NonNull CqlIdentifier name,
      @Nullable UUID id,
      boolean compactStorage,
      boolean virtual,
      @NonNull List<ColumnMetadata> partitionKey,
      @NonNull Map<ColumnMetadata, ClusteringOrder> clusteringColumns,
      @NonNull Map<CqlIdentifier, ColumnMetadata> columns,
      @NonNull Map<CqlIdentifier, Object> options,
      @NonNull Map<CqlIdentifier, IndexMetadata> indexes) {
    this.keyspace = keyspace;
    this.name = name;
    this.id = id;
    this.compactStorage = compactStorage;
    this.virtual = virtual;
    this.partitionKey = partitionKey;
    this.clusteringColumns = clusteringColumns;
    this.columns = columns;
    this.options = options;
    this.indexes = indexes;
  }

  @NonNull
  @Override
  public CqlIdentifier getKeyspace() {
    return keyspace;
  }

  @NonNull
  @Override
  public CqlIdentifier getName() {
    return name;
  }

  @NonNull
  @Override
  public Optional<UUID> getId() {
    // Empty for virtual tables, which have no server-assigned id.
    return Optional.ofNullable(id);
  }

  @Override
  public boolean isCompactStorage() {
    return compactStorage;
  }

  @Override
  public boolean isVirtual() {
    return virtual;
  }

  @NonNull
  @Override
  public List<ColumnMetadata> getPartitionKey() {
    return partitionKey;
  }

  @NonNull
  @Override
  public Map<ColumnMetadata, ClusteringOrder> getClusteringColumns() {
    return clusteringColumns;
  }

  @NonNull
  @Override
  public Map<CqlIdentifier, ColumnMetadata> getColumns() {
    return columns;
  }

  @NonNull
  @Override
  public Map<CqlIdentifier, Object> getOptions() {
    return options;
  }

  @NonNull
  @Override
  public Map<CqlIdentifier, IndexMetadata> getIndexes() {
    return indexes;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof TableMetadata) {
      // Compare against the interface, not this class, so that any TableMetadata
      // implementation with equal contents is considered equal.
      TableMetadata that = (TableMetadata) other;
      return Objects.equals(this.keyspace, that.getKeyspace())
          && Objects.equals(this.name, that.getName())
          && Objects.equals(Optional.ofNullable(this.id), that.getId())
          && this.compactStorage == that.isCompactStorage()
          && this.virtual == that.isVirtual()
          && Objects.equals(this.partitionKey, that.getPartitionKey())
          && Objects.equals(this.clusteringColumns, that.getClusteringColumns())
          && Objects.equals(this.columns, that.getColumns())
          && Objects.equals(this.indexes, that.getIndexes())
          && Objects.equals(this.options, that.getOptions());
    } else {
      return false;
    }
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        keyspace,
        name,
        id,
        compactStorage,
        virtual,
        partitionKey,
        clusteringColumns,
        columns,
        indexes,
        options);
  }

  @Override
  public String toString() {
    return "DefaultTableMetadata@"
        + Integer.toHexString(hashCode())
        + "("
        + keyspace.asInternal()
        + "."
        + name.asInternal()
        + ")";
  }
}
- + name.asInternal() - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java deleted file mode 100644 index 2c5e5a9603e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
package com.datastax.oss.driver.internal.core.metadata.schema;

import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder;
import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import net.jcip.annotations.Immutable;

/**
 * Default, immutable implementation of {@link ViewMetadata}: a simple value holder for the
 * metadata of one materialized view, populated by the schema parser.
 */
@Immutable
public class DefaultViewMetadata implements ViewMetadata, Serializable {

  private static final long serialVersionUID = 1;

  @NonNull private final CqlIdentifier keyspace;
  @NonNull private final CqlIdentifier name;
  @NonNull private final CqlIdentifier baseTable;
  private final boolean includesAllColumns;
  // null when the server did not report a WHERE clause
  @Nullable private final String whereClause;
  @NonNull private final UUID id;
  @NonNull private final ImmutableList<ColumnMetadata> partitionKey;
  @NonNull private final ImmutableMap<ColumnMetadata, ClusteringOrder> clusteringColumns;
  @NonNull private final ImmutableMap<CqlIdentifier, ColumnMetadata> columns;
  @NonNull private final Map<CqlIdentifier, Object> options;

  public DefaultViewMetadata(
      @NonNull CqlIdentifier keyspace,
      @NonNull CqlIdentifier name,
      @NonNull CqlIdentifier baseTable,
      boolean includesAllColumns,
      @Nullable String whereClause,
      @NonNull UUID id,
      @NonNull ImmutableList<ColumnMetadata> partitionKey,
      @NonNull ImmutableMap<ColumnMetadata, ClusteringOrder> clusteringColumns,
      @NonNull ImmutableMap<CqlIdentifier, ColumnMetadata> columns,
      @NonNull Map<CqlIdentifier, Object> options) {
    this.keyspace = keyspace;
    this.name = name;
    this.baseTable = baseTable;
    this.includesAllColumns = includesAllColumns;
    this.whereClause = whereClause;
    this.id = id;
    this.partitionKey = partitionKey;
    this.clusteringColumns = clusteringColumns;
    this.columns = columns;
    this.options = options;
  }

  @NonNull
  @Override
  public CqlIdentifier getKeyspace() {
    return keyspace;
  }

  @NonNull
  @Override
  public CqlIdentifier getName() {
    return name;
  }

  @NonNull
  @Override
  public Optional<UUID> getId() {
    // Unlike tables, a view always has an id.
    return Optional.of(id);
  }

  @NonNull
  @Override
  public CqlIdentifier getBaseTable() {
    return baseTable;
  }

  @Override
  public boolean includesAllColumns() {
    return includesAllColumns;
  }

  @NonNull
  @Override
  public Optional<String> getWhereClause() {
    return Optional.ofNullable(whereClause);
  }

  @NonNull
  @Override
  public List<ColumnMetadata> getPartitionKey() {
    return partitionKey;
  }

  @NonNull
  @Override
  public Map<ColumnMetadata, ClusteringOrder> getClusteringColumns() {
    return clusteringColumns;
  }

  @NonNull
  @Override
  public Map<CqlIdentifier, ColumnMetadata> getColumns() {
    return columns;
  }

  @NonNull
  @Override
  public Map<CqlIdentifier, Object> getOptions() {
    return options;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof ViewMetadata) {
      // Compare against the interface so any ViewMetadata with equal contents matches.
      ViewMetadata that = (ViewMetadata) other;
      return Objects.equals(this.keyspace, that.getKeyspace())
          && Objects.equals(this.name, that.getName())
          && Objects.equals(this.baseTable, that.getBaseTable())
          && this.includesAllColumns == that.includesAllColumns()
          && Objects.equals(this.whereClause, that.getWhereClause().orElse(null))
          && Objects.equals(Optional.of(this.id), that.getId())
          && Objects.equals(this.partitionKey, that.getPartitionKey())
          && Objects.equals(this.clusteringColumns, that.getClusteringColumns())
          && Objects.equals(this.columns, that.getColumns())
          && Objects.equals(this.options, that.getOptions());
    } else {
      return false;
    }
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        keyspace,
        name,
        baseTable,
        includesAllColumns,
        whereClause,
        id,
        partitionKey,
        clusteringColumns,
        columns,
        options);
  }

  @Override
  public String toString() {
    return "DefaultViewMetadata@"
        + Integer.toHexString(hashCode())
        + "("
        + keyspace.asInternal()
        + "."
        + name.asInternal()
        + ")";
  }
}
return "DefaultViewMetadata@" - + Integer.toHexString(hashCode()) - + "(" - + keyspace.asInternal() - + "." - + name.asInternal() - + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java deleted file mode 100644 index eebe16364d1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
package com.datastax.oss.driver.internal.core.metadata.schema;

import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener;
import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import com.datastax.oss.driver.internal.core.util.Loggers;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;
import net.jcip.annotations.ThreadSafe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Combines multiple schema change listeners into a single one.
 *
 * <p>Any exception thrown by a child listener is caught and logged.
 */
@ThreadSafe
public class MultiplexingSchemaChangeListener implements SchemaChangeListener {

  private static final Logger LOG = LoggerFactory.getLogger(MultiplexingSchemaChangeListener.class);

  // CopyOnWriteArrayList: registration is rare, notification is frequent.
  private final List<SchemaChangeListener> listeners = new CopyOnWriteArrayList<>();

  public MultiplexingSchemaChangeListener() {}

  public MultiplexingSchemaChangeListener(SchemaChangeListener... listeners) {
    this(Arrays.asList(listeners));
  }

  public MultiplexingSchemaChangeListener(Collection<SchemaChangeListener> listeners) {
    addListeners(listeners);
  }

  private void addListeners(Collection<SchemaChangeListener> source) {
    for (SchemaChangeListener listener : source) {
      addListener(listener);
    }
  }

  private void addListener(SchemaChangeListener toAdd) {
    Objects.requireNonNull(toAdd, "listener cannot be null");
    // Flatten nested multiplexers so we never build a chain of wrappers.
    if (toAdd instanceof MultiplexingSchemaChangeListener) {
      addListeners(((MultiplexingSchemaChangeListener) toAdd).listeners);
    } else {
      listeners.add(toAdd);
    }
  }

  /** Registers a new child listener; it will receive all subsequent events. */
  public void register(@NonNull SchemaChangeListener listener) {
    addListener(listener);
  }

  @Override
  public void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace) {
    invokeListeners(listener -> listener.onKeyspaceCreated(keyspace), "onKeyspaceCreated");
  }

  @Override
  public void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace) {
    invokeListeners(listener -> listener.onKeyspaceDropped(keyspace), "onKeyspaceDropped");
  }

  @Override
  public void onKeyspaceUpdated(
      @NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous) {
    invokeListeners(listener -> listener.onKeyspaceUpdated(current, previous), "onKeyspaceUpdated");
  }

  @Override
  public void onTableCreated(@NonNull TableMetadata table) {
    invokeListeners(listener -> listener.onTableCreated(table), "onTableCreated");
  }

  @Override
  public void onTableDropped(@NonNull TableMetadata table) {
    invokeListeners(listener -> listener.onTableDropped(table), "onTableDropped");
  }

  @Override
  public void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous) {
    invokeListeners(listener -> listener.onTableUpdated(current, previous), "onTableUpdated");
  }

  @Override
  public void onUserDefinedTypeCreated(@NonNull UserDefinedType type) {
    invokeListeners(
        listener -> listener.onUserDefinedTypeCreated(type), "onUserDefinedTypeCreated");
  }

  @Override
  public void onUserDefinedTypeDropped(@NonNull UserDefinedType type) {
    invokeListeners(
        listener -> listener.onUserDefinedTypeDropped(type), "onUserDefinedTypeDropped");
  }

  @Override
  public void onUserDefinedTypeUpdated(
      @NonNull UserDefinedType current, @NonNull UserDefinedType previous) {
    invokeListeners(
        listener -> listener.onUserDefinedTypeUpdated(current, previous),
        "onUserDefinedTypeUpdated");
  }

  @Override
  public void onFunctionCreated(@NonNull FunctionMetadata function) {
    invokeListeners(listener -> listener.onFunctionCreated(function), "onFunctionCreated");
  }

  @Override
  public void onFunctionDropped(@NonNull FunctionMetadata function) {
    invokeListeners(listener -> listener.onFunctionDropped(function), "onFunctionDropped");
  }

  @Override
  public void onFunctionUpdated(
      @NonNull FunctionMetadata current, @NonNull FunctionMetadata previous) {
    invokeListeners(listener -> listener.onFunctionUpdated(current, previous), "onFunctionUpdated");
  }

  @Override
  public void onAggregateCreated(@NonNull AggregateMetadata aggregate) {
    invokeListeners(listener -> listener.onAggregateCreated(aggregate), "onAggregateCreated");
  }

  @Override
  public void onAggregateDropped(@NonNull AggregateMetadata aggregate) {
    invokeListeners(listener -> listener.onAggregateDropped(aggregate), "onAggregateDropped");
  }

  @Override
  public void onAggregateUpdated(
      @NonNull AggregateMetadata current, @NonNull AggregateMetadata previous) {
    invokeListeners(
        listener -> listener.onAggregateUpdated(current, previous), "onAggregateUpdated");
  }

  @Override
  public void onViewCreated(@NonNull ViewMetadata view) {
    invokeListeners(listener -> listener.onViewCreated(view), "onViewCreated");
  }

  @Override
  public void onViewDropped(@NonNull ViewMetadata view) {
    invokeListeners(listener -> listener.onViewDropped(view), "onViewDropped");
  }

  @Override
  public void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous) {
    invokeListeners(listener -> listener.onViewUpdated(current, previous), "onViewUpdated");
  }

  @Override
  public void onSessionReady(@NonNull Session session) {
    invokeListeners(listener -> listener.onSessionReady(session), "onSessionReady");
  }

  @Override
  public void close() throws Exception {
    // Best-effort: close every child even if some of them fail.
    for (SchemaChangeListener listener : listeners) {
      try {
        listener.close();
      } catch (Exception e) {
        Loggers.warnWithException(
            LOG, "Unexpected error while closing schema change listener {}.", listener, e);
      }
    }
  }

  /**
   * Applies {@code action} to every child listener, logging (but not propagating) any exception.
   *
   * @param event the event name, only used for the log message.
   */
  private void invokeListeners(@NonNull Consumer<SchemaChangeListener> action, String event) {
    for (SchemaChangeListener listener : listeners) {
      try {
        action.accept(listener);
      } catch (Exception e) {
        Loggers.warnWithException(
            LOG,
            "Unexpected error while notifying schema change listener {} of an {} event.",
            listener,
            event,
            e);
      }
    }
  }
}
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListenerBase; -import net.jcip.annotations.ThreadSafe; - -/** - * Default schema change listener implementation with empty methods. This implementation is used - * when no listeners were registered, neither programmatically nor through the configuration. - */ -@ThreadSafe -public class NoopSchemaChangeListener extends SchemaChangeListenerBase { - - public NoopSchemaChangeListener(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java deleted file mode 100644 index 5f01d019ee0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -public enum SchemaChangeType { - CREATED, - UPDATED, - DROPPED, - ; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java deleted file mode 100644 index b762f35b885..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
package com.datastax.oss.driver.internal.core.metadata.schema;

import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.metadata.schema.Describable;
import com.datastax.oss.driver.shaded.guava.common.base.Strings;
import java.util.function.Consumer;
import net.jcip.annotations.NotThreadSafe;

/**
 * A simple builder that is used internally for the queries of {@link Describable} schema elements.
 *
 * <p>When {@code pretty} is true, output is indented and line breaks are real newlines; otherwise
 * everything is emitted on a single line with spaces.
 */
@NotThreadSafe
public class ScriptBuilder {
  private static final int INDENT_SIZE = 4;

  private final boolean pretty;
  private final StringBuilder builder = new StringBuilder();
  private int indent;
  // True right after a line break, so the next append() knows to emit indentation.
  private boolean isAtLineStart;
  private boolean isFirstOption = true;

  public ScriptBuilder(boolean pretty) {
    this.pretty = pretty;
  }

  public ScriptBuilder append(String s) {
    if (pretty && isAtLineStart && indent > 0) {
      builder.append(Strings.repeat(" ", indent * INDENT_SIZE));
    }
    isAtLineStart = false;
    builder.append(s);
    return this;
  }

  public ScriptBuilder append(CqlIdentifier id) {
    // In pretty mode, use the "pretty" CQL form of the identifier as well.
    append(id.asCql(pretty));
    return this;
  }

  /** Breaks the line in pretty mode, or just appends a space otherwise. */
  public ScriptBuilder newLine() {
    if (pretty) {
      builder.append('\n');
    } else {
      builder.append(' ');
    }
    isAtLineStart = true;
    return this;
  }

  /** Appends {@code count} literal newlines, regardless of pretty mode. */
  public ScriptBuilder forceNewLine(int count) {
    builder.append(Strings.repeat("\n", count));
    isAtLineStart = true;
    return this;
  }

  public ScriptBuilder increaseIndent() {
    indent += 1;
    return this;
  }

  public ScriptBuilder decreaseIndent() {
    if (indent > 0) {
      indent -= 1;
    }
    return this;
  }

  /** Appends "WITH " the first time it's called, then "AND " the next times. */
  public ScriptBuilder andWith() {
    if (isFirstOption) {
      append(" WITH ");
      isFirstOption = false;
    } else {
      newLine();
      append("AND ");
    }
    return this;
  }

  /** Applies {@code action} to each element of {@code iterable}, returning this builder. */
  public <E> ScriptBuilder forEach(Iterable<E> iterable, Consumer<E> action) {
    for (E e : iterable) {
      action.accept(e);
    }
    return this;
  }

  public String build() {
    return builder.toString();
  }
}
package com.datastax.oss.driver.internal.core.metadata.schema;

import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.data.UdtValue;
import com.datastax.oss.driver.api.core.detach.AttachmentPoint;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import com.datastax.oss.driver.internal.core.metadata.schema.parsing.UserDefinedTypeParser;
import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.List;
import net.jcip.annotations.Immutable;

/**
 * A temporary UDT implementation that only contains the keyspace and name.
 *
 * <p>When we refresh a keyspace's UDTs, we can't fully materialize them right away, because they
 * might depend on each other and the system table query does not return them in topological order.
 * So we do a first pass where UDTs that are nested into other UDTs are resolved as instances of
 * this class, then a topological sort, then a second pass to replace all shallow definitions by the
 * actual instance (which will be a {@link DefaultUserDefinedType}).
 *
 * <p>This type is also used in the schema builder's internal representation: the keyspace, name and
 * frozen-ness are the only things we need to generate a query string.
 *
 * @see UserDefinedTypeParser
 */
@Immutable
public class ShallowUserDefinedType implements UserDefinedType, Serializable {

  private static final long serialVersionUID = 1;

  private final CqlIdentifier keyspace;
  private final CqlIdentifier name;
  private final boolean frozen;

  public ShallowUserDefinedType(CqlIdentifier keyspace, CqlIdentifier name, boolean frozen) {
    this.keyspace = keyspace;
    this.name = name;
    this.frozen = frozen;
  }

  @Nullable
  @Override
  public CqlIdentifier getKeyspace() {
    return keyspace;
  }

  @NonNull
  @Override
  public CqlIdentifier getName() {
    return name;
  }

  @Override
  public boolean isFrozen() {
    return frozen;
  }

  // All remaining operations require the full definition, which this shallow placeholder does
  // not carry; reaching them indicates the placeholder leaked out of the parsing phase.

  @NonNull
  @Override
  public List<CqlIdentifier> getFieldNames() {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public List<Integer> allIndicesOf(@NonNull CqlIdentifier id) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @Override
  public int firstIndexOf(@NonNull CqlIdentifier id) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public List<Integer> allIndicesOf(@NonNull String name) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @Override
  public int firstIndexOf(@NonNull String name) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public List<DataType> getFieldTypes() {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public UserDefinedType copy(boolean newFrozen) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public UdtValue newValue() {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public UdtValue newValue(@NonNull Object... fields) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @NonNull
  @Override
  public AttachmentPoint getAttachmentPoint() {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @Override
  public boolean isDetached() {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  @Override
  public void attach(@NonNull AttachmentPoint attachmentPoint) {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  // Serialization is deliberately blocked: a shallow placeholder must never cross a process
  // boundary.

  private void readObject(@SuppressWarnings("unused") ObjectInputStream s) throws IOException {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }

  private void writeObject(@SuppressWarnings("unused") ObjectOutputStream s) throws IOException {
    throw new UnsupportedOperationException(
        "This implementation should only be used internally, this is likely a driver bug");
  }
}
package com.datastax.oss.driver.internal.core.metadata.schema.events;

import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata;
import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType;
import java.util.Objects;
import net.jcip.annotations.Immutable;

/**
 * Internal event describing a change (creation, update or drop) to an aggregate. Instances are
 * built through the static factories; exactly one of the two metadata fields is {@code null} for
 * creations and drops.
 */
@Immutable
public class AggregateChangeEvent {

  public static AggregateChangeEvent dropped(AggregateMetadata oldAggregate) {
    return new AggregateChangeEvent(SchemaChangeType.DROPPED, oldAggregate, null);
  }

  public static AggregateChangeEvent created(AggregateMetadata newAggregate) {
    return new AggregateChangeEvent(SchemaChangeType.CREATED, null, newAggregate);
  }

  public static AggregateChangeEvent updated(
      AggregateMetadata oldAggregate, AggregateMetadata newAggregate) {
    return new AggregateChangeEvent(SchemaChangeType.UPDATED, oldAggregate, newAggregate);
  }

  public final SchemaChangeType changeType;
  /** {@code null} if the event is a creation */
  public final AggregateMetadata oldAggregate;
  /** {@code null} if the event is a drop */
  public final AggregateMetadata newAggregate;

  private AggregateChangeEvent(
      SchemaChangeType changeType, AggregateMetadata oldAggregate, AggregateMetadata newAggregate) {
    this.changeType = changeType;
    this.oldAggregate = oldAggregate;
    this.newAggregate = newAggregate;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (!(other instanceof AggregateChangeEvent)) {
      return false;
    }
    AggregateChangeEvent event = (AggregateChangeEvent) other;
    return this.changeType == event.changeType
        && Objects.equals(this.oldAggregate, event.oldAggregate)
        && Objects.equals(this.newAggregate, event.newAggregate);
  }

  @Override
  public int hashCode() {
    return Objects.hash(changeType, oldAggregate, newAggregate);
  }

  @Override
  public String toString() {
    if (changeType == SchemaChangeType.CREATED) {
      return String.format("AggregateChangeEvent(CREATED %s)", newAggregate.getSignature());
    }
    if (changeType == SchemaChangeType.UPDATED) {
      return String.format(
          "AggregateChangeEvent(UPDATED %s=>%s)",
          oldAggregate.getSignature(), newAggregate.getSignature());
    }
    if (changeType == SchemaChangeType.DROPPED) {
      return String.format("AggregateChangeEvent(DROPPED %s)", oldAggregate.getSignature());
    }
    throw new IllegalStateException("Unsupported change type " + changeType);
  }
}
package com.datastax.oss.driver.internal.core.metadata.schema.events;

import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata;
import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType;
import java.util.Objects;
import net.jcip.annotations.Immutable;

/**
 * Internal event describing a change (creation, update or drop) to a function. Instances are
 * built through the static factories; exactly one of the two metadata fields is {@code null} for
 * creations and drops.
 */
@Immutable
public class FunctionChangeEvent {

  public static FunctionChangeEvent dropped(FunctionMetadata oldFunction) {
    return new FunctionChangeEvent(SchemaChangeType.DROPPED, oldFunction, null);
  }

  public static FunctionChangeEvent created(FunctionMetadata newFunction) {
    return new FunctionChangeEvent(SchemaChangeType.CREATED, null, newFunction);
  }

  public static FunctionChangeEvent updated(
      FunctionMetadata oldFunction, FunctionMetadata newFunction) {
    return new FunctionChangeEvent(SchemaChangeType.UPDATED, oldFunction, newFunction);
  }

  public final SchemaChangeType changeType;
  /** {@code null} if the event is a creation */
  public final FunctionMetadata oldFunction;
  /** {@code null} if the event is a drop */
  public final FunctionMetadata newFunction;

  private FunctionChangeEvent(
      SchemaChangeType changeType, FunctionMetadata oldFunction, FunctionMetadata newFunction) {
    this.changeType = changeType;
    this.oldFunction = oldFunction;
    this.newFunction = newFunction;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (!(other instanceof FunctionChangeEvent)) {
      return false;
    }
    FunctionChangeEvent event = (FunctionChangeEvent) other;
    return this.changeType == event.changeType
        && Objects.equals(this.oldFunction, event.oldFunction)
        && Objects.equals(this.newFunction, event.newFunction);
  }

  @Override
  public int hashCode() {
    return Objects.hash(changeType, oldFunction, newFunction);
  }

  @Override
  public String toString() {
    if (changeType == SchemaChangeType.CREATED) {
      return String.format("FunctionChangeEvent(CREATED %s)", newFunction.getSignature());
    }
    if (changeType == SchemaChangeType.UPDATED) {
      return String.format(
          "FunctionChangeEvent(UPDATED %s=>%s)",
          oldFunction.getSignature(), newFunction.getSignature());
    }
    if (changeType == SchemaChangeType.DROPPED) {
      return String.format("FunctionChangeEvent(DROPPED %s)", oldFunction.getSignature());
    }
    throw new IllegalStateException("Unsupported change type " + changeType);
  }
}
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class KeyspaceChangeEvent { - - public static KeyspaceChangeEvent dropped(KeyspaceMetadata oldKeyspace) { - return new KeyspaceChangeEvent(SchemaChangeType.DROPPED, oldKeyspace, null); - } - - public static KeyspaceChangeEvent created(KeyspaceMetadata newKeyspace) { - return new KeyspaceChangeEvent(SchemaChangeType.CREATED, null, newKeyspace); - } - - public static KeyspaceChangeEvent updated( - KeyspaceMetadata oldKeyspace, KeyspaceMetadata newKeyspace) { - return new KeyspaceChangeEvent(SchemaChangeType.UPDATED, oldKeyspace, newKeyspace); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final KeyspaceMetadata oldKeyspace; - /** {@code null} if the event is a drop */ - public final KeyspaceMetadata newKeyspace; - - private KeyspaceChangeEvent( - SchemaChangeType changeType, KeyspaceMetadata oldKeyspace, KeyspaceMetadata newKeyspace) { - this.changeType = changeType; - this.oldKeyspace = oldKeyspace; - this.newKeyspace = newKeyspace; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof KeyspaceChangeEvent) { - KeyspaceChangeEvent that = (KeyspaceChangeEvent) other; - return this.changeType == 
that.changeType - && Objects.equals(this.oldKeyspace, that.oldKeyspace) - && Objects.equals(this.newKeyspace, that.newKeyspace); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldKeyspace, newKeyspace); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("KeyspaceChangeEvent(CREATED %s)", newKeyspace.getName()); - case UPDATED: - return String.format( - "KeyspaceChangeEvent(UPDATED %s=>%s)", oldKeyspace.getName(), newKeyspace.getName()); - case DROPPED: - return String.format("KeyspaceChangeEvent(DROPPED %s)", oldKeyspace.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java deleted file mode 100644 index 0902cf4e5b8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class TableChangeEvent { - - public static TableChangeEvent dropped(TableMetadata oldTable) { - return new TableChangeEvent(SchemaChangeType.DROPPED, oldTable, null); - } - - public static TableChangeEvent created(TableMetadata newTable) { - return new TableChangeEvent(SchemaChangeType.CREATED, null, newTable); - } - - public static TableChangeEvent updated(TableMetadata oldTable, TableMetadata newTable) { - return new TableChangeEvent(SchemaChangeType.UPDATED, oldTable, newTable); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final TableMetadata oldTable; - /** {@code null} if the event is a drop */ - public final TableMetadata newTable; - - private TableChangeEvent( - SchemaChangeType changeType, TableMetadata oldTable, TableMetadata newTable) { - this.changeType = changeType; - this.oldTable = oldTable; - this.newTable = newTable; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TableChangeEvent) { - TableChangeEvent that = (TableChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldTable, that.oldTable) - && Objects.equals(this.newTable, that.newTable); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldTable, newTable); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("TableChangeEvent(CREATED %s)", newTable.getName()); - case UPDATED: - return String.format( - "TableChangeEvent(UPDATED %s=>%s)", oldTable.getName(), newTable.getName()); - case DROPPED: - 
return String.format("TableChangeEvent(DROPPED %s)", oldTable.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java deleted file mode 100644 index f8048570ac2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class TypeChangeEvent { - - public static TypeChangeEvent dropped(UserDefinedType oldType) { - return new TypeChangeEvent(SchemaChangeType.DROPPED, oldType, null); - } - - public static TypeChangeEvent created(UserDefinedType newType) { - return new TypeChangeEvent(SchemaChangeType.CREATED, null, newType); - } - - public static TypeChangeEvent updated(UserDefinedType oldType, UserDefinedType newType) { - return new TypeChangeEvent(SchemaChangeType.UPDATED, oldType, newType); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final UserDefinedType oldType; - /** {@code null} if the event is a drop */ - public final UserDefinedType newType; - - private TypeChangeEvent( - SchemaChangeType changeType, UserDefinedType oldType, UserDefinedType newType) { - this.changeType = changeType; - this.oldType = oldType; - this.newType = newType; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TypeChangeEvent) { - TypeChangeEvent that = (TypeChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldType, that.oldType) - && Objects.equals(this.newType, that.newType); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldType, newType); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("TypeChangeEvent(CREATED %s)", newType.getName()); - case UPDATED: - return String.format( - "TypeChangeEvent(UPDATED %s=>%s)", oldType.getName(), newType.getName()); - case DROPPED: - return 
String.format("TypeChangeEvent(DROPPED %s)", oldType.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java deleted file mode 100644 index 91e59d287f1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.events; - -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.SchemaChangeType; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class ViewChangeEvent { - - public static ViewChangeEvent dropped(ViewMetadata oldView) { - return new ViewChangeEvent(SchemaChangeType.DROPPED, oldView, null); - } - - public static ViewChangeEvent created(ViewMetadata newView) { - return new ViewChangeEvent(SchemaChangeType.CREATED, null, newView); - } - - public static ViewChangeEvent updated(ViewMetadata oldView, ViewMetadata newView) { - return new ViewChangeEvent(SchemaChangeType.UPDATED, oldView, newView); - } - - public final SchemaChangeType changeType; - /** {@code null} if the event is a creation */ - public final ViewMetadata oldView; - /** {@code null} if the event is a drop */ - public final ViewMetadata newView; - - private ViewChangeEvent(SchemaChangeType changeType, ViewMetadata oldView, ViewMetadata newView) { - this.changeType = changeType; - this.oldView = oldView; - this.newView = newView; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ViewChangeEvent) { - ViewChangeEvent that = (ViewChangeEvent) other; - return this.changeType == that.changeType - && Objects.equals(this.oldView, that.oldView) - && Objects.equals(this.newView, that.newView); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(changeType, oldView, newView); - } - - @Override - public String toString() { - switch (changeType) { - case CREATED: - return String.format("ViewChangeEvent(CREATED %s)", newView.getName()); - case UPDATED: - return String.format( - "ViewChangeEvent(UPDATED %s=>%s)", oldView.getName(), newView.getName()); - case DROPPED: - return String.format("ViewChangeEvent(DROPPED 
%s)", oldView.getName()); - default: - throw new IllegalStateException("Unsupported change type " + changeType); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java deleted file mode 100644 index d1f8640a744..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultAggregateMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class AggregateParser { - private final DataTypeParser dataTypeParser; - private final InternalDriverContext context; - - public AggregateParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.dataTypeParser = dataTypeParser; - this.context = context; - } - - public AggregateMetadata parseAggregate( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_aggregates ( - // keyspace_name text, - // aggregate_name text, - // signature frozen>, - // argument_types list, - // final_func text, - // initcond blob, - // return_type text, - // state_func text, - // state_type text, - // PRIMARY KEY (keyspace_name, aggregate_name, signature) - // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, signature ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system.schema_aggregates ( - // keyspace_name text, - // aggregate_name text, - // argument_types frozen>, - // final_func text, - // initcond text, - // return_type text, - // state_func text, - // state_type text, - // PRIMARY KEY 
(keyspace_name, aggregate_name, argument_types) - // ) WITH CLUSTERING ORDER BY (aggregate_name ASC, argument_types ASC) - String simpleName = row.getString("aggregate_name"); - List argumentTypes = row.getListOfString("argument_types"); - FunctionSignature signature = - new FunctionSignature( - CqlIdentifier.fromInternal(simpleName), - dataTypeParser.parse(keyspaceId, argumentTypes, userDefinedTypes, context)); - - DataType stateType = - dataTypeParser.parse(keyspaceId, row.getString("state_type"), userDefinedTypes, context); - TypeCodec stateTypeCodec = context.getCodecRegistry().codecFor(stateType); - - String stateFuncSimpleName = row.getString("state_func"); - FunctionSignature stateFuncSignature = - new FunctionSignature( - CqlIdentifier.fromInternal(stateFuncSimpleName), - ImmutableList.builder() - .add(stateType) - .addAll(signature.getParameterTypes()) - .build()); - - String finalFuncSimpleName = row.getString("final_func"); - FunctionSignature finalFuncSignature = - (finalFuncSimpleName == null) - ? null - : new FunctionSignature(CqlIdentifier.fromInternal(finalFuncSimpleName), stateType); - - DataType returnType = - dataTypeParser.parse(keyspaceId, row.getString("return_type"), userDefinedTypes, context); - - Object initCond; - if (row.isString("initcond")) { // Cassandra 3 - String initCondString = row.getString("initcond"); - initCond = (initCondString == null) ? null : stateTypeCodec.parse(initCondString); - } else { // Cassandra 2.2 - ByteBuffer initCondBlob = row.getByteBuffer("initcond"); - initCond = - (initCondBlob == null) - ? 
null - : stateTypeCodec.decode(initCondBlob, context.getProtocolVersion()); - } - return new DefaultAggregateMetadata( - keyspaceId, - signature, - finalFuncSignature, - initCond, - returnType, - stateFuncSignature, - stateType, - stateTypeCodec); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java deleted file mode 100644 index 9749a921aae..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultKeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default parser implementation for Cassandra. - * - *

For modularity, the code for each element row is split into separate classes (schema stuff is - * not on the hot path, so creating a few extra objects doesn't matter). - */ -@ThreadSafe -public class CassandraSchemaParser implements SchemaParser { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaParser.class); - - private final SchemaRows rows; - private final UserDefinedTypeParser userDefinedTypeParser; - private final TableParser tableParser; - private final ViewParser viewParser; - private final FunctionParser functionParser; - private final AggregateParser aggregateParser; - private final String logPrefix; - private final long startTimeNs = System.nanoTime(); - - public CassandraSchemaParser(SchemaRows rows, InternalDriverContext context) { - this.rows = rows; - this.logPrefix = context.getSessionName(); - - this.userDefinedTypeParser = new UserDefinedTypeParser(rows.dataTypeParser(), context); - this.tableParser = new TableParser(rows, context); - this.viewParser = new ViewParser(rows, context); - this.functionParser = new FunctionParser(rows.dataTypeParser(), context); - this.aggregateParser = new AggregateParser(rows.dataTypeParser(), context); - } - - @Override - public SchemaRefresh parse() { - ImmutableMap.Builder keyspacesBuilder = ImmutableMap.builder(); - for (AdminRow row : rows.keyspaces()) { - KeyspaceMetadata keyspace = parseKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - for (AdminRow row : rows.virtualKeyspaces()) { - KeyspaceMetadata keyspace = parseVirtualKeyspace(row); - keyspacesBuilder.put(keyspace.getName(), keyspace); - } - SchemaRefresh refresh = new SchemaRefresh(keyspacesBuilder.build()); - LOG.debug("[{}] Schema parsing took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - return refresh; - } - - private KeyspaceMetadata parseKeyspace(AdminRow keyspaceRow) { - - // Cassandra <= 2.2 - // CREATE TABLE system.schema_keyspaces ( - // keyspace_name text PRIMARY KEY, - // 
durable_writes boolean, - // strategy_class text, - // strategy_options text - // ) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.keyspaces ( - // keyspace_name text PRIMARY KEY, - // durable_writes boolean, - // replication frozen> - // ) - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - - Map replicationOptions; - if (keyspaceRow.contains("strategy_class")) { - String strategyClass = keyspaceRow.getString("strategy_class"); - Map strategyOptions = - SimpleJsonParser.parseStringMap(keyspaceRow.getString("strategy_options")); - replicationOptions = - ImmutableMap.builder() - .putAll(strategyOptions) - .put("class", strategyClass) - .build(); - } else { - replicationOptions = keyspaceRow.getMapOfStringToString("replication"); - } - - Map types = parseTypes(keyspaceId); - - return new DefaultKeyspaceMetadata( - keyspaceId, - durableWrites, - false, - replicationOptions, - types, - parseTables(keyspaceId, types), - parseViews(keyspaceId, types), - parseFunctions(keyspaceId, types), - parseAggregates(keyspaceId, types)); - } - - private KeyspaceMetadata parseVirtualKeyspace(AdminRow keyspaceRow) { - - CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); - boolean durableWrites = - MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); - - Map replicationOptions = Collections.emptyMap(); - ; - - Map types = parseTypes(keyspaceId); - - return new DefaultKeyspaceMetadata( - keyspaceId, - durableWrites, - true, - replicationOptions, - types, - parseVirtualTables(keyspaceId, types), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private Map parseTypes(CqlIdentifier keyspaceId) { - return userDefinedTypeParser.parse(rows.types().get(keyspaceId), keyspaceId); - } - - private Map parseVirtualTables( - 
CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - for (AdminRow tableRow : rows.virtualTables().get(keyspaceId)) { - TableMetadata table = tableParser.parseVirtualTable(tableRow, keyspaceId, types); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return tablesBuilder.build(); - } - - private Map parseTables( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); - for (AdminRow tableRow : rows.tables().get(keyspaceId)) { - TableMetadata table = tableParser.parseTable(tableRow, keyspaceId, types); - if (table != null) { - tablesBuilder.put(table.getName(), table); - } - } - return tablesBuilder.build(); - } - - private Map parseViews( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder viewsBuilder = ImmutableMap.builder(); - for (AdminRow viewRow : rows.views().get(keyspaceId)) { - ViewMetadata view = viewParser.parseView(viewRow, keyspaceId, types); - if (view != null) { - viewsBuilder.put(view.getName(), view); - } - } - return viewsBuilder.build(); - } - - private Map parseFunctions( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder functionsBuilder = - ImmutableMap.builder(); - for (AdminRow functionRow : rows.functions().get(keyspaceId)) { - FunctionMetadata function = functionParser.parseFunction(functionRow, keyspaceId, types); - if (function != null) { - functionsBuilder.put(function.getSignature(), function); - } - } - return functionsBuilder.build(); - } - - private Map parseAggregates( - CqlIdentifier keyspaceId, Map types) { - ImmutableMap.Builder aggregatesBuilder = - ImmutableMap.builder(); - for (AdminRow aggregateRow : rows.aggregates().get(keyspaceId)) { - AggregateMetadata aggregate = aggregateParser.parseAggregate(aggregateRow, keyspaceId, types); - if (aggregate != null) { - aggregatesBuilder.put(aggregate.getSignature(), aggregate); - } - } - return aggregatesBuilder.build(); - } -} diff 
--git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java deleted file mode 100644 index 1037ccda1ae..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DataTypeClassNameCompositeParser extends DataTypeClassNameParser { - - public ParseResult parseWithComposite( - String className, - CqlIdentifier keyspaceId, - Map userTypes, - InternalDriverContext context) { - Parser parser = new Parser(className, 0); - - String next = parser.parseNextName(); - if (!isComposite(next)) { - return new ParseResult(parse(keyspaceId, className, userTypes, context), isReversed(next)); - } - - List subClassNames = parser.getTypeParameters(); - int count = subClassNames.size(); - String last = subClassNames.get(count - 1); - Map collections = new HashMap<>(); - if (isCollection(last)) { - count--; - Parser collectionParser = new Parser(last, 0); - collectionParser.parseNextName(); // skips columnToCollectionType - Map params = collectionParser.getCollectionsParameters(); - for (Map.Entry entry : params.entrySet()) { - collections.put(entry.getKey(), parse(keyspaceId, entry.getValue(), userTypes, context)); - } - } - - List types = new ArrayList<>(count); - List reversed = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - types.add(parse(keyspaceId, subClassNames.get(i), userTypes, context)); - reversed.add(isReversed(subClassNames.get(i))); - } - - return new ParseResult(true, types, reversed, collections); - } - - public static class ParseResult { - public final boolean isComposite; - public final List types; - public final List reversed; - public final Map collections; - - private 
ParseResult(DataType type, boolean reversed) { - this( - false, - Collections.singletonList(type), - Collections.singletonList(reversed), - Collections.emptyMap()); - } - - private ParseResult( - boolean isComposite, - List types, - List reversed, - Map collections) { - this.isComposite = isComposite; - this.types = types; - this.reversed = reversed; - this.collections = collections; - } - } - - private static boolean isComposite(String className) { - return className.startsWith("org.apache.cassandra.db.marshal.CompositeType"); - } - - private static boolean isCollection(String className) { - return className.startsWith("org.apache.cassandra.db.marshal.ColumnToCollectionType"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java deleted file mode 100644 index bf252d0bc57..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Parses data types from schema tables, for Cassandra 2.2 and below. - * - *

In these versions, data types appear as class names, like - * "org.apache.cassandra.db.marshal.AsciiType" or - * "org.apache.cassandra.db.marshal.TupleType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)". - * - *

This is modified (and simplified) from Cassandra's {@code TypeParser} class to suit our needs. - * In particular it's not very efficient, but it doesn't really matter since it's rarely used and - * never in a critical path. - */ -@ThreadSafe -public class DataTypeClassNameParser implements DataTypeParser { - - private static final Logger LOG = LoggerFactory.getLogger(DataTypeClassNameParser.class); - - @Override - public DataType parse( - CqlIdentifier keyspaceId, - String toParse, - Map userTypes, - InternalDriverContext context) { - // We take keyspaceId as a parameter because of the parent interface, but it's actually unused - // by this implementation. - return parse(toParse, userTypes, context, context.getSessionName()); - } - - /** - * Simplified parse method for external use. - * - *

This is intended for use in Cassandra's UDF implementation (the current version uses the - * similar method from driver 3). - */ - public DataType parse(String toParse, AttachmentPoint attachmentPoint) { - return parse( - toParse, - null, // No caching of user types: nested types will always be fully re-parsed - attachmentPoint, - "parser"); - } - - private DataType parse( - String toParse, - Map userTypes, - AttachmentPoint attachmentPoint, - String logPrefix) { - boolean frozen = false; - if (isReversed(toParse)) { - // Just skip the ReversedType part, we don't care - toParse = getNestedClassName(toParse); - } else if (toParse.startsWith("org.apache.cassandra.db.marshal.FrozenType")) { - frozen = true; - toParse = getNestedClassName(toParse); - } - - Parser parser = new Parser(toParse, 0); - String next = parser.parseNextName(); - - if (next.startsWith("org.apache.cassandra.db.marshal.ListType")) { - DataType elementType = - parse(parser.getTypeParameters().get(0), userTypes, attachmentPoint, logPrefix); - return DataTypes.listOf(elementType, frozen); - } - - if (next.startsWith("org.apache.cassandra.db.marshal.SetType")) { - DataType elementType = - parse(parser.getTypeParameters().get(0), userTypes, attachmentPoint, logPrefix); - return DataTypes.setOf(elementType, frozen); - } - - if (next.startsWith("org.apache.cassandra.db.marshal.MapType")) { - List parameters = parser.getTypeParameters(); - DataType keyType = parse(parameters.get(0), userTypes, attachmentPoint, logPrefix); - DataType valueType = parse(parameters.get(1), userTypes, attachmentPoint, logPrefix); - return DataTypes.mapOf(keyType, valueType, frozen); - } - - if (frozen) - LOG.warn( - "[{}] Got o.a.c.db.marshal.FrozenType for something else than a collection, " - + "this driver version might be too old for your version of Cassandra", - logPrefix); - - if (next.startsWith("org.apache.cassandra.db.marshal.UserType")) { - ++parser.idx; // skipping '(' - - CqlIdentifier keyspace = 
CqlIdentifier.fromInternal(parser.readOne()); - parser.skipBlankAndComma(); - String typeName = - TypeCodecs.TEXT.decode( - Bytes.fromHexString("0x" + parser.readOne()), attachmentPoint.getProtocolVersion()); - if (typeName == null) { - throw new AssertionError("Type name cannot be null, this is a server bug"); - } - CqlIdentifier typeId = CqlIdentifier.fromInternal(typeName); - Map nameAndTypeParameters = parser.getNameAndTypeParameters(); - - // Avoid re-parsing if we already have the definition - if (userTypes != null && userTypes.containsKey(typeId)) { - // copy as frozen since C* 2.x UDTs are always frozen. - return userTypes.get(typeId).copy(true); - } else { - UserDefinedTypeBuilder builder = new UserDefinedTypeBuilder(keyspace, typeId); - parser.skipBlankAndComma(); - for (Map.Entry entry : nameAndTypeParameters.entrySet()) { - CqlIdentifier fieldName = CqlIdentifier.fromInternal(entry.getKey()); - DataType fieldType = parse(entry.getValue(), userTypes, attachmentPoint, logPrefix); - builder.withField(fieldName, fieldType); - } - // Create a frozen UserType since C* 2.x UDTs are always frozen. 
- return builder.frozen().withAttachmentPoint(attachmentPoint).build(); - } - } - - if (next.startsWith("org.apache.cassandra.db.marshal.TupleType")) { - List rawTypes = parser.getTypeParameters(); - ImmutableList.Builder componentTypesBuilder = ImmutableList.builder(); - for (String rawType : rawTypes) { - componentTypesBuilder.add(parse(rawType, userTypes, attachmentPoint, logPrefix)); - } - return new DefaultTupleType(componentTypesBuilder.build(), attachmentPoint); - } - - if (next.startsWith("org.apache.cassandra.db.marshal.VectorType")) { - Iterator rawTypes = parser.getTypeParameters().iterator(); - DataType subtype = parse(rawTypes.next(), userTypes, attachmentPoint, logPrefix); - int dimensions = Integer.parseInt(rawTypes.next()); - return DataTypes.vectorOf(subtype, dimensions); - } - - DataType type = NATIVE_TYPES_BY_CLASS_NAME.get(next); - return type == null ? DataTypes.custom(toParse) : type; - } - - static boolean isReversed(String toParse) { - return toParse.startsWith("org.apache.cassandra.db.marshal.ReversedType"); - } - - private static String getNestedClassName(String className) { - Parser p = new Parser(className, 0); - p.parseNextName(); - List l = p.getTypeParameters(); - if (l.size() != 1) { - throw new IllegalStateException(); - } - className = l.get(0); - return className; - } - - static class Parser { - - private final String str; - private int idx; - - Parser(String str, int idx) { - this.str = str; - this.idx = idx; - } - - String parseNextName() { - skipBlank(); - return readNextIdentifier(); - } - - private String readOne() { - String name = parseNextName(); - String args = readRawArguments(); - return name + args; - } - - // Assumes we have just read a class name and read it's potential arguments - // blindly. I.e. it assume that either parsing is done or that we're on a '(' - // and this reads everything up until the corresponding closing ')'. It - // returns everything read, including the enclosing parenthesis. 
- private String readRawArguments() { - skipBlank(); - - if (isEOS() || str.charAt(idx) == ')' || str.charAt(idx) == ',') { - return ""; - } - - if (str.charAt(idx) != '(') { - throw new IllegalStateException( - String.format( - "Expecting char %d of %s to be '(' but '%c' found", idx, str, str.charAt(idx))); - } - - int i = idx; - int open = 1; - while (open > 0) { - ++idx; - - if (isEOS()) { - throw new IllegalStateException("Non closed parenthesis"); - } - - if (str.charAt(idx) == '(') { - open++; - } else if (str.charAt(idx) == ')') { - open--; - } - } - // we've stopped at the last closing ')' so move past that - ++idx; - return str.substring(i, idx); - } - - List getTypeParameters() { - List list = new ArrayList<>(); - - if (isEOS()) { - return list; - } - - if (str.charAt(idx) != '(') { - throw new IllegalStateException(); - } - - ++idx; // skipping '(' - - while (skipBlankAndComma()) { - if (str.charAt(idx) == ')') { - ++idx; - return list; - } - list.add(readOne()); - } - throw new IllegalArgumentException( - String.format( - "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } - - Map getCollectionsParameters() { - if (isEOS()) { - return Collections.emptyMap(); - } - if (str.charAt(idx) != '(') { - throw new IllegalStateException(); - } - ++idx; // skipping '(' - return getNameAndTypeParameters(); - } - - // Must be at the start of the first parameter to read - private Map getNameAndTypeParameters() { - // The order of the hashmap matters for UDT - Map map = new LinkedHashMap<>(); - - while (skipBlankAndComma()) { - if (str.charAt(idx) == ')') { - ++idx; - return map; - } - - String bbHex = readNextIdentifier(); - String name = null; - try { - name = - TypeCodecs.TEXT.decode( - Bytes.fromHexString("0x" + bbHex), DefaultProtocolVersion.DEFAULT); - } catch (NumberFormatException e) { - throwSyntaxError(e.getMessage()); - } - - skipBlank(); - if (str.charAt(idx) != ':') { - throwSyntaxError("expecting ':' token"); - } - - ++idx; 
- skipBlank(); - map.put(name, readOne()); - } - throw new IllegalArgumentException( - String.format( - "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } - - private void throwSyntaxError(String msg) { - throw new IllegalArgumentException( - String.format("Syntax error parsing '%s' at char %d: %s", str, idx, msg)); - } - - private boolean isEOS() { - return isEOS(str, idx); - } - - private static boolean isEOS(String str, int i) { - return i >= str.length(); - } - - private void skipBlank() { - idx = skipBlank(str, idx); - } - - private static int skipBlank(String str, int i) { - while (!isEOS(str, i) && ParseUtils.isBlank(str.charAt(i))) { - ++i; - } - return i; - } - - // skip all blank and at best one comma, return true if there not EOS - private boolean skipBlankAndComma() { - boolean commaFound = false; - while (!isEOS()) { - int c = str.charAt(idx); - if (c == ',') { - if (commaFound) { - return true; - } else { - commaFound = true; - } - } else if (!ParseUtils.isBlank(c)) { - return true; - } - ++idx; - } - return false; - } - - // left idx positioned on the character stopping the read - private String readNextIdentifier() { - int i = idx; - while (!isEOS() && ParseUtils.isCqlIdentifierChar(str.charAt(idx))) { - ++idx; - } - return str.substring(i, idx); - } - - @Override - public String toString() { - return str.substring(0, idx) - + "[" - + (idx == str.length() ? 
"" : str.charAt(idx)) - + "]" - + str.substring(idx + 1); - } - } - - @VisibleForTesting - static ImmutableMap NATIVE_TYPES_BY_CLASS_NAME = - new ImmutableMap.Builder() - .put("org.apache.cassandra.db.marshal.AsciiType", DataTypes.ASCII) - .put("org.apache.cassandra.db.marshal.LongType", DataTypes.BIGINT) - .put("org.apache.cassandra.db.marshal.BytesType", DataTypes.BLOB) - .put("org.apache.cassandra.db.marshal.BooleanType", DataTypes.BOOLEAN) - .put("org.apache.cassandra.db.marshal.CounterColumnType", DataTypes.COUNTER) - .put("org.apache.cassandra.db.marshal.DecimalType", DataTypes.DECIMAL) - .put("org.apache.cassandra.db.marshal.DoubleType", DataTypes.DOUBLE) - .put("org.apache.cassandra.db.marshal.FloatType", DataTypes.FLOAT) - .put("org.apache.cassandra.db.marshal.InetAddressType", DataTypes.INET) - .put("org.apache.cassandra.db.marshal.Int32Type", DataTypes.INT) - .put("org.apache.cassandra.db.marshal.UTF8Type", DataTypes.TEXT) - .put("org.apache.cassandra.db.marshal.TimestampType", DataTypes.TIMESTAMP) - .put("org.apache.cassandra.db.marshal.SimpleDateType", DataTypes.DATE) - .put("org.apache.cassandra.db.marshal.TimeType", DataTypes.TIME) - .put("org.apache.cassandra.db.marshal.UUIDType", DataTypes.UUID) - .put("org.apache.cassandra.db.marshal.IntegerType", DataTypes.VARINT) - .put("org.apache.cassandra.db.marshal.TimeUUIDType", DataTypes.TIMEUUID) - .put("org.apache.cassandra.db.marshal.ByteType", DataTypes.TINYINT) - .put("org.apache.cassandra.db.marshal.ShortType", DataTypes.SMALLINT) - .put("org.apache.cassandra.db.marshal.DurationType", DataTypes.DURATION) - .build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java deleted file mode 100644 index 8d5e068b431..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.ArrayList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import 
net.jcip.annotations.ThreadSafe; - -/** - * Parses data types from schema tables, for Cassandra 3.0 and above. - * - *

In these versions, data types appear as string literals, like "ascii" or - * "tuple<int,int>". - */ -@ThreadSafe -public class DataTypeCqlNameParser implements DataTypeParser { - - @Override - public DataType parse( - CqlIdentifier keyspaceId, - String toParse, - Map userTypes, - InternalDriverContext context) { - // Top-level is never frozen, it is only set recursively when we encounter the frozen<> keyword - return parse(toParse, keyspaceId, false, userTypes, context); - } - - private DataType parse( - String toParse, - CqlIdentifier keyspaceId, - boolean frozen, - Map userTypes, - InternalDriverContext context) { - - if (toParse.startsWith("'")) { - return DataTypes.custom(toParse.substring(1, toParse.length() - 1)); - } - - Parser parser = new Parser(toParse, 0); - String type = parser.parseTypeName(); - - if (type.equalsIgnoreCase(RawColumn.THRIFT_EMPTY_TYPE)) { - return DataTypes.custom(type); - } - - DataType nativeType = NATIVE_TYPES_BY_NAME.get(type.toLowerCase(Locale.ROOT)); - if (nativeType != null) { - return nativeType; - } - - if (parser.isEOS()) { - // No parameters => it's a UDT - CqlIdentifier name = CqlIdentifier.fromCql(type); - if (userTypes != null) { - UserDefinedType userType = userTypes.get(name); - if (userType == null) { - throw new IllegalStateException( - String.format("Can't find referenced user type %s", type)); - } - return userType.copy(frozen); - } else { - return new ShallowUserDefinedType(keyspaceId, name, frozen); - } - } - - List parameters = parser.parseTypeParameters(); - if (type.equalsIgnoreCase("list")) { - if (parameters.size() != 1) { - throw new IllegalArgumentException( - String.format("Expecting single parameter for list, got %s", parameters)); - } - DataType elementType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - return DataTypes.listOf(elementType, frozen); - } - - if (type.equalsIgnoreCase("set")) { - if (parameters.size() != 1) { - throw new IllegalArgumentException( - 
String.format("Expecting single parameter for set, got %s", parameters)); - } - DataType elementType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - return DataTypes.setOf(elementType, frozen); - } - - if (type.equalsIgnoreCase("map")) { - if (parameters.size() != 2) { - throw new IllegalArgumentException( - String.format("Expecting two parameters for map, got %s", parameters)); - } - DataType keyType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - DataType valueType = parse(parameters.get(1), keyspaceId, false, userTypes, context); - return DataTypes.mapOf(keyType, valueType, frozen); - } - - if (type.equalsIgnoreCase("frozen")) { - if (parameters.size() != 1) { - throw new IllegalArgumentException( - String.format("Expecting single parameter for frozen keyword, got %s", parameters)); - } - return parse(parameters.get(0), keyspaceId, true, userTypes, context); - } - - if (type.equalsIgnoreCase("tuple")) { - if (parameters.isEmpty()) { - throw new IllegalArgumentException("Expecting at list one parameter for tuple, got none"); - } - ImmutableList.Builder componentTypesBuilder = ImmutableList.builder(); - for (String rawType : parameters) { - componentTypesBuilder.add(parse(rawType, keyspaceId, false, userTypes, context)); - } - return new DefaultTupleType(componentTypesBuilder.build(), context); - } - - if (type.equalsIgnoreCase("vector")) { - if (parameters.size() != 2) { - throw new IllegalArgumentException( - String.format("Expecting two parameters for vector custom type, got %s", parameters)); - } - DataType subType = parse(parameters.get(0), keyspaceId, false, userTypes, context); - int dimensions = Integer.parseInt(parameters.get(1)); - return new DefaultVectorType(subType, dimensions); - } - - throw new IllegalArgumentException("Could not parse type name " + toParse); - } - - private static class Parser { - - private final String str; - - private int idx; - - Parser(String str, int idx) { - this.str = str; - this.idx 
= idx; - } - - String parseTypeName() { - idx = ParseUtils.skipSpaces(str, idx); - return readNextIdentifier(); - } - - List parseTypeParameters() { - List list = new ArrayList<>(); - - if (isEOS()) { - return list; - } - - skipBlankAndComma(); - - if (str.charAt(idx) != '<') { - throw new IllegalStateException(); - } - - ++idx; // skipping '<' - - while (skipBlankAndComma()) { - if (str.charAt(idx) == '>') { - ++idx; - return list; - } - - String name = parseTypeName(); - String args = readRawTypeParameters(); - list.add(name + args); - } - throw new IllegalArgumentException( - String.format( - "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); - } - - // left idx positioned on the character stopping the read - private String readNextIdentifier() { - int startIdx = idx; - if (str.charAt(startIdx) == '"') { // case-sensitive name included in double quotes - ++idx; - // read until closing quote. - while (!isEOS()) { - boolean atQuote = str.charAt(idx) == '"'; - ++idx; - if (atQuote) { - // if the next character is also a quote, this is an escaped quote, continue reading, - // otherwise stop. - if (!isEOS() && str.charAt(idx) == '"') { - ++idx; - } else { - break; - } - } - } - } else if (str.charAt(startIdx) == '\'') { // custom type name included in single quotes - ++idx; - // read until closing quote. - while (!isEOS() && str.charAt(idx++) != '\'') { - /* loop */ - } - } else { - while (!isEOS() - && (ParseUtils.isCqlIdentifierChar(str.charAt(idx)) || str.charAt(idx) == '"')) { - ++idx; - } - } - return str.substring(startIdx, idx); - } - - // Assumes we have just read a type name and read its potential arguments blindly. I.e. it - // assumes that either parsing is done or that we're on a '<' and this reads everything up until - // the corresponding closing '>'. It returns everything read, including the enclosing brackets. 
- private String readRawTypeParameters() { - idx = ParseUtils.skipSpaces(str, idx); - - if (isEOS() || str.charAt(idx) == '>' || str.charAt(idx) == ',') { - return ""; - } - - if (str.charAt(idx) != '<') { - throw new IllegalStateException( - String.format( - "Expecting char %d of %s to be '<' but '%c' found", idx, str, str.charAt(idx))); - } - - int i = idx; - int open = 1; - boolean inQuotes = false; - while (open > 0) { - ++idx; - - if (isEOS()) { - throw new IllegalStateException("Non closed angle brackets"); - } - - // Only parse for '<' and '>' characters if not within a quoted identifier. - // Note we don't need to handle escaped quotes ("") in type names here, because they just - // cause inQuotes to flip to false and immediately back to true - if (!inQuotes) { - if (str.charAt(idx) == '"') { - inQuotes = true; - } else if (str.charAt(idx) == '<') { - open++; - } else if (str.charAt(idx) == '>') { - open--; - } - } else if (str.charAt(idx) == '"') { - inQuotes = false; - } - } - // we've stopped at the last closing ')' so move past that - ++idx; - return str.substring(i, idx); - } - - // skip all blank and at best one comma, return true if there not EOS - private boolean skipBlankAndComma() { - boolean commaFound = false; - while (!isEOS()) { - int c = str.charAt(idx); - if (c == ',') { - if (commaFound) { - return true; - } else { - commaFound = true; - } - } else if (!ParseUtils.isBlank(c)) { - return true; - } - ++idx; - } - return false; - } - - private boolean isEOS() { - return idx >= str.length(); - } - - @Override - public String toString() { - return str.substring(0, idx) - + "[" - + (idx == str.length() ? 
"" : str.charAt(idx)) - + "]" - + str.substring(idx + 1); - } - } - - @VisibleForTesting - static final ImmutableMap NATIVE_TYPES_BY_NAME = - new ImmutableMap.Builder() - .put("ascii", DataTypes.ASCII) - .put("bigint", DataTypes.BIGINT) - .put("blob", DataTypes.BLOB) - .put("boolean", DataTypes.BOOLEAN) - .put("counter", DataTypes.COUNTER) - .put("decimal", DataTypes.DECIMAL) - .put("double", DataTypes.DOUBLE) - .put("float", DataTypes.FLOAT) - .put("inet", DataTypes.INET) - .put("int", DataTypes.INT) - .put("text", DataTypes.TEXT) - .put("varchar", DataTypes.TEXT) - .put("timestamp", DataTypes.TIMESTAMP) - .put("date", DataTypes.DATE) - .put("time", DataTypes.TIME) - .put("uuid", DataTypes.UUID) - .put("varint", DataTypes.VARINT) - .put("timeuuid", DataTypes.TIMEUUID) - .put("tinyint", DataTypes.TINYINT) - .put("smallint", DataTypes.SMALLINT) - .put("duration", DataTypes.DURATION) - .build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java deleted file mode 100644 index 0f191d08a53..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** Parses data types from their string representation in schema tables. */ -public interface DataTypeParser { - - /** - * @param userTypes the UDTs in the current keyspace, if we know them already. This is used to - * resolve subtypes if the type to parse is complex (such as {@code list}). The only - * situation where we don't have them is when we refresh all the UDTs of a keyspace; in that - * case, the filed will be {@code null} and any UDT encountered by this method will always be - * re-created from scratch: for Cassandra < 2.2, this means parsing the whole definition; - * for > 3.0, this means materializing it as a {@link ShallowUserDefinedType} that will be - * resolved in a second pass. 
- */ - DataType parse( - CqlIdentifier keyspaceId, - String toParse, - Map userTypes, - InternalDriverContext context); - - default List parse( - CqlIdentifier keyspaceId, - List typeStrings, - Map userTypes, - InternalDriverContext context) { - if (typeStrings.isEmpty()) { - return Collections.emptyList(); - } else { - ImmutableList.Builder builder = ImmutableList.builder(); - for (String typeString : typeStrings) { - builder.add(parse(keyspaceId, typeString, userTypes, context)); - } - return builder.build(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java deleted file mode 100644 index 5fa64027be5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.metadata.schema.parsing.DseSchemaParser; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultSchemaParserFactory implements SchemaParserFactory { - - private final InternalDriverContext context; - - public DefaultSchemaParserFactory(InternalDriverContext context) { - this.context = context; - } - - @Override - public SchemaParser newInstance(SchemaRows rows) { - boolean isDse = rows.getNode().getExtras().containsKey(DseNodeProperties.DSE_VERSION); - return isDse ? new DseSchemaParser(rows, context) : new CassandraSchemaParser(rows, context); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java deleted file mode 100644 index 54786e999ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultFunctionMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class FunctionParser { - - private static final Logger LOG = LoggerFactory.getLogger(FunctionParser.class); - - private final DataTypeParser dataTypeParser; - private final InternalDriverContext context; - private final String logPrefix; - - public FunctionParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.dataTypeParser = dataTypeParser; - this.context = context; - this.logPrefix = context.getSessionName(); - } - - public FunctionMetadata parseFunction( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_functions ( - // 
keyspace_name text, - // function_name text, - // signature frozen>, - // argument_names list, - // argument_types list, - // body text, - // called_on_null_input boolean, - // language text, - // return_type text, - // PRIMARY KEY (keyspace_name, function_name, signature) - // ) WITH CLUSTERING ORDER BY (function_name ASC, signature ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.functions ( - // keyspace_name text, - // function_name text, - // argument_names frozen>, - // argument_types frozen>, - // body text, - // called_on_null_input boolean, - // language text, - // return_type text, - // PRIMARY KEY (keyspace_name, function_name, argument_types) - // ) WITH CLUSTERING ORDER BY (function_name ASC, argument_types ASC) - String simpleName = row.getString("function_name"); - List argumentNames = - ImmutableList.copyOf( - Lists.transform(row.getListOfString("argument_names"), CqlIdentifier::fromInternal)); - List argumentTypes = row.getListOfString("argument_types"); - if (argumentNames.size() != argumentTypes.size()) { - LOG.warn( - "[{}] Error parsing system row for function {}.{}, " - + "number of argument names and types don't match (got {} and {}).", - logPrefix, - keyspaceId.asInternal(), - simpleName, - argumentNames.size(), - argumentTypes.size()); - return null; - } - FunctionSignature signature = - new FunctionSignature( - CqlIdentifier.fromInternal(simpleName), - dataTypeParser.parse(keyspaceId, argumentTypes, userDefinedTypes, context)); - String body = row.getString("body"); - Boolean calledOnNullInput = row.getBoolean("called_on_null_input"); - String language = row.getString("language"); - DataType returnType = - dataTypeParser.parse(keyspaceId, row.getString("return_type"), userDefinedTypes, context); - - return new DefaultFunctionMetadata( - keyspaceId, signature, argumentNames, body, calledOnNullInput, language, returnType); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RawColumn.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RawColumn.java deleted file mode 100644 index 331f4841f79..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RawColumn.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.primitives.Ints; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * An intermediary format to manipulate columns before we turn them into {@link ColumnMetadata} - * instances. 
- */ -@NotThreadSafe -public class RawColumn implements Comparable { - - public static final String KIND_PARTITION_KEY = "partition_key"; - public static final String KIND_CLUSTERING_COLUMN = "clustering"; - public static final String KIND_REGULAR = "regular"; - public static final String KIND_COMPACT_VALUE = "compact_value"; - public static final String KIND_STATIC = "static"; - - /** - * Upon migration from thrift to CQL, Cassandra internally creates a surrogate column "value" of - * type {@code EmptyType} for dense tables. This resolves into this CQL type name. - * - *

This column shouldn't be exposed to the user but is currently exposed in system tables. - */ - public static final String THRIFT_EMPTY_TYPE = "empty"; - - public final CqlIdentifier name; - public String kind; - public final int position; - public final String dataType; - public final boolean reversed; - public final String indexName; - public final String indexType; - public final Map indexOptions; - - private RawColumn(AdminRow row) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_columns ( - // keyspace_name text, - // columnfamily_name text, - // column_name text, - // component_index int, - // index_name text, - // index_options text, - // index_type text, - // type text, - // validator text, - // PRIMARY KEY (keyspace_name, columnfamily_name, column_name) - // ) WITH CLUSTERING ORDER BY (columnfamily_name ASC, column_name ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.columns ( - // keyspace_name text, - // table_name text, - // column_name text, - // clustering_order text, - // column_name_bytes blob, - // kind text, - // position int, - // type text, - // PRIMARY KEY (keyspace_name, table_name, column_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC, column_name ASC) - this.name = CqlIdentifier.fromInternal(row.getString("column_name")); - if (row.contains("kind")) { - this.kind = row.getString("kind"); - } else { - this.kind = row.getString("type"); - // remap clustering_key to KIND_CLUSTERING_COLUMN so code doesn't have to check for both. - if (this.kind.equals("clustering_key")) { - this.kind = KIND_CLUSTERING_COLUMN; - } - } - - Integer rawPosition = - row.contains("position") ? row.getInteger("position") : row.getInteger("component_index"); - this.position = (rawPosition == null || rawPosition == -1) ? 0 : rawPosition; - - this.dataType = row.contains("validator") ? row.getString("validator") : row.getString("type"); - this.reversed = - row.contains("clustering_order") - ? 
"desc".equals(row.getString("clustering_order")) - : DataTypeClassNameParser.isReversed(dataType); - this.indexName = row.getString("index_name"); - this.indexType = row.getString("index_type"); - // index_options can apparently contain the string 'null' (JAVA-834) - String indexOptionsString = row.getString("index_options"); - this.indexOptions = - (indexOptionsString == null || indexOptionsString.equals("null")) - ? Collections.emptyMap() - : SimpleJsonParser.parseStringMap(indexOptionsString); - } - - @Override - public int compareTo(@NonNull RawColumn that) { - // First, order by kind. Then order partition key and clustering columns by position. For - // other kinds, order by column name. - if (!this.kind.equals(that.kind)) { - return Ints.compare(rank(this.kind), rank(that.kind)); - } else if (kind.equals(KIND_PARTITION_KEY) || kind.equals(KIND_CLUSTERING_COLUMN)) { - return Integer.compare(this.position, that.position); - } else { - return this.name.asInternal().compareTo(that.name.asInternal()); - } - } - - private static int rank(String kind) { - switch (kind) { - case KIND_PARTITION_KEY: - return 1; - case KIND_CLUSTERING_COLUMN: - return 2; - case KIND_REGULAR: - return 3; - case KIND_COMPACT_VALUE: - return 4; - case KIND_STATIC: - return 5; - default: - return Integer.MAX_VALUE; - } - } - - @SuppressWarnings("MixedMutabilityReturnType") - public static List toRawColumns(Collection rows) { - if (rows.isEmpty()) { - return Collections.emptyList(); - } else { - // Use a mutable list, we might remove some elements later - List result = Lists.newArrayListWithExpectedSize(rows.size()); - for (AdminRow row : rows) { - result.add(new RawColumn(row)); - } - return result; - } - } - - /** - * Helper method to filter columns while parsing a table's metadata. - * - *

Upon migration from thrift to CQL, Cassandra internally creates a pair of surrogate - * clustering/regular columns for compact static tables. These columns shouldn't be exposed to the - * user but are currently returned by C*. We also need to remove the static keyword for all other - * columns in the table. - */ - public static void pruneStaticCompactTableColumns(List columns) { - ListIterator iterator = columns.listIterator(); - while (iterator.hasNext()) { - RawColumn column = iterator.next(); - switch (column.kind) { - case KIND_CLUSTERING_COLUMN: - case KIND_REGULAR: - iterator.remove(); - break; - case KIND_STATIC: - column.kind = KIND_REGULAR; - break; - default: - // nothing to do - } - } - } - - /** Helper method to filter columns while parsing a table's metadata. */ - public static void pruneDenseTableColumnsV3(List columns) { - ListIterator iterator = columns.listIterator(); - while (iterator.hasNext()) { - RawColumn column = iterator.next(); - if (column.kind.equals(KIND_REGULAR) && THRIFT_EMPTY_TYPE.equals(column.dataType)) { - iterator.remove(); - } - } - } - - /** - * Helper method to filter columns while parsing a table's metadata. - * - *

This is similar to {@link #pruneDenseTableColumnsV3(List)}, but for legacy C* versions. - */ - public static void pruneDenseTableColumnsV2(List columns) { - ListIterator iterator = columns.listIterator(); - while (iterator.hasNext()) { - RawColumn column = iterator.next(); - if (column.kind.equals(KIND_COMPACT_VALUE) && column.name.asInternal().isEmpty()) { - iterator.remove(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java deleted file mode 100644 index 86c914459d7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.Describable; -import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.nio.ByteBuffer; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -// Shared code for table and view parsing -@ThreadSafe -public abstract class RelationParser { - - protected final SchemaRows rows; - protected final InternalDriverContext context; - protected final String logPrefix; - - protected RelationParser(SchemaRows rows, InternalDriverContext context) { - this.rows = rows; - this.context = context; - this.logPrefix = context.getSessionName(); - } - - protected Map parseOptions(AdminRow row) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry> entry : OPTION_CODECS.entrySet()) { - String name = entry.getKey(); - CqlIdentifier id = CqlIdentifier.fromInternal(name); - TypeCodec codec = entry.getValue(); - - if (name.equals("caching") && row.isString("caching")) { - // C* <=2.2, caching is stored as a string, and also appears as a string in the WITH clause. 
- builder.put(id, row.getString(name)); - } else if (name.equals("compaction_strategy_class")) { - // C* <=2.2, compaction options split in two columns - String strategyClass = row.getString(name); - if (strategyClass != null) { - builder.put( - CqlIdentifier.fromInternal("compaction"), - ImmutableMap.builder() - .put("class", strategyClass) - .putAll( - SimpleJsonParser.parseStringMap(row.getString("compaction_strategy_options"))) - .build()); - } - } else if (name.equals("compression_parameters")) { - // C* <=2.2, compression stored as a string - String compressionParameters = row.getString(name); - if (compressionParameters != null) { - builder.put( - CqlIdentifier.fromInternal("compression"), - ImmutableMap.copyOf(SimpleJsonParser.parseStringMap(row.getString(name)))); - } - } else if (!isDeprecatedInCassandra4(name)) { - // Default case, read the value in a generic fashion - Object value = row.get(name, codec); - if (value != null) { - builder.put(id, value); - } - } - } - return builder.build(); - } - - /** - * Handle a few oddities in Cassandra 4: some options still appear in system_schema.tables, but - * they are not valid in CREATE statements anymore. We need to exclude them from our metadata, - * otherwise {@link Describable#describe(boolean)} will generate invalid CQL. 
- */ - private boolean isDeprecatedInCassandra4(String name) { - return isCassandra4OrAbove() - && (name.equals("read_repair_chance") - || name.equals("dclocal_read_repair_chance") - // default_time_to_live is not allowed in CREATE MATERIALIZED VIEW statements - || (name.equals("default_time_to_live") && (this instanceof ViewParser))); - } - - private boolean isCassandra4OrAbove() { - Node node = rows.getNode(); - return !node.getExtras().containsKey(DseNodeProperties.DSE_VERSION) - && node.getCassandraVersion() != null - && node.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) >= 0; - } - - public static void appendOptions(Map options, ScriptBuilder builder) { - for (Map.Entry entry : options.entrySet()) { - CqlIdentifier name = entry.getKey(); - Object value = entry.getValue(); - String formattedValue; - if (name.asInternal().equals("caching") && value instanceof String) { - formattedValue = TypeCodecs.TEXT.format((String) value); - } else { - @SuppressWarnings("unchecked") - TypeCodec codec = - (TypeCodec) RelationParser.OPTION_CODECS.get(name.asInternal()); - formattedValue = codec.format(value); - } - String optionName = name.asCql(true); - if ("local_read_repair_chance".equals(optionName)) { - // Another small quirk in C* <= 2.2 - optionName = "dclocal_read_repair_chance"; - } - builder.andWith().append(optionName).append(" = ").append(formattedValue); - } - } - - public static final TypeCodec> MAP_OF_TEXT_TO_TEXT = - TypeCodecs.mapOf(TypeCodecs.TEXT, TypeCodecs.TEXT); - private static final TypeCodec> MAP_OF_TEXT_TO_BLOB = - TypeCodecs.mapOf(TypeCodecs.TEXT, TypeCodecs.BLOB); - /** - * The columns of the system table that are turned into entries in {@link - * RelationMetadata#getOptions()}. 
- */ - public static final ImmutableMap> OPTION_CODECS = - ImmutableMap.>builder() - .put("additional_write_policy", TypeCodecs.TEXT) - .put("bloom_filter_fp_chance", TypeCodecs.DOUBLE) - // In C* <= 2.2, this is a string, not a map (this is special-cased in parseOptions): - .put("caching", MAP_OF_TEXT_TO_TEXT) - .put("cdc", TypeCodecs.BOOLEAN) - .put("comment", TypeCodecs.TEXT) - .put("compaction", MAP_OF_TEXT_TO_TEXT) - // In C*<=2.2, must read from this column and another one called - // 'compaction_strategy_options' (this is special-cased in parseOptions): - .put("compaction_strategy_class", TypeCodecs.TEXT) - .put("compression", MAP_OF_TEXT_TO_TEXT) - // In C*<=2.2, must parse this column into a map (this is special-cased in parseOptions): - .put("compression_parameters", TypeCodecs.TEXT) - .put("crc_check_chance", TypeCodecs.DOUBLE) - .put("dclocal_read_repair_chance", TypeCodecs.DOUBLE) - .put("default_time_to_live", TypeCodecs.INT) - .put("extensions", MAP_OF_TEXT_TO_BLOB) - .put("gc_grace_seconds", TypeCodecs.INT) - .put("local_read_repair_chance", TypeCodecs.DOUBLE) - .put("max_index_interval", TypeCodecs.INT) - .put("memtable_flush_period_in_ms", TypeCodecs.INT) - .put("min_index_interval", TypeCodecs.INT) - .put("read_repair", TypeCodecs.TEXT) - .put("read_repair_chance", TypeCodecs.DOUBLE) - .put("speculative_retry", TypeCodecs.TEXT) - .build(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java deleted file mode 100644 index 109ebea45c1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; - -/** - * The main entry point for system schema rows parsing. - * - *

Implementations must be thread-safe. - */ -public interface SchemaParser { - - /** - * Process the rows that this parser was initialized with, and creates a refresh that will be - * applied to the metadata. - * - * @see SchemaParserFactory#newInstance(SchemaRows) - */ - SchemaRefresh parse(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java deleted file mode 100644 index 93db1472e4d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; - -public interface SchemaParserFactory { - SchemaParser newInstance(SchemaRows rows); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java deleted file mode 100644 index e979a8fd822..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.NotThreadSafe; - -/** - * A very simple json parser. The only reason we need to read json in the driver is because for - * historical reason Cassandra encodes a few properties using json in the schema and we need to - * decode them. - * - *

We however don't need a full-blown JSON library because: 1) we know we only need to decode - * string lists and string maps 2) we can basically assume the input is valid, we don't particularly - * have to bother about decoding exactly JSON as long as we at least decode what we need. 3) we - * don't really care much about performance, none of this is done in performance sensitive parts. - * - *

So instead of pulling a new dependency, we roll out our own very dumb parser. We should - * obviously not expose this publicly. - */ -@NotThreadSafe -public class SimpleJsonParser { - - private final String input; - private int idx; - - private SimpleJsonParser(String input) { - this.input = input; - } - - @SuppressWarnings("MixedMutabilityReturnType") - public static List parseStringList(String input) { - if (input == null || input.isEmpty()) { - return Collections.emptyList(); - } - - List output = new ArrayList<>(); - SimpleJsonParser parser = new SimpleJsonParser(input); - if (parser.nextCharSkipSpaces() != '[') { - throw new IllegalArgumentException("Not a JSON list: " + input); - } - - char c = parser.nextCharSkipSpaces(); - if (c == ']') { - return output; - } - - while (true) { - assert c == '"'; - output.add(parser.nextString()); - c = parser.nextCharSkipSpaces(); - if (c == ']') { - return output; - } - assert c == ','; - c = parser.nextCharSkipSpaces(); - } - } - - @SuppressWarnings("MixedMutabilityReturnType") - public static Map parseStringMap(String input) { - if (input == null || input.isEmpty()) { - return Collections.emptyMap(); - } - - Map output = new HashMap<>(); - SimpleJsonParser parser = new SimpleJsonParser(input); - if (parser.nextCharSkipSpaces() != '{') { - throw new IllegalArgumentException("Not a JSON map: " + input); - } - - char c = parser.nextCharSkipSpaces(); - if (c == '}') { - return output; - } - - while (true) { - assert c == '"'; - String key = parser.nextString(); - c = parser.nextCharSkipSpaces(); - assert c == ':'; - c = parser.nextCharSkipSpaces(); - assert c == '"'; - String value = parser.nextString(); - output.put(key, value); - c = parser.nextCharSkipSpaces(); - if (c == '}') { - return output; - } - assert c == ','; - c = parser.nextCharSkipSpaces(); - } - } - - /** Read the next char, the one at position idx, and advance ix. 
*/ - private char nextChar() { - if (idx >= input.length()) { - throw new IllegalArgumentException("Invalid json input: " + input); - } - return input.charAt(idx++); - } - - /** Same as nextChar, except that it skips space characters (' ', '\t' and '\n'). */ - private char nextCharSkipSpaces() { - char c = nextChar(); - while (c == ' ' || c == '\t' || c == '\n') { - c = nextChar(); - } - return c; - } - - /** - * Reads a String, assuming idx is on the first character of the string (i.e. the one after the - * opening double-quote character). After the string has been read, idx will be on the first - * character after the closing double-quote. - */ - private String nextString() { - assert input.charAt(idx - 1) == '"' : "Char is '" + input.charAt(idx - 1) + '\''; - StringBuilder sb = new StringBuilder(); - while (true) { - char c = nextChar(); - switch (c) { - case '\n': - case '\r': - throw new IllegalArgumentException("Unterminated string"); - case '\\': - c = nextChar(); - switch (c) { - case 'b': - sb.append('\b'); - break; - case 't': - sb.append('\t'); - break; - case 'n': - sb.append('\n'); - break; - case 'f': - sb.append('\f'); - break; - case 'r': - sb.append('\r'); - break; - case 'u': - sb.append((char) Integer.parseInt(input.substring(idx, idx + 4), 16)); - idx += 4; - break; - case '"': - case '\'': - case '\\': - case '/': - sb.append(c); - break; - default: - throw new IllegalArgumentException("Illegal escape"); - } - break; - default: - if (c == '"') { - return sb.toString(); - } - sb.append(c); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java deleted file mode 100644 index a3bda428ef3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultIndexMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultTableMetadata; -import 
com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class TableParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(TableParser.class); - - public TableParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public TableMetadata parseTable( - AdminRow tableRow, CqlIdentifier keyspaceId, Map userTypes) { - // Cassandra <= 2.2: - // CREATE TABLE system.schema_columnfamilies ( - // keyspace_name text, - // columnfamily_name text, - // bloom_filter_fp_chance double, - // caching text, - // cf_id uuid, - // column_aliases text, (2.1 only) - // comment text, - // compaction_strategy_class text, - // compaction_strategy_options text, - // comparator text, - // compression_parameters text, - // default_time_to_live int, - // default_validator text, - // dropped_columns map, - // gc_grace_seconds int, - // index_interval int, - // is_dense boolean, (2.1 only) - // key_aliases text, (2.1 only) - // key_validator text, - // local_read_repair_chance double, - // max_compaction_threshold int, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_compaction_threshold int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // subcomparator text, - // type text, - // value_alias text, (2.1 only) - // PRIMARY KEY (keyspace_name, columnfamily_name) - // ) WITH CLUSTERING 
ORDER BY (columnfamily_name ASC) - // - // Cassandra 3.0: - // CREATE TABLE system_schema.tables ( - // keyspace_name text, - // table_name text, - // bloom_filter_fp_chance double, - // caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // flags frozen>, - // gc_grace_seconds int, - // id uuid, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // PRIMARY KEY (keyspace_name, table_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC) - CqlIdentifier tableId = - CqlIdentifier.fromInternal( - tableRow.getString( - tableRow.contains("table_name") ? "table_name" : "columnfamily_name")); - - UUID uuid = tableRow.contains("id") ? tableRow.getUuid("id") : tableRow.getUuid("cf_id"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - boolean isCompactStorage; - if (tableRow.contains("flags")) { - Set flags = tableRow.getSetOfString("flags"); - boolean isDense = flags.contains("dense"); - boolean isSuper = flags.contains("super"); - boolean isCompound = flags.contains("compound"); - isCompactStorage = isSuper || isDense || !isCompound; - boolean isStaticCompact = !isSuper && !isDense && !isCompound; - if (isStaticCompact) { - RawColumn.pruneStaticCompactTableColumns(rawColumns); - } else if (isDense) { - RawColumn.pruneDenseTableColumnsV3(rawColumns); - } - } else { - boolean isDense = tableRow.getBoolean("is_dense"); - if (isDense) { - RawColumn.pruneDenseTableColumnsV2(rawColumns); - } - 
DataTypeClassNameCompositeParser.ParseResult comparator = - new DataTypeClassNameCompositeParser() - .parseWithComposite(tableRow.getString("comparator"), keyspaceId, userTypes, context); - isCompactStorage = isDense || !comparator.isComposite; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - ImmutableMap.Builder indexesBuilder = ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - ColumnMetadata column = - new DefaultColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - - IndexMetadata index = buildLegacyIndex(raw, column); - if (index != null) { - indexesBuilder.put(index.getName(), index); - } - } - - Map options; - try { - options = parseOptions(tableRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. 
- Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - tableId, - e); - options = Collections.emptyMap(); - } - - Collection indexRows = - rows.indexes().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId); - for (AdminRow indexRow : indexRows) { - IndexMetadata index = buildModernIndex(keyspaceId, tableId, indexRow); - indexesBuilder.put(index.getName(), index); - } - - return new DefaultTableMetadata( - keyspaceId, - tableId, - uuid, - isCompactStorage, - false, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options, - indexesBuilder.build()); - } - - TableMetadata parseVirtualTable( - AdminRow tableRow, CqlIdentifier keyspaceId, Map userTypes) { - - CqlIdentifier tableId = CqlIdentifier.fromInternal(tableRow.getString("table_name")); - - List rawColumns = - RawColumn.toRawColumns( - rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - tableId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - ColumnMetadata column = - new DefaultColumnMetadata( - keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? 
ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - } - - allColumnsBuilder.put(column.getName(), column); - } - - return new DefaultTableMetadata( - keyspaceId, - tableId, - null, - false, - true, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - // In C*<=2.2, index information is stored alongside the column. - private IndexMetadata buildLegacyIndex(RawColumn raw, ColumnMetadata column) { - if (raw.indexName == null) { - return null; - } - return new DefaultIndexMetadata( - column.getKeyspace(), - column.getParent(), - CqlIdentifier.fromInternal(raw.indexName), - IndexKind.valueOf(raw.indexType), - buildLegacyIndexTarget(column, raw.indexOptions), - raw.indexOptions); - } - - private static String buildLegacyIndexTarget(ColumnMetadata column, Map options) { - String columnName = column.getName().asCql(true); - DataType columnType = column.getType(); - if (options.containsKey("index_keys")) { - return String.format("keys(%s)", columnName); - } - if (options.containsKey("index_keys_and_values")) { - return String.format("entries(%s)", columnName); - } - if ((columnType instanceof ListType && ((ListType) columnType).isFrozen()) - || (columnType instanceof SetType && ((SetType) columnType).isFrozen()) - || (columnType instanceof MapType && ((MapType) columnType).isFrozen())) { - return String.format("full(%s)", columnName); - } - // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 - return columnName; - } - - // In C*>=3.0, index information is stored in a dedicated table: - // CREATE TABLE system_schema.indexes ( - // keyspace_name text, - // table_name text, - // index_name text, - // kind text, - // options frozen>, - // PRIMARY KEY (keyspace_name, table_name, index_name) - // ) WITH CLUSTERING ORDER BY (table_name ASC, index_name ASC) - private IndexMetadata buildModernIndex( - CqlIdentifier keyspaceId, 
CqlIdentifier tableId, AdminRow row) { - CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("index_name")); - IndexKind kind = IndexKind.valueOf(row.getString("kind")); - Map options = row.getMapOfStringToString("options"); - String target = options.get("target"); - return new DefaultIndexMetadata(keyspaceId, tableId, name, kind, target, options); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java deleted file mode 100644 index 442f46ee432..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; -import com.datastax.oss.driver.internal.core.util.DirectedGraph; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class UserDefinedTypeParser { - private final DataTypeParser dataTypeParser; - private final InternalDriverContext context; - - public UserDefinedTypeParser(DataTypeParser dataTypeParser, InternalDriverContext context) { - this.dataTypeParser = dataTypeParser; - this.context = context; - } - - /** - * Contrary to other element parsers, this one processes all the types of a keyspace in one go. - * UDTs can depend on each other, but the system table returns them in alphabetical order. In - * order to properly build the definitions, we need to do a topological sort of the rows first, so - * that each type is parsed after its dependencies. 
- */ - public Map parse( - Collection typeRows, CqlIdentifier keyspaceId) { - if (typeRows.isEmpty()) { - return Collections.emptyMap(); - } else { - Map types = new LinkedHashMap<>(); - for (AdminRow row : topologicalSort(typeRows, keyspaceId)) { - UserDefinedType type = parseType(row, keyspaceId, types); - types.put(type.getName(), type); - } - return ImmutableMap.copyOf(types); - } - } - - @VisibleForTesting - Map parse(CqlIdentifier keyspaceId, AdminRow... typeRows) { - return parse(Arrays.asList(typeRows), keyspaceId); - } - - private List topologicalSort(Collection typeRows, CqlIdentifier keyspaceId) { - if (typeRows.size() == 1) { - AdminRow row = typeRows.iterator().next(); - return Collections.singletonList(row); - } else { - DirectedGraph graph = new DirectedGraph<>(typeRows); - for (AdminRow dependent : typeRows) { - for (AdminRow dependency : typeRows) { - if (dependent != dependency && dependsOn(dependent, dependency, keyspaceId)) { - // Edges mean "is depended upon by"; we want the types with no dependencies to come - // first in the sort. 
- graph.addEdge(dependency, dependent); - } - } - } - return graph.topologicalSort(); - } - } - - private boolean dependsOn(AdminRow dependent, AdminRow dependency, CqlIdentifier keyspaceId) { - CqlIdentifier dependencyId = CqlIdentifier.fromInternal(dependency.getString("type_name")); - for (String fieldTypeName : dependent.getListOfString("field_types")) { - DataType fieldType = dataTypeParser.parse(keyspaceId, fieldTypeName, null, context); - if (references(fieldType, dependencyId)) { - return true; - } - } - return false; - } - - private boolean references(DataType dependent, CqlIdentifier dependency) { - if (dependent instanceof UserDefinedType) { - UserDefinedType userType = (UserDefinedType) dependent; - return userType.getName().equals(dependency); - } else if (dependent instanceof ListType) { - ListType listType = (ListType) dependent; - return references(listType.getElementType(), dependency); - } else if (dependent instanceof SetType) { - SetType setType = (SetType) dependent; - return references(setType.getElementType(), dependency); - } else if (dependent instanceof MapType) { - MapType mapType = (MapType) dependent; - return references(mapType.getKeyType(), dependency) - || references(mapType.getValueType(), dependency); - } else if (dependent instanceof TupleType) { - TupleType tupleType = (TupleType) dependent; - for (DataType componentType : tupleType.getComponentTypes()) { - if (references(componentType, dependency)) { - return true; - } - } - } - return false; - } - - private UserDefinedType parseType( - AdminRow row, - CqlIdentifier keyspaceId, - Map userDefinedTypes) { - // Cassandra < 3.0: - // CREATE TABLE system.schema_usertypes ( - // keyspace_name text, - // type_name text, - // field_names list, - // field_types list, - // PRIMARY KEY (keyspace_name, type_name) - // ) WITH CLUSTERING ORDER BY (type_name ASC) - // - // Cassandra >= 3.0: - // CREATE TABLE system_schema.types ( - // keyspace_name text, - // type_name text, - // field_names 
frozen>, - // field_types frozen>, - // PRIMARY KEY (keyspace_name, type_name) - // ) WITH CLUSTERING ORDER BY (type_name ASC) - CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("type_name")); - List fieldNames = - ImmutableList.copyOf( - Lists.transform(row.getListOfString("field_names"), CqlIdentifier::fromInternal)); - List fieldTypes = - dataTypeParser.parse( - keyspaceId, row.getListOfString("field_types"), userDefinedTypes, context); - - return new DefaultUserDefinedType(keyspaceId, name, false, fieldNames, fieldTypes, context); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java deleted file mode 100644 index 52773ea1c45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultViewMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class ViewParser extends RelationParser { - - private static final Logger LOG = LoggerFactory.getLogger(ViewParser.class); - - public ViewParser(SchemaRows rows, InternalDriverContext context) { - super(rows, context); - } - - public ViewMetadata parseView( - AdminRow viewRow, CqlIdentifier keyspaceId, Map userTypes) { - // Cassandra 3.0 (no views in earlier versions): - // CREATE TABLE system_schema.views ( - // keyspace_name text, - // view_name text, - // base_table_id uuid, - // base_table_name text, - // bloom_filter_fp_chance double, - 
// caching frozen>, - // cdc boolean, - // comment text, - // compaction frozen>, - // compression frozen>, - // crc_check_chance double, - // dclocal_read_repair_chance double, - // default_time_to_live int, - // extensions frozen>, - // gc_grace_seconds int, - // id uuid, - // include_all_columns boolean, - // max_index_interval int, - // memtable_flush_period_in_ms int, - // min_index_interval int, - // read_repair_chance double, - // speculative_retry text, - // where_clause text, - // PRIMARY KEY (keyspace_name, view_name) - // ) WITH CLUSTERING ORDER BY (view_name ASC) - CqlIdentifier viewId = CqlIdentifier.fromInternal(viewRow.getString("view_name")); - - UUID uuid = viewRow.getUuid("id"); - CqlIdentifier baseTableId = CqlIdentifier.fromInternal(viewRow.getString("base_table_name")); - boolean includesAllColumns = - MoreObjects.firstNonNull(viewRow.getBoolean("include_all_columns"), false); - String whereClause = viewRow.getString("where_clause"); - - List rawColumns = - RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId)); - if (rawColumns.isEmpty()) { - LOG.warn( - "[{}] Processing VIEW refresh for {}.{} but found no matching rows, skipping", - logPrefix, - keyspaceId, - viewId); - return null; - } - - Collections.sort(rawColumns); - ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); - ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); - ImmutableMap.Builder clusteringColumnsBuilder = - ImmutableMap.builder(); - - for (RawColumn raw : rawColumns) { - DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); - ColumnMetadata column = - new DefaultColumnMetadata( - keyspaceId, viewId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); - switch (raw.kind) { - case RawColumn.KIND_PARTITION_KEY: - partitionKeyBuilder.add(column); - break; - case RawColumn.KIND_CLUSTERING_COLUMN: - clusteringColumnsBuilder.put( - column, raw.reversed ? 
ClusteringOrder.DESC : ClusteringOrder.ASC); - break; - default: - // nothing to do - } - allColumnsBuilder.put(column.getName(), column); - } - - Map options; - try { - options = parseOptions(viewRow); - } catch (Exception e) { - // Options change the most often, so be especially lenient if anything goes wrong. - Loggers.warnWithException( - LOG, - "[{}] Error while parsing options for {}.{}, getOptions() will be empty", - logPrefix, - keyspaceId, - viewId, - e); - options = Collections.emptyMap(); - } - - return new DefaultViewMetadata( - keyspaceId, - viewId, - baseTableId, - includesAllColumns, - whereClause, - uuid, - partitionKeyBuilder.build(), - clusteringColumnsBuilder.build(), - allColumnsBuilder.build(), - options); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java deleted file mode 100644 index 7577fd1bb92..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra21SchemaQueries extends CassandraSchemaQueries { - public Cassandra21SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected String selectKeyspacesQuery() { - return "SELECT * FROM system.schema_keyspaces"; - } - - @Override - protected String selectTablesQuery() { - return "SELECT * FROM system.schema_columnfamilies"; - } - - @Override - protected Optional selectViewsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectIndexesQuery() { - return Optional.empty(); - } - - @Override - protected String selectColumnsQuery() { - return "SELECT * FROM system.schema_columns"; - } - - @Override - protected String selectTypesQuery() { - return "SELECT * FROM system.schema_usertypes"; - } - - @Override - protected Optional selectFunctionsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectAggregatesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java deleted file mode 100644 index ff09917b3c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra22SchemaQueries extends CassandraSchemaQueries { - public Cassandra22SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected String selectKeyspacesQuery() { - return "SELECT * FROM system.schema_keyspaces"; - } - - @Override - protected String selectTablesQuery() { - return "SELECT * FROM system.schema_columnfamilies"; - } - - @Override - protected Optional selectViewsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectIndexesQuery() { - return Optional.empty(); - } - - @Override - protected String selectColumnsQuery() { - return "SELECT * FROM system.schema_columns"; - } - - @Override - protected String selectTypesQuery() { - return "SELECT * FROM system.schema_usertypes"; - } - - @Override - protected Optional selectFunctionsQuery() { - return Optional.of("SELECT * FROM system.schema_functions"); - } - - @Override - protected Optional selectAggregatesQuery() { - return Optional.of("SELECT * FROM system.schema_aggregates"); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.empty(); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java deleted file mode 100644 index 8c36d0f4217..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra3SchemaQueries extends CassandraSchemaQueries { - public Cassandra3SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected String selectKeyspacesQuery() { - return "SELECT * FROM system_schema.keyspaces"; - } - - @Override - protected String selectTablesQuery() { - return "SELECT * FROM system_schema.tables"; - } - - @Override - protected Optional selectViewsQuery() { - return Optional.of("SELECT * FROM system_schema.views"); - } - - @Override - protected Optional selectIndexesQuery() { - return Optional.of("SELECT * FROM system_schema.indexes"); - } - - @Override - protected String selectColumnsQuery() { - return "SELECT * FROM system_schema.columns"; - } - - @Override - protected String selectTypesQuery() { - return "SELECT * FROM system_schema.types"; - } - - @Override - protected Optional selectFunctionsQuery() { - return Optional.of("SELECT * FROM system_schema.functions"); - } - - @Override - protected Optional selectAggregatesQuery() { - return Optional.of("SELECT * FROM system_schema.aggregates"); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.empty(); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.empty(); - } -} 
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java deleted file mode 100644 index e2de0b419ed..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Cassandra4SchemaQueries extends Cassandra3SchemaQueries { - public Cassandra4SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected Optional selectVirtualKeyspacesQuery() { - return Optional.of("SELECT * FROM system_virtual_schema.keyspaces"); - } - - @Override - protected Optional selectVirtualTablesQuery() { - return Optional.of("SELECT * FROM system_virtual_schema.tables"); - } - - @Override - protected Optional selectVirtualColumnsQuery() { - return Optional.of("SELECT * FROM system_virtual_schema.columns"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java deleted file mode 100644 index 92ab2501c12..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import io.netty.util.concurrent.EventExecutor; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public abstract class CassandraSchemaQueries implements SchemaQueries { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaQueries.class); - - private final DriverChannel channel; - private final EventExecutor adminExecutor; - private final Node node; - private final String logPrefix; - private final Duration timeout; - private final int pageSize; - private 
final KeyspaceFilter keyspaceFilter; - // The future we return from execute, completes when all the queries are done. - private final CompletableFuture schemaRowsFuture = new CompletableFuture<>(); - private final long startTimeNs = System.nanoTime(); - - // All non-final fields are accessed exclusively on adminExecutor - private CassandraSchemaRows.Builder schemaRowsBuilder; - private int pendingQueries; - - protected CassandraSchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - this.channel = channel; - this.adminExecutor = channel.eventLoop(); - this.node = node; - this.logPrefix = logPrefix; - this.timeout = config.getDuration(DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT); - this.pageSize = config.getInt(DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE); - - List refreshedKeyspaces = - config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - assert refreshedKeyspaces != null; // per the default value - this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); - } - - protected abstract String selectKeyspacesQuery(); - - protected abstract Optional selectVirtualKeyspacesQuery(); - - protected abstract String selectTablesQuery(); - - protected abstract Optional selectVirtualTablesQuery(); - - protected abstract Optional selectViewsQuery(); - - protected abstract Optional selectIndexesQuery(); - - protected abstract String selectColumnsQuery(); - - protected abstract Optional selectVirtualColumnsQuery(); - - protected abstract String selectTypesQuery(); - - protected abstract Optional selectFunctionsQuery(); - - protected abstract Optional selectAggregatesQuery(); - - protected abstract Optional selectEdgesQuery(); - - protected abstract Optional selectVerticiesQuery(); - - @Override - public CompletionStage execute() { - RunOrSchedule.on(adminExecutor, this::executeOnAdminExecutor); - return schemaRowsFuture; - } - - private 
void executeOnAdminExecutor() { - assert adminExecutor.inEventLoop(); - - schemaRowsBuilder = new CassandraSchemaRows.Builder(node, keyspaceFilter, logPrefix); - String whereClause = keyspaceFilter.getWhereClause(); - - query(selectKeyspacesQuery() + whereClause, schemaRowsBuilder::withKeyspaces); - query(selectTypesQuery() + whereClause, schemaRowsBuilder::withTypes); - query(selectTablesQuery() + whereClause, schemaRowsBuilder::withTables); - query(selectColumnsQuery() + whereClause, schemaRowsBuilder::withColumns); - selectIndexesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withIndexes)); - selectViewsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withViews)); - selectFunctionsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withFunctions)); - selectAggregatesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withAggregates)); - selectVirtualKeyspacesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualKeyspaces)); - selectVirtualTablesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualTables)); - selectVirtualColumnsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualColumns)); - selectEdgesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withEdges)); - selectVerticiesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVertices)); - } - - private void query( - String queryString, - Function, CassandraSchemaRows.Builder> builderUpdater) { - assert adminExecutor.inEventLoop(); - - pendingQueries += 1; - query(queryString) - .whenCompleteAsync( - (result, error) -> handleResult(result, error, builderUpdater), adminExecutor); - } - - @VisibleForTesting - protected CompletionStage query(String query) { - return AdminRequestHandler.query(channel, query, timeout, pageSize, 
logPrefix).start(); - } - - private void handleResult( - AdminResult result, - Throwable error, - Function, CassandraSchemaRows.Builder> builderUpdater) { - - // If another query already failed, we've already propagated the failure so just ignore this one - if (schemaRowsFuture.isCompletedExceptionally()) { - return; - } - - if (error != null) { - schemaRowsFuture.completeExceptionally(error); - } else { - // Store the rows of the current page in the builder - schemaRowsBuilder = builderUpdater.apply(result); - if (result.hasNextPage()) { - result - .nextPage() - .whenCompleteAsync( - (nextResult, nextError) -> handleResult(nextResult, nextError, builderUpdater), - adminExecutor); - } else { - pendingQueries -= 1; - if (pendingQueries == 0) { - LOG.debug( - "[{}] Schema queries took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - schemaRowsFuture.complete(schemaRowsBuilder.build()); - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java deleted file mode 100644 index 95af0739300..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeCqlNameParser; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Immutable -public class CassandraSchemaRows implements SchemaRows { - - private final Node node; - private final DataTypeParser dataTypeParser; - private final List keyspaces; - private final List virtualKeyspaces; - private final Multimap tables; - private final Multimap virtualTables; - private final Multimap 
views; - private final Multimap types; - private final Multimap functions; - private final Multimap aggregates; - private final Map> columns; - private final Map> virtualColumns; - private final Map> indexes; - private final Map> vertices; - private final Map> edges; - - private CassandraSchemaRows( - Node node, - DataTypeParser dataTypeParser, - List keyspaces, - List virtualKeyspaces, - Multimap tables, - Multimap virtualTables, - Multimap views, - Map> columns, - Map> virtualColumns, - Map> indexes, - Multimap types, - Multimap functions, - Multimap aggregates, - Map> vertices, - Map> edges) { - this.node = node; - this.dataTypeParser = dataTypeParser; - this.keyspaces = keyspaces; - this.virtualKeyspaces = virtualKeyspaces; - this.tables = tables; - this.virtualTables = virtualTables; - this.views = views; - this.columns = columns; - this.virtualColumns = virtualColumns; - this.indexes = indexes; - this.types = types; - this.functions = functions; - this.aggregates = aggregates; - this.vertices = vertices; - this.edges = edges; - } - - @NonNull - @Override - public Node getNode() { - return node; - } - - @Override - public DataTypeParser dataTypeParser() { - return dataTypeParser; - } - - @Override - public List keyspaces() { - return keyspaces; - } - - @Override - public List virtualKeyspaces() { - return virtualKeyspaces; - } - - @Override - public Multimap tables() { - return tables; - } - - @Override - public Multimap virtualTables() { - return virtualTables; - } - - @Override - public Multimap views() { - return views; - } - - @Override - public Multimap types() { - return types; - } - - @Override - public Multimap functions() { - return functions; - } - - @Override - public Multimap aggregates() { - return aggregates; - } - - @Override - public Map> columns() { - return columns; - } - - @Override - public Map> virtualColumns() { - return virtualColumns; - } - - @Override - public Map> indexes() { - return indexes; - } - - @Override - public Map> 
vertices() { - return vertices; - } - - @Override - public Map> edges() { - return edges; - } - - public static class Builder { - private static final Logger LOG = LoggerFactory.getLogger(Builder.class); - - private final Node node; - private final DataTypeParser dataTypeParser; - private final String tableNameColumn; - private final KeyspaceFilter keyspaceFilter; - private final String logPrefix; - private final ImmutableList.Builder keyspacesBuilder = ImmutableList.builder(); - private final ImmutableList.Builder virtualKeyspacesBuilder = ImmutableList.builder(); - private final ImmutableMultimap.Builder tablesBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder virtualTablesBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder viewsBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder typesBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder functionsBuilder = - ImmutableListMultimap.builder(); - private final ImmutableMultimap.Builder aggregatesBuilder = - ImmutableListMultimap.builder(); - private final Map> - columnsBuilders = new LinkedHashMap<>(); - private final Map> - virtualColumnsBuilders = new LinkedHashMap<>(); - private final Map> - indexesBuilders = new LinkedHashMap<>(); - private final Map> - verticesBuilders = new LinkedHashMap<>(); - private final Map> - edgesBuilders = new LinkedHashMap<>(); - - public Builder(Node node, KeyspaceFilter keyspaceFilter, String logPrefix) { - this.node = node; - this.keyspaceFilter = keyspaceFilter; - this.logPrefix = logPrefix; - if (isCassandraV3OrAbove(node)) { - this.tableNameColumn = "table_name"; - this.dataTypeParser = new DataTypeCqlNameParser(); - } else { - this.tableNameColumn = "columnfamily_name"; - this.dataTypeParser = new DataTypeClassNameParser(); - } - } - - private static boolean isCassandraV3OrAbove(Node node) { - // We already did those checks in 
DefaultSchemaQueriesFactory. - // We could pass along booleans (isCassandraV3, isDse...), but passing the whole Node is - // better for maintainability, in case we need to do more checks in downstream components in - // the future. - Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion != null) { - dseVersion = dseVersion.nextStable(); - return dseVersion.compareTo(Version.V5_0_0) >= 0; - } else { - Version cassandraVersion = node.getCassandraVersion(); - if (cassandraVersion == null) { - cassandraVersion = Version.V3_0_0; - } else { - cassandraVersion = cassandraVersion.nextStable(); - } - return cassandraVersion.compareTo(Version.V3_0_0) >= 0; - } - } - - public Builder withKeyspaces(Iterable rows) { - for (AdminRow row : rows) { - put(keyspacesBuilder, row); - } - return this; - } - - public Builder withVirtualKeyspaces(Iterable rows) { - for (AdminRow row : rows) { - put(virtualKeyspacesBuilder, row); - } - return this; - } - - public Builder withTables(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, tablesBuilder); - } - return this; - } - - public Builder withVirtualTables(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, virtualTablesBuilder); - } - return this; - } - - public Builder withViews(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, viewsBuilder); - } - return this; - } - - public Builder withTypes(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, typesBuilder); - } - return this; - } - - public Builder withFunctions(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, functionsBuilder); - } - return this; - } - - public Builder withAggregates(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspace(row, aggregatesBuilder); - } - return this; - } - - public Builder withColumns(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, columnsBuilders); - } - return this; - } - - public 
Builder withVirtualColumns(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, virtualColumnsBuilders); - } - return this; - } - - public Builder withIndexes(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, indexesBuilders); - } - return this; - } - - public Builder withVertices(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, verticesBuilders); - } - return this; - } - - public Builder withEdges(Iterable rows) { - for (AdminRow row : rows) { - putByKeyspaceAndTable(row, edgesBuilders); - } - return this; - } - - private void put(ImmutableList.Builder builder, AdminRow row) { - String keyspace = row.getString("keyspace_name"); - if (keyspace == null) { - LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else if (keyspaceFilter.includes(keyspace)) { - builder.add(row); - } - } - - private void putByKeyspace( - AdminRow row, ImmutableMultimap.Builder builder) { - String keyspace = row.getString("keyspace_name"); - if (keyspace == null) { - LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else if (keyspaceFilter.includes(keyspace)) { - builder.put(CqlIdentifier.fromInternal(keyspace), row); - } - } - - private void putByKeyspaceAndTable( - AdminRow row, - Map> builders) { - String keyspace = row.getString("keyspace_name"); - String table = row.getString(tableNameColumn); - if (keyspace == null) { - LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else if (table == null) { - LOG.warn("[{}] Skipping system row with missing table name", logPrefix); - } else if (keyspaceFilter.includes(keyspace)) { - ImmutableMultimap.Builder builder = - builders.computeIfAbsent( - CqlIdentifier.fromInternal(keyspace), s -> ImmutableListMultimap.builder()); - builder.put(CqlIdentifier.fromInternal(table), row); - } - } - - public CassandraSchemaRows build() { - return new CassandraSchemaRows( - node, - dataTypeParser, - 
keyspacesBuilder.build(), - virtualKeyspacesBuilder.build(), - tablesBuilder.build(), - virtualTablesBuilder.build(), - viewsBuilder.build(), - build(columnsBuilders), - build(virtualColumnsBuilders), - build(indexesBuilders), - typesBuilder.build(), - functionsBuilder.build(), - aggregatesBuilder.build(), - build(verticesBuilders), - build(edgesBuilders)); - } - - private static Map> build( - Map> builders) { - ImmutableMap.Builder> builder = ImmutableMap.builder(); - builders - .entrySet() - .forEach( - (entry) -> { - builder.put(entry.getKey(), entry.getValue().build()); - }); - return builder.build(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java deleted file mode 100644 index e537475ed7b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DefaultSchemaQueriesFactory implements SchemaQueriesFactory { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultSchemaQueriesFactory.class); - - protected final InternalDriverContext context; - protected final String logPrefix; - - public DefaultSchemaQueriesFactory(InternalDriverContext context) { - this.context = context; - this.logPrefix = context.getSessionName(); - } - - @Override - public SchemaQueries newInstance() { - DriverChannel channel = context.getControlConnection().channel(); - if (channel == null || channel.closeFuture().isDone()) { - throw new IllegalStateException("Control channel not available, aborting schema refresh"); - } - Node node = - context - .getMetadataManager() - .getMetadata() - .findNode(channel.getEndPoint()) - .orElseThrow( - () -> - new IllegalStateException( - "Could not find control node metadata " - + channel.getEndPoint() - + ", aborting schema refresh")); - return newInstance(node, channel); - } - - protected SchemaQueries newInstance(Node node, DriverChannel channel) { - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); - if (dseVersion != null) { - dseVersion = dseVersion.nextStable(); - - LOG.debug( - "[{}] Sending schema queries to {} with DSE version {}", logPrefix, node, dseVersion); - // 4.8 is the 
oldest version supported, which uses C* 2.1 schema - if (dseVersion.compareTo(Version.V5_0_0) < 0) { - return new Cassandra21SchemaQueries(channel, node, config, logPrefix); - } else if (dseVersion.compareTo(Version.V6_7_0) < 0) { - // 5.0 - 6.7 uses C* 3.0 schema - return new Cassandra3SchemaQueries(channel, node, config, logPrefix); - } else if (dseVersion.compareTo(Version.V6_8_0) < 0) { - // 6.7 uses C* 4.0 schema - return new Cassandra4SchemaQueries(channel, node, config, logPrefix); - } else { - // 6.8+ uses DSE 6.8 schema (C* 4.0 schema with graph metadata) (JAVA-1898) - return new Dse68SchemaQueries(channel, node, config, logPrefix); - } - } else { - Version cassandraVersion = node.getCassandraVersion(); - if (cassandraVersion == null) { - LOG.warn( - "[{}] Cassandra version missing for {}, defaulting to {}", - logPrefix, - node, - Version.V3_0_0); - cassandraVersion = Version.V3_0_0; - } else { - cassandraVersion = cassandraVersion.nextStable(); - } - LOG.debug( - "[{}] Sending schema queries to {} with version {}", logPrefix, node, cassandraVersion); - if (cassandraVersion.compareTo(Version.V2_2_0) < 0) { - return new Cassandra21SchemaQueries(channel, node, config, logPrefix); - } else if (cassandraVersion.compareTo(Version.V3_0_0) < 0) { - return new Cassandra22SchemaQueries(channel, node, config, logPrefix); - } else if (cassandraVersion.compareTo(Version.V4_0_0) < 0) { - return new Cassandra3SchemaQueries(channel, node, config, logPrefix); - } else { - return new Cassandra4SchemaQueries(channel, node, config, logPrefix); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java deleted file mode 100644 index 460df8b59e5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java +++ /dev/null @@ -1,46 +0,0 @@ 
-/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Optional; - -/** - * The system table queries to refresh the schema in DSE 6.8. - * - *

There are two additional tables for per-table graph metadata. - */ -public class Dse68SchemaQueries extends Cassandra4SchemaQueries { - - public Dse68SchemaQueries( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected Optional selectEdgesQuery() { - return Optional.of("SELECT * FROM system_schema.edges"); - } - - @Override - protected Optional selectVerticiesQuery() { - return Optional.of("SELECT * FROM system_schema.vertices"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java deleted file mode 100644 index a483a904f6e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -/** - * Filters keyspaces during schema metadata queries. - * - *

Depending on the circumstances, we do it either on the server side with a WHERE IN clause that - * will be appended to every query, or on the client side with a predicate that will be applied to - * every fetched row. - */ -public interface KeyspaceFilter { - - static KeyspaceFilter newInstance(@NonNull String logPrefix, @NonNull List specs) { - if (specs.isEmpty()) { - return INCLUDE_ALL; - } else { - return new RuleBasedKeyspaceFilter(logPrefix, specs); - } - } - - /** The WHERE IN clause, or an empty string if there is no server-side filtering. */ - @NonNull - String getWhereClause(); - - /** The predicate that will be invoked for client-side filtering. */ - boolean includes(@NonNull String keyspace); - - KeyspaceFilter INCLUDE_ALL = - new KeyspaceFilter() { - @NonNull - @Override - public String getWhereClause() { - return ""; - } - - @Override - public boolean includes(@NonNull String keyspace) { - return true; - } - }; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java deleted file mode 100644 index 38a8c116c45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.function.Predicate; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Filters keyspaces during schema metadata queries. - * - *

Depending on the circumstances, we do it either on the server side with a WHERE IN clause that - * will be appended to every query, or on the client side with a predicate that will be applied to - * every fetched row. - */ -class RuleBasedKeyspaceFilter implements KeyspaceFilter { - - private static final Logger LOG = LoggerFactory.getLogger(RuleBasedKeyspaceFilter.class); - - private static final Pattern EXACT_INCLUDE = Pattern.compile("\\w+"); - private static final Pattern EXACT_EXCLUDE = Pattern.compile("!\\s*(\\w+)"); - private static final Pattern REGEX_INCLUDE = Pattern.compile("/(.+)/"); - private static final Pattern REGEX_EXCLUDE = Pattern.compile("!\\s*/(.+)/"); - - private final String logPrefix; - private final String whereClause; - private final Set exactIncludes = new HashSet<>(); - private final Set exactExcludes = new HashSet<>(); - private final List> regexIncludes = new ArrayList<>(); - private final List> regexExcludes = new ArrayList<>(); - - private final boolean isDebugEnabled; - private final Set loggedKeyspaces; - - RuleBasedKeyspaceFilter(@NonNull String logPrefix, @NonNull List specs) { - assert !specs.isEmpty(); // see KeyspaceFilter#newInstance - - this.logPrefix = logPrefix; - for (String spec : specs) { - spec = spec.trim(); - Matcher matcher; - if (EXACT_INCLUDE.matcher(spec).matches()) { - exactIncludes.add(spec); - if (exactExcludes.remove(spec)) { - LOG.warn( - "[{}] '{}' is both included and excluded, ignoring the exclusion", logPrefix, spec); - } - } else if ((matcher = EXACT_EXCLUDE.matcher(spec)).matches()) { - String name = matcher.group(1); - if (exactIncludes.contains(name)) { - LOG.warn( - "[{}] '{}' is both included and excluded, ignoring the exclusion", logPrefix, name); - } else { - exactExcludes.add(name); - } - } else if ((matcher = REGEX_INCLUDE.matcher(spec)).matches()) { - compile(matcher.group(1)).map(regexIncludes::add); - } else if ((matcher = REGEX_EXCLUDE.matcher(spec)).matches()) { - 
compile(matcher.group(1)).map(regexExcludes::add); - } else { - LOG.warn( - "[{}] Error while parsing {}: invalid element '{}', skipping", - logPrefix, - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath(), - spec); - } - } - - if (!exactIncludes.isEmpty() && regexIncludes.isEmpty() && regexExcludes.isEmpty()) { - // We can filter on the server - whereClause = buildWhereClause(exactIncludes); - if (!exactExcludes.isEmpty()) { - // Proceed, but this is probably a mistake - LOG.warn( - "[{}] {} only has exact includes and excludes, the excludes are redundant", - logPrefix, - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath()); - } - LOG.debug("[{}] Filtering server-side with '{}'", logPrefix, whereClause); - } else { - whereClause = ""; - LOG.debug("[{}] No server-side filtering", logPrefix); - } - - isDebugEnabled = LOG.isDebugEnabled(); - loggedKeyspaces = isDebugEnabled ? new HashSet<>() : null; - } - - @NonNull - @Override - public String getWhereClause() { - return whereClause; - } - - @Override - public boolean includes(@NonNull String keyspace) { - if (exactIncludes.contains(keyspace)) { - log(keyspace, true, "it is included by name"); - return true; - } else if (exactExcludes.contains(keyspace)) { - log(keyspace, false, "it is excluded by name"); - return false; - } else if (regexIncludes.isEmpty()) { - if (regexExcludes.isEmpty()) { - log(keyspace, false, "it is not included by name"); - return false; - } else if (matchesAny(keyspace, regexExcludes)) { - log(keyspace, false, "it matches at least one regex exclude"); - return false; - } else { - log(keyspace, true, "it does not match any regex exclude"); - return true; - } - } else { // !regexIncludes.isEmpty() - if (regexExcludes.isEmpty()) { - if (matchesAny(keyspace, regexIncludes)) { - log(keyspace, true, "it matches at least one regex include"); - return true; - } else { - log(keyspace, false, "it does not match any regex include"); - return false; - } - } else { - if 
(matchesAny(keyspace, regexIncludes) && !matchesAny(keyspace, regexExcludes)) { - log(keyspace, true, "it matches at least one regex include, and no regex exclude"); - return true; - } else { - log(keyspace, false, "it matches either no regex include, or at least one regex exclude"); - return false; - } - } - } - } - - private void log(@NonNull String keyspace, boolean include, @NonNull String reason) { - if (isDebugEnabled && loggedKeyspaces.add(keyspace)) { - LOG.debug( - "[{}] Filtering {} '{}' because {}", logPrefix, include ? "in" : "out", keyspace, reason); - } - } - - private boolean matchesAny(String keyspace, List> rules) { - for (Predicate rule : rules) { - if (rule.test(keyspace)) { - return true; - } - } - return false; - } - - private Optional> compile(String regex) { - try { - return Optional.of(Pattern.compile(regex).asPredicate()); - } catch (PatternSyntaxException e) { - LOG.warn( - "[{}] Error while parsing {}: syntax error in regex /{}/ ({}), skipping", - this.logPrefix, - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath(), - regex, - e.getMessage()); - return Optional.empty(); - } - } - - private static String buildWhereClause(Set keyspaces) { - StringBuilder builder = new StringBuilder(" WHERE keyspace_name IN ("); - boolean first = true; - for (String keyspace : keyspaces) { - if (first) { - first = false; - } else { - builder.append(","); - } - builder.append('\'').append(keyspace).append('\''); - } - return builder.append(')').toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java deleted file mode 100644 index 613f43197e2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import java.util.concurrent.CompletionStage; - -/** - * Manages the queries to system tables during a schema refresh. - * - *

They are all asynchronous, and possibly paged. This class abstracts all the details and - * exposes a common result type. - * - *

Implementations must be thread-safe. - */ -public interface SchemaQueries { - - /** - * Launch the queries asynchronously, returning a future that will complete when they have all - * succeeded. - */ - CompletionStage execute(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java deleted file mode 100644 index 32d1ae684ef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -public interface SchemaQueriesFactory { - SchemaQueries newInstance(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java deleted file mode 100644 index 0507b8cffd1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; -import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * The system rows returned by the queries for a schema refresh, categorized by keyspace/table where - * relevant. - * - *

Implementations must be thread-safe. - */ -public interface SchemaRows { - - /** The node that was used to retrieve the schema information. */ - @NonNull - Node getNode(); - - List keyspaces(); - - List virtualKeyspaces(); - - Multimap tables(); - - Multimap virtualTables(); - - Multimap views(); - - Multimap types(); - - Multimap functions(); - - Multimap aggregates(); - - Map> columns(); - - Map> virtualColumns(); - - Map> indexes(); - - DataTypeParser dataTypeParser(); - - default Map> vertices() { - return new LinkedHashMap<>(); - } - - default Map> edges() { - return new LinkedHashMap<>(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java deleted file mode 100644 index 86a4d1912f4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.refresh; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.events.AggregateChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.FunctionChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.ViewChangeEvent; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.Map; -import java.util.function.BiFunction; -import java.util.function.Function; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SchemaRefresh implements MetadataRefresh { - - @VisibleForTesting public final Map newKeyspaces; - - public SchemaRefresh(Map newKeyspaces) { - this.newKeyspaces = newKeyspaces; - } - - @Override - public Result compute( - DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { - ImmutableList.Builder events = ImmutableList.builder(); - - Map oldKeyspaces = oldMetadata.getKeyspaces(); - for (CqlIdentifier removedKey : Sets.difference(oldKeyspaces.keySet(), newKeyspaces.keySet())) { - events.add(KeyspaceChangeEvent.dropped(oldKeyspaces.get(removedKey))); - } - for (Map.Entry entry : newKeyspaces.entrySet()) 
{ - CqlIdentifier key = entry.getKey(); - computeEvents(oldKeyspaces.get(key), entry.getValue(), events); - } - - return new Result( - oldMetadata.withSchema(this.newKeyspaces, tokenMapEnabled, context), events.build()); - } - - /** - * Computes the exact set of events to emit when a keyspace has changed. - * - *

We can't simply emit {@link KeyspaceChangeEvent#updated(KeyspaceMetadata, KeyspaceMetadata)} - * because this method might be called as part of a full schema refresh, or a keyspace refresh - * initiated by coalesced child element refreshes. We need to traverse all children to check what - * has exactly changed. - */ - private void computeEvents( - KeyspaceMetadata oldKeyspace, - KeyspaceMetadata newKeyspace, - ImmutableList.Builder events) { - if (oldKeyspace == null) { - events.add(KeyspaceChangeEvent.created(newKeyspace)); - } else { - if (!oldKeyspace.shallowEquals(newKeyspace)) { - events.add(KeyspaceChangeEvent.updated(oldKeyspace, newKeyspace)); - } - computeChildEvents(oldKeyspace, newKeyspace, events); - } - } - - private void computeChildEvents( - KeyspaceMetadata oldKeyspace, - KeyspaceMetadata newKeyspace, - ImmutableList.Builder events) { - computeChildEvents( - oldKeyspace.getTables(), - newKeyspace.getTables(), - TableChangeEvent::dropped, - TableChangeEvent::created, - TableChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getViews(), - newKeyspace.getViews(), - ViewChangeEvent::dropped, - ViewChangeEvent::created, - ViewChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getUserDefinedTypes(), - newKeyspace.getUserDefinedTypes(), - TypeChangeEvent::dropped, - TypeChangeEvent::created, - TypeChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getFunctions(), - newKeyspace.getFunctions(), - FunctionChangeEvent::dropped, - FunctionChangeEvent::created, - FunctionChangeEvent::updated, - events); - computeChildEvents( - oldKeyspace.getAggregates(), - newKeyspace.getAggregates(), - AggregateChangeEvent::dropped, - AggregateChangeEvent::created, - AggregateChangeEvent::updated, - events); - } - - private void computeChildEvents( - Map oldChildren, - Map newChildren, - Function newDroppedEvent, - Function newCreatedEvent, - BiFunction newUpdatedEvent, - ImmutableList.Builder events) { - for (K removedKey 
: Sets.difference(oldChildren.keySet(), newChildren.keySet())) { - events.add(newDroppedEvent.apply(oldChildren.get(removedKey))); - } - for (Map.Entry entry : newChildren.entrySet()) { - K key = entry.getKey(); - V newChild = entry.getValue(); - V oldChild = oldChildren.get(key); - if (oldChild == null) { - events.add(newCreatedEvent.apply(newChild)); - } else if (!oldChild.equals(newChild)) { - events.add(newUpdatedEvent.apply(oldChild, newChild)); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java deleted file mode 100644 index ff7642d0c18..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import net.jcip.annotations.Immutable; - -/** A token generated by {@code ByteOrderedPartitioner}. */ -@Immutable -public class ByteOrderedToken implements Token { - - private final ByteBuffer value; - - public ByteOrderedToken(@NonNull ByteBuffer value) { - this.value = ByteBuffer.wrap(Bytes.getArray(value)).asReadOnlyBuffer(); - } - - @NonNull - public ByteBuffer getValue() { - return value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ByteOrderedToken) { - ByteOrderedToken that = (ByteOrderedToken) other; - return this.value.equals(that.getValue()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public int compareTo(@NonNull Token other) { - Preconditions.checkArgument( - other instanceof ByteOrderedToken, "Can only compare tokens of the same type"); - return UnsignedBytes.lexicographicalComparator() - .compare(Bytes.getArray(value), Bytes.getArray(((ByteOrderedToken) other).value)); - } - - @Override - public String toString() { - return "ByteOrderedToken(" + Bytes.toHexString(value) + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java deleted file mode 100644 index 5dc3aa3aa45..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java +++ /dev/null @@ -1,76 +0,0 
@@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ByteOrderedTokenFactory implements TokenFactory { - - public static final String PARTITIONER_NAME = "org.apache.cassandra.dht.ByteOrderedPartitioner"; - - public static final ByteOrderedToken MIN_TOKEN = new ByteOrderedToken(ByteBuffer.allocate(0)); - - @Override - public String getPartitionerName() { - return PARTITIONER_NAME; - } - - @Override - public Token hash(ByteBuffer partitionKey) { - return new ByteOrderedToken(partitionKey); - } - - @Override - public Token parse(String tokenString) { - // This method must be able to parse the contents of system.peers.tokens, which do not have the - // "0x" prefix. 
On the other hand, OPPToken#toString has the "0x" because it should be usable in - // a CQL query, and it's nice to have fromString and toString symmetrical. So handle both cases: - if (!tokenString.startsWith("0x")) { - String prefix = (tokenString.length() % 2 == 0) ? "0x" : "0x0"; - tokenString = prefix + tokenString; - } - ByteBuffer value = Bytes.fromHexString(tokenString); - return new ByteOrderedToken(value); - } - - @Override - public String format(Token token) { - Preconditions.checkArgument( - token instanceof ByteOrderedToken, "Can only format ByteOrderedToken instances"); - return Bytes.toHexString(((ByteOrderedToken) token).getValue()); - } - - @Override - public Token minToken() { - return MIN_TOKEN; - } - - @Override - public TokenRange range(Token start, Token end) { - Preconditions.checkArgument( - start instanceof ByteOrderedToken && end instanceof ByteOrderedToken, - "Can only build ranges of ByteOrderedToken instances"); - return new ByteOrderedTokenRange(((ByteOrderedToken) start), ((ByteOrderedToken) end)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java deleted file mode 100644 index 7e95b7c01c9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class ByteOrderedTokenRange extends TokenRangeBase { - - private static final BigInteger TWO = BigInteger.valueOf(2); - - public ByteOrderedTokenRange(ByteOrderedToken start, ByteOrderedToken end) { - super(start, end, ByteOrderedTokenFactory.MIN_TOKEN); - } - - @Override - protected TokenRange newTokenRange(Token start, Token end) { - return new ByteOrderedTokenRange(((ByteOrderedToken) start), ((ByteOrderedToken) end)); - } - - @Override - protected List split(Token rawStartToken, Token rawEndToken, int numberOfSplits) { - int tokenOrder = rawStartToken.compareTo(rawEndToken); - - // ]min,min] means the whole ring. 
However, since there is no "max token" with this partitioner, - // we can't come up with a magic end value that would cover the whole ring - if (tokenOrder == 0 && rawStartToken.equals(ByteOrderedTokenFactory.MIN_TOKEN)) { - throw new IllegalArgumentException("Cannot split whole ring with ordered partitioner"); - } - - ByteOrderedToken startToken = (ByteOrderedToken) rawStartToken; - ByteOrderedToken endToken = (ByteOrderedToken) rawEndToken; - - int significantBytes; - BigInteger start, end, range, ringEnd, ringLength; - BigInteger bigNumberOfSplits = BigInteger.valueOf(numberOfSplits); - if (tokenOrder < 0) { - // Since tokens are compared lexicographically, convert to integers using the largest length - // (ex: given 0x0A and 0x0BCD, switch to 0x0A00 and 0x0BCD) - significantBytes = Math.max(startToken.getValue().capacity(), endToken.getValue().capacity()); - - // If the number of splits does not fit in the difference between the two integers, use more - // bytes (ex: cannot fit 4 splits between 0x01 and 0x03, so switch to 0x0100 and 0x0300) - // At most 4 additional bytes will be needed, since numberOfSplits is an integer. 
- int addedBytes = 0; - while (true) { - start = toBigInteger(startToken.getValue(), significantBytes); - end = toBigInteger(endToken.getValue(), significantBytes); - range = end.subtract(start); - if (addedBytes == 4 || start.equals(end) || range.compareTo(bigNumberOfSplits) >= 0) { - break; - } - significantBytes += 1; - addedBytes += 1; - } - ringEnd = ringLength = null; // won't be used - } else { - // Same logic except that we wrap around the ring - significantBytes = Math.max(startToken.getValue().capacity(), endToken.getValue().capacity()); - int addedBytes = 0; - while (true) { - start = toBigInteger(startToken.getValue(), significantBytes); - end = toBigInteger(endToken.getValue(), significantBytes); - ringLength = TWO.pow(significantBytes * 8); - ringEnd = ringLength.subtract(BigInteger.ONE); - range = end.subtract(start).add(ringLength); - if (addedBytes == 4 || range.compareTo(bigNumberOfSplits) >= 0) { - break; - } - significantBytes += 1; - addedBytes += 1; - } - } - - List values = super.split(start, range, ringEnd, ringLength, numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) { - tokens.add(new ByteOrderedToken(toBytes(value, significantBytes))); - } - return tokens; - } - - // Convert a token's byte array to a number in order to perform computations. - // This depends on the number of "significant bytes" that we use to normalize all tokens to the - // same size. - // For example if the token is 0x01 but significantBytes is 2, the result is 8 (0x0100). - private BigInteger toBigInteger(ByteBuffer bb, int significantBytes) { - byte[] bytes = Bytes.getArray(bb); - byte[] target; - if (significantBytes != bytes.length) { - target = new byte[significantBytes]; - System.arraycopy(bytes, 0, target, 0, bytes.length); - } else { - target = bytes; - } - return new BigInteger(1, target); - } - - // Convert a numeric representation back to a byte array. 
- // Again, the number of significant bytes matters: if the input value is 1 but significantBytes is - // 2, the - // expected result is 0x0001 (a simple conversion would produce 0x01). - protected ByteBuffer toBytes(BigInteger value, int significantBytes) { - byte[] rawBytes = value.toByteArray(); - byte[] result; - if (rawBytes.length == significantBytes) { - result = rawBytes; - } else { - result = new byte[significantBytes]; - int start, length; - if (rawBytes[0] == 0) { - // that's a sign byte, ignore (it can cause rawBytes.length == significantBytes + 1) - start = 1; - length = rawBytes.length - 1; - } else { - start = 0; - length = rawBytes.length; - } - System.arraycopy(rawBytes, start, result, significantBytes - length, length); - } - return ByteBuffer.wrap(result); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java deleted file mode 100644 index 099d8b55129..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.NotThreadSafe; - -/** - * A reusable set builder that guarantees that identical sets (same elements in the same order) will - * be represented by the same instance. - */ -@NotThreadSafe -class CanonicalNodeSetBuilder { - - private final Map, Set> canonicalSets = new HashMap<>(); - private final List elements = new ArrayList<>(); - - void add(Node node) { - // This is O(n), but the cardinality is low (max possible size is the replication factor). - if (!elements.contains(node)) { - elements.add(node); - } - } - - int size() { - return elements.size(); - } - - Set build() { - return canonicalSets.computeIfAbsent(elements, ImmutableSet::copyOf); - } - - void clear() { - elements.clear(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java deleted file mode 100644 index a5da85195c6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultReplicationStrategyFactory implements ReplicationStrategyFactory { - - private final String logPrefix; - - public DefaultReplicationStrategyFactory(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - } - - @Override - public ReplicationStrategy newInstance(Map replicationConfig) { - String strategyClass = replicationConfig.get("class"); - Preconditions.checkNotNull( - strategyClass, "Missing replication strategy class in " + replicationConfig); - switch (strategyClass) { - case "org.apache.cassandra.locator.LocalStrategy": - return new LocalReplicationStrategy(); - case "org.apache.cassandra.locator.SimpleStrategy": - return new SimpleReplicationStrategy(replicationConfig); - case "org.apache.cassandra.locator.NetworkTopologyStrategy": - return new NetworkTopologyReplicationStrategy(replicationConfig, logPrefix); - case "org.apache.cassandra.locator.EverywhereStrategy": - return new EverywhereReplicationStrategy(); - default: - throw new IllegalArgumentException("Unsupported replication strategy: " + strategyClass); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java deleted file mode 100644 index 8226ddee2c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DefaultTokenFactoryRegistry implements TokenFactoryRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultTokenFactoryRegistry.class); - - private final String logPrefix; - - public DefaultTokenFactoryRegistry(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - } - - @Override - public TokenFactory tokenFactoryFor(String partitioner) { - if (Murmur3TokenFactory.PARTITIONER_NAME.equals(partitioner)) { - LOG.debug("[{}] Detected Murmur3 partitioner ({})", logPrefix, partitioner); - return new Murmur3TokenFactory(); - } else if (RandomTokenFactory.PARTITIONER_NAME.equals(partitioner)) { - LOG.debug("[{}] Detected random partitioner ({})", logPrefix, partitioner); - return new RandomTokenFactory(); - } else if (ByteOrderedTokenFactory.PARTITIONER_NAME.equals(partitioner)) { - LOG.debug("[{}] Detected byte ordered partitioner ({})", logPrefix, partitioner); - return new ByteOrderedTokenFactory(); - } else { - LOG.warn( - "[{}] Unsupported partitioner '{}', token map will be empty.", logPrefix, partitioner); - return null; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java deleted file mode 100644 index 8c59fb73847..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - 
-@Immutable -public class DefaultTokenMap implements TokenMap { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultTokenMap.class); - - public static DefaultTokenMap build( - @NonNull Collection nodes, - @NonNull Collection keyspaces, - @NonNull TokenFactory tokenFactory, - @NonNull ReplicationStrategyFactory replicationStrategyFactory, - @NonNull String logPrefix) { - - TokenToPrimaryAndRing tmp = buildTokenToPrimaryAndRing(nodes, tokenFactory); - Map tokenToPrimary = tmp.tokenToPrimary; - List ring = tmp.ring; - LOG.debug("[{}] Rebuilt ring ({} tokens)", logPrefix, ring.size()); - - Set tokenRanges = buildTokenRanges(ring, tokenFactory); - - ImmutableSetMultimap.Builder tokenRangesByPrimary = - ImmutableSetMultimap.builder(); - for (TokenRange range : tokenRanges) { - if (range.isFullRing()) { - // The full ring is always ]min, min], so getEnd() doesn't match the node's token - assert tokenToPrimary.size() == 1; - tokenRangesByPrimary.put(tokenToPrimary.values().iterator().next(), range); - } else { - tokenRangesByPrimary.put(tokenToPrimary.get(range.getEnd()), range); - } - } - - Map> replicationConfigs = - buildReplicationConfigs(keyspaces, logPrefix); - - ImmutableMap.Builder, KeyspaceTokenMap> keyspaceMapsBuilder = - ImmutableMap.builder(); - for (Map config : ImmutableSet.copyOf(replicationConfigs.values())) { - LOG.debug("[{}] Computing keyspace-level data for {}", logPrefix, config); - keyspaceMapsBuilder.put( - config, - KeyspaceTokenMap.build( - config, - tokenToPrimary, - ring, - tokenRanges, - tokenFactory, - replicationStrategyFactory, - logPrefix)); - } - return new DefaultTokenMap( - tokenFactory, - tokenRanges, - tokenRangesByPrimary.build(), - replicationConfigs, - keyspaceMapsBuilder.build(), - logPrefix); - } - - private final TokenFactory tokenFactory; - @VisibleForTesting final Set tokenRanges; - @VisibleForTesting final SetMultimap tokenRangesByPrimary; - @VisibleForTesting final Map> replicationConfigs; - @VisibleForTesting 
final Map, KeyspaceTokenMap> keyspaceMaps; - private final String logPrefix; - - private DefaultTokenMap( - TokenFactory tokenFactory, - Set tokenRanges, - SetMultimap tokenRangesByPrimary, - Map> replicationConfigs, - Map, KeyspaceTokenMap> keyspaceMaps, - String logPrefix) { - this.tokenFactory = tokenFactory; - this.tokenRanges = tokenRanges; - this.tokenRangesByPrimary = tokenRangesByPrimary; - this.replicationConfigs = replicationConfigs; - this.keyspaceMaps = keyspaceMaps; - this.logPrefix = logPrefix; - } - - public TokenFactory getTokenFactory() { - return tokenFactory; - } - - @NonNull - @Override - public Token parse(@NonNull String tokenString) { - return tokenFactory.parse(tokenString); - } - - @NonNull - @Override - public String format(@NonNull Token token) { - return tokenFactory.format(token); - } - - @NonNull - @Override - public Token newToken(@NonNull ByteBuffer... partitionKey) { - return tokenFactory.hash(RoutingKey.compose(partitionKey)); - } - - @NonNull - @Override - public TokenRange newTokenRange(@NonNull Token start, @NonNull Token end) { - return tokenFactory.range(start, end); - } - - @NonNull - @Override - public Set getTokenRanges() { - return tokenRanges; - } - - @NonNull - @Override - public Set getTokenRanges(@NonNull Node node) { - return tokenRangesByPrimary.get(node); - } - - @NonNull - @Override - public Set getTokenRanges(@NonNull CqlIdentifier keyspace, @NonNull Node replica) { - KeyspaceTokenMap keyspaceMap = getKeyspaceMap(keyspace); - return (keyspaceMap == null) ? Collections.emptySet() : keyspaceMap.getTokenRanges(replica); - } - - @NonNull - @Override - public Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull ByteBuffer partitionKey) { - KeyspaceTokenMap keyspaceMap = getKeyspaceMap(keyspace); - return (keyspaceMap == null) ? 
Collections.emptySet() : keyspaceMap.getReplicas(partitionKey); - } - - @NonNull - @Override - public Set getReplicas(@NonNull CqlIdentifier keyspace, @NonNull Token token) { - KeyspaceTokenMap keyspaceMap = getKeyspaceMap(keyspace); - return (keyspaceMap == null) ? Collections.emptySet() : keyspaceMap.getReplicas(token); - } - - @NonNull - @Override - public String getPartitionerName() { - return tokenFactory.getPartitionerName(); - } - - private KeyspaceTokenMap getKeyspaceMap(CqlIdentifier keyspace) { - Map config = replicationConfigs.get(keyspace); - return (config == null) ? null : keyspaceMaps.get(config); - } - - /** Called when only the schema has changed. */ - public DefaultTokenMap refresh( - @NonNull Collection nodes, - @NonNull Collection keyspaces, - @NonNull ReplicationStrategyFactory replicationStrategyFactory) { - - Map> newReplicationConfigs = - buildReplicationConfigs(keyspaces, logPrefix); - if (newReplicationConfigs.equals(replicationConfigs)) { - LOG.debug("[{}] Schema changes do not impact the token map, no refresh needed", logPrefix); - return this; - } - ImmutableMap.Builder, KeyspaceTokenMap> newKeyspaceMapsBuilder = - ImmutableMap.builder(); - - // Will only be built if needed: - Map tokenToPrimary = null; - List ring = null; - - for (Map config : ImmutableSet.copyOf(newReplicationConfigs.values())) { - KeyspaceTokenMap oldKeyspaceMap = keyspaceMaps.get(config); - if (oldKeyspaceMap != null) { - LOG.debug("[{}] Reusing existing keyspace-level data for {}", logPrefix, config); - newKeyspaceMapsBuilder.put(config, oldKeyspaceMap); - } else { - LOG.debug("[{}] Computing new keyspace-level data for {}", logPrefix, config); - if (tokenToPrimary == null) { - TokenToPrimaryAndRing tmp = buildTokenToPrimaryAndRing(nodes, tokenFactory); - tokenToPrimary = tmp.tokenToPrimary; - ring = tmp.ring; - } - newKeyspaceMapsBuilder.put( - config, - KeyspaceTokenMap.build( - config, - tokenToPrimary, - ring, - tokenRanges, - tokenFactory, - 
replicationStrategyFactory, - logPrefix)); - } - } - return new DefaultTokenMap( - tokenFactory, - tokenRanges, - tokenRangesByPrimary, - newReplicationConfigs, - newKeyspaceMapsBuilder.build(), - logPrefix); - } - - private static TokenToPrimaryAndRing buildTokenToPrimaryAndRing( - Collection nodes, TokenFactory tokenFactory) { - ImmutableMap.Builder tokenToPrimaryBuilder = ImmutableMap.builder(); - SortedSet sortedTokens = new TreeSet<>(); - for (Node node : nodes) { - for (String tokenString : ((DefaultNode) node).getRawTokens()) { - Token token = tokenFactory.parse(tokenString); - sortedTokens.add(token); - tokenToPrimaryBuilder.put(token, node); - } - } - return new TokenToPrimaryAndRing( - tokenToPrimaryBuilder.build(), ImmutableList.copyOf(sortedTokens)); - } - - static class TokenToPrimaryAndRing { - final Map tokenToPrimary; - final List ring; - - private TokenToPrimaryAndRing(Map tokenToPrimary, List ring) { - this.tokenToPrimary = tokenToPrimary; - this.ring = ring; - } - } - - private static Map> buildReplicationConfigs( - Collection keyspaces, String logPrefix) { - ImmutableMap.Builder> builder = ImmutableMap.builder(); - for (KeyspaceMetadata keyspace : keyspaces) { - if (!keyspace.isVirtual()) { - builder.put(keyspace.getName(), keyspace.getReplication()); - } - } - ImmutableMap> result = builder.build(); - LOG.debug("[{}] Computing keyspace-level data for {}", logPrefix, result); - return result; - } - - private static Set buildTokenRanges(List ring, TokenFactory factory) { - ImmutableSet.Builder builder = ImmutableSet.builder(); - // JAVA-684: if there is only one token, return the full ring (]minToken, minToken]) - if (ring.size() == 1) { - builder.add(factory.range(factory.minToken(), factory.minToken())); - } else { - for (int i = 0; i < ring.size(); i++) { - Token start = ring.get(i); - Token end = ring.get((i + 1) % ring.size()); - builder.add(factory.range(start, end)); - } - } - return builder.build(); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java deleted file mode 100644 index 1973c07f5f8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class EverywhereReplicationStrategy implements ReplicationStrategy { - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - ImmutableMap.Builder> result = ImmutableMap.builder(); - Set allNodes = ImmutableSet.copyOf(tokenToPrimary.values()); - for (Token token : tokenToPrimary.keySet()) { - result = result.put(token, allNodes); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java deleted file mode 100644 index 80bad8a36b1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.Immutable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The token data for a given replication configuration. It's shared by all keyspaces that use that - * configuration. - */ -@Immutable -class KeyspaceTokenMap { - - private static final Logger LOG = LoggerFactory.getLogger(KeyspaceTokenMap.class); - - static KeyspaceTokenMap build( - Map replicationConfig, - Map tokenToPrimary, - List ring, - Set tokenRanges, - TokenFactory tokenFactory, - ReplicationStrategyFactory replicationStrategyFactory, - String logPrefix) { - - long start = System.nanoTime(); - try { - ReplicationStrategy strategy = replicationStrategyFactory.newInstance(replicationConfig); - - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - SetMultimap tokenRangesByNode; - if (ring.size() == 1) { - // We forced the single range to ]minToken,minToken], make sure to use that instead of - // relying - // on the node's token - ImmutableSetMultimap.Builder builder = ImmutableSetMultimap.builder(); - for (Node node : tokenToPrimary.values()) { - builder.putAll(node, tokenRanges); - } - tokenRangesByNode = builder.build(); - } else { - tokenRangesByNode = buildTokenRangesByNode(tokenRanges, replicasByToken); - } - return new 
KeyspaceTokenMap(ring, tokenRangesByNode, replicasByToken, tokenFactory); - } finally { - LOG.debug( - "[{}] Computing keyspace-level data for {} took {}", - logPrefix, - replicationConfig, - NanoTime.formatTimeSince(start)); - } - } - - private final List ring; - private final SetMultimap tokenRangesByNode; - private final Map> replicasByToken; - private final TokenFactory tokenFactory; - - private KeyspaceTokenMap( - List ring, - SetMultimap tokenRangesByNode, - Map> replicasByToken, - TokenFactory tokenFactory) { - this.ring = ring; - this.tokenRangesByNode = tokenRangesByNode; - this.replicasByToken = replicasByToken; - this.tokenFactory = tokenFactory; - } - - Set getTokenRanges(Node replica) { - return tokenRangesByNode.get(replica); - } - - Set getReplicas(ByteBuffer partitionKey) { - return getReplicas(tokenFactory.hash(partitionKey)); - } - - Set getReplicas(Token token) { - // If the token happens to be one of the "primary" tokens, get result directly - Set nodes = replicasByToken.get(token); - if (nodes != null) { - return nodes; - } - // Otherwise, find the closest "primary" token on the ring - int i = Collections.binarySearch(ring, token); - if (i < 0) { - i = -i - 1; - if (i >= ring.size()) { - i = 0; - } - } - return replicasByToken.get(ring.get(i)); - } - - private static SetMultimap buildTokenRangesByNode( - Set tokenRanges, Map> replicasByToken) { - ImmutableSetMultimap.Builder result = ImmutableSetMultimap.builder(); - for (TokenRange range : tokenRanges) { - for (Node node : replicasByToken.get(range.getEnd())) { - result.put(node, range); - } - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java deleted file mode 100644 index 916947e598c..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class LocalReplicationStrategy implements ReplicationStrategy { - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - ImmutableMap.Builder> result = ImmutableMap.builder(); - // Each token maps to exactly one node - for (Map.Entry entry : tokenToPrimary.entrySet()) { - result.put(entry.getKey(), ImmutableSet.of(entry.getValue())); - } - return result.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java deleted file mode 100644 index 1b3072d4f22..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.primitives.Longs; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.Immutable; - -/** A token generated by {@code Murmur3Partitioner}. 
*/ -@Immutable -public class Murmur3Token implements Token { - - private final long value; - - public Murmur3Token(long value) { - this.value = value; - } - - public long getValue() { - return value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Murmur3Token) { - Murmur3Token that = (Murmur3Token) other; - return this.value == that.value; - } else { - return false; - } - } - - @Override - public int hashCode() { - return (int) (value ^ (value >>> 32)); - } - - @Override - public int compareTo(@NonNull Token other) { - Preconditions.checkArgument( - other instanceof Murmur3Token, "Can only compare tokens of the same type"); - Murmur3Token that = (Murmur3Token) other; - return Longs.compare(this.value, that.value); - } - - @Override - public String toString() { - return "Murmur3Token(" + value + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java deleted file mode 100644 index 2d4dc975a63..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class Murmur3TokenFactory implements TokenFactory { - - public static final String PARTITIONER_NAME = "org.apache.cassandra.dht.Murmur3Partitioner"; - - public static final Murmur3Token MIN_TOKEN = new Murmur3Token(Long.MIN_VALUE); - public static final Murmur3Token MAX_TOKEN = new Murmur3Token(Long.MAX_VALUE); - - @Override - public String getPartitionerName() { - return PARTITIONER_NAME; - } - - @Override - public Token hash(ByteBuffer partitionKey) { - long v = murmur(partitionKey); - return new Murmur3Token(v == Long.MIN_VALUE ? 
Long.MAX_VALUE : v); - } - - @Override - public Token parse(String tokenString) { - return new Murmur3Token(Long.parseLong(tokenString)); - } - - @Override - public String format(Token token) { - Preconditions.checkArgument( - token instanceof Murmur3Token, "Can only format Murmur3Token instances"); - return Long.toString(((Murmur3Token) token).getValue()); - } - - @Override - public Token minToken() { - return MIN_TOKEN; - } - - @Override - public TokenRange range(Token start, Token end) { - Preconditions.checkArgument( - start instanceof Murmur3Token && end instanceof Murmur3Token, - "Can only build ranges of Murmur3Token instances"); - return new Murmur3TokenRange((Murmur3Token) start, (Murmur3Token) end); - } - - // This is an adapted version of the MurmurHash.hash3_x64_128 from Cassandra used - // for M3P. Compared to that methods, there's a few inlining of arguments and we - // only return the first 64-bits of the result since that's all M3P uses. - private long murmur(ByteBuffer data) { - int offset = data.position(); - int length = data.remaining(); - - int nblocks = length >> 4; // Process as 128-bit blocks. - - long h1 = 0; - long h2 = 0; - - long c1 = 0x87c37b91114253d5L; - long c2 = 0x4cf5ad432745937fL; - - // ---------- - // body - - for (int i = 0; i < nblocks; i++) { - long k1 = getblock(data, offset, i * 2); - long k2 = getblock(data, offset, i * 2 + 1); - - k1 *= c1; - k1 = rotl64(k1, 31); - k1 *= c2; - h1 ^= k1; - h1 = rotl64(h1, 27); - h1 += h2; - h1 = h1 * 5 + 0x52dce729; - k2 *= c2; - k2 = rotl64(k2, 33); - k2 *= c1; - h2 ^= k2; - h2 = rotl64(h2, 31); - h2 += h1; - h2 = h2 * 5 + 0x38495ab5; - } - - // ---------- - // tail - - // Advance offset to the unprocessed tail of the data. 
- offset += nblocks * 16; - - long k1 = 0; - long k2 = 0; - - switch (length & 15) { - case 15: - k2 ^= ((long) data.get(offset + 14)) << 48; - // fall through - case 14: - k2 ^= ((long) data.get(offset + 13)) << 40; - // fall through - case 13: - k2 ^= ((long) data.get(offset + 12)) << 32; - // fall through - case 12: - k2 ^= ((long) data.get(offset + 11)) << 24; - // fall through - case 11: - k2 ^= ((long) data.get(offset + 10)) << 16; - // fall through - case 10: - k2 ^= ((long) data.get(offset + 9)) << 8; - // fall through - case 9: - k2 ^= ((long) data.get(offset + 8)); - k2 *= c2; - k2 = rotl64(k2, 33); - k2 *= c1; - h2 ^= k2; - // fall through - case 8: - k1 ^= ((long) data.get(offset + 7)) << 56; - // fall through - case 7: - k1 ^= ((long) data.get(offset + 6)) << 48; - // fall through - case 6: - k1 ^= ((long) data.get(offset + 5)) << 40; - // fall through - case 5: - k1 ^= ((long) data.get(offset + 4)) << 32; - // fall through - case 4: - k1 ^= ((long) data.get(offset + 3)) << 24; - // fall through - case 3: - k1 ^= ((long) data.get(offset + 2)) << 16; - // fall through - case 2: - k1 ^= ((long) data.get(offset + 1)) << 8; - // fall through - case 1: - k1 ^= ((long) data.get(offset)); - k1 *= c1; - k1 = rotl64(k1, 31); - k1 *= c2; - h1 ^= k1; - } - - // ---------- - // finalization - - h1 ^= length; - h2 ^= length; - - h1 += h2; - h2 += h1; - - h1 = fmix(h1); - h2 = fmix(h2); - - h1 += h2; - - return h1; - } - - private long getblock(ByteBuffer key, int offset, int index) { - int i_8 = index << 3; - int blockOffset = offset + i_8; - return ((long) key.get(blockOffset) & 0xff) - + (((long) key.get(blockOffset + 1) & 0xff) << 8) - + (((long) key.get(blockOffset + 2) & 0xff) << 16) - + (((long) key.get(blockOffset + 3) & 0xff) << 24) - + (((long) key.get(blockOffset + 4) & 0xff) << 32) - + (((long) key.get(blockOffset + 5) & 0xff) << 40) - + (((long) key.get(blockOffset + 6) & 0xff) << 48) - + (((long) key.get(blockOffset + 7) & 0xff) << 56); - } - - private 
long rotl64(long v, int n) { - return ((v << n) | (v >>> (64 - n))); - } - - private long fmix(long k) { - k ^= k >>> 33; - k *= 0xff51afd7ed558ccdL; - k ^= k >>> 33; - k *= 0xc4ceb9fe1a85ec53L; - k ^= k >>> 33; - return k; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java deleted file mode 100644 index 2a87cd2c3b6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.math.BigInteger; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class Murmur3TokenRange extends TokenRangeBase { - - private static final BigInteger RING_END = BigInteger.valueOf(Long.MAX_VALUE); - private static final BigInteger RING_LENGTH = - RING_END.subtract(BigInteger.valueOf(Long.MIN_VALUE)); - - public Murmur3TokenRange(Murmur3Token start, Murmur3Token end) { - super(start, end, Murmur3TokenFactory.MIN_TOKEN); - } - - @Override - protected TokenRange newTokenRange(Token start, Token end) { - return new Murmur3TokenRange((Murmur3Token) start, (Murmur3Token) end); - } - - @Override - protected List split(Token startToken, Token endToken, int numberOfSplits) { - // edge case: ]min, min] means the whole ring - if (startToken.equals(endToken) && startToken.equals(Murmur3TokenFactory.MIN_TOKEN)) { - endToken = Murmur3TokenFactory.MAX_TOKEN; - } - - BigInteger start = BigInteger.valueOf(((Murmur3Token) startToken).getValue()); - BigInteger end = BigInteger.valueOf(((Murmur3Token) endToken).getValue()); - - BigInteger range = end.subtract(start); - if (range.compareTo(BigInteger.ZERO) < 0) { - range = range.add(RING_LENGTH); - } - - List values = super.split(start, range, RING_END, RING_LENGTH, numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) { - tokens.add(new Murmur3Token(value.longValue())); - } - return tokens; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java deleted file mode 100644 
index 0ed81083ad6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -class NetworkTopologyReplicationStrategy implements ReplicationStrategy { - - private static final Logger LOG = - LoggerFactory.getLogger(NetworkTopologyReplicationStrategy.class); - - private final Map replicationConfig; - private final Map replicationFactors; - private final String logPrefix; - - 
NetworkTopologyReplicationStrategy(Map replicationConfig, String logPrefix) { - this.replicationConfig = replicationConfig; - ImmutableMap.Builder factorsBuilder = ImmutableMap.builder(); - for (Map.Entry entry : replicationConfig.entrySet()) { - if (!entry.getKey().equals("class")) { - factorsBuilder.put(entry.getKey(), ReplicationFactor.fromString(entry.getValue())); - } - } - this.replicationFactors = factorsBuilder.build(); - this.logPrefix = logPrefix; - } - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - - // The implementation of this method was adapted from - // org.apache.cassandra.locator.NetworkTopologyStrategy - - ImmutableMap.Builder> result = ImmutableMap.builder(); - Map> racks = getRacksInDcs(tokenToPrimary.values()); - Map dcNodeCount = Maps.newHashMapWithExpectedSize(replicationFactors.size()); - Set warnedDcs = Sets.newHashSetWithExpectedSize(replicationFactors.size()); - CanonicalNodeSetBuilder replicasBuilder = new CanonicalNodeSetBuilder(); - - // find maximum number of nodes in each DC - for (Node node : Sets.newHashSet(tokenToPrimary.values())) { - String dc = node.getDatacenter(); - dcNodeCount.merge(dc, 1, Integer::sum); - } - for (int i = 0; i < ring.size(); i++) { - replicasBuilder.clear(); - - Map> allDcReplicas = new HashMap<>(); - Map> seenRacks = new HashMap<>(); - Map> skippedDcEndpoints = new HashMap<>(); - for (String dc : replicationFactors.keySet()) { - allDcReplicas.put(dc, new HashSet<>()); - seenRacks.put(dc, new HashSet<>()); - skippedDcEndpoints.put(dc, new LinkedHashSet<>()); // preserve order - } - - for (int j = 0; j < ring.size() && !allDone(allDcReplicas, dcNodeCount); j++) { - Node h = tokenToPrimary.get(getTokenWrapping(i + j, ring)); - String dc = h.getDatacenter(); - if (dc == null || !allDcReplicas.containsKey(dc)) { - continue; - } - ReplicationFactor dcConfig = replicationFactors.get(dc); - assert dcConfig != null; // since allDcReplicas.containsKey(dc) - int rf = 
dcConfig.fullReplicas(); - Set dcReplicas = allDcReplicas.get(dc); - if (dcReplicas.size() >= rf) { - continue; - } - String rack = h.getRack(); - // Check if we already visited all racks in dc - if (rack == null || seenRacks.get(dc).size() == racks.get(dc).size()) { - replicasBuilder.add(h); - dcReplicas.add(h); - } else { - // Is this a new rack? - if (seenRacks.get(dc).contains(rack)) { - skippedDcEndpoints.get(dc).add(h); - } else { - replicasBuilder.add(h); - dcReplicas.add(h); - seenRacks.get(dc).add(rack); - // If we've run out of distinct racks, add the nodes skipped so far - if (seenRacks.get(dc).size() == racks.get(dc).size()) { - Iterator skippedIt = skippedDcEndpoints.get(dc).iterator(); - while (skippedIt.hasNext() && dcReplicas.size() < rf) { - Node nextSkipped = skippedIt.next(); - replicasBuilder.add(nextSkipped); - dcReplicas.add(nextSkipped); - } - } - } - } - } - // If we haven't found enough replicas after a whole trip around the ring, this probably - // means that the replication factors are broken. - // Warn the user because that leads to quadratic performance of this method (JAVA-702). - for (Map.Entry> entry : allDcReplicas.entrySet()) { - String dcName = entry.getKey(); - int expectedFactor = replicationFactors.get(dcName).fullReplicas(); - int achievedFactor = entry.getValue().size(); - if (achievedFactor < expectedFactor && !warnedDcs.contains(dcName)) { - LOG.warn( - "[{}] Error while computing token map for replication settings {}: " - + "could not achieve replication factor {} for datacenter {} (found only {} replicas).", - logPrefix, - replicationConfig, - expectedFactor, - dcName, - achievedFactor); - // only warn once per DC - warnedDcs.add(dcName); - } - } - - result.put(ring.get(i), replicasBuilder.build()); - } - return result.build(); - } - - private boolean allDone(Map> map, Map dcNodeCount) { - for (Map.Entry> entry : map.entrySet()) { - String dc = entry.getKey(); - int dcCount = (dcNodeCount.get(dc) == null) ? 
0 : dcNodeCount.get(dc); - if (entry.getValue().size() < Math.min(replicationFactors.get(dc).fullReplicas(), dcCount)) { - return false; - } - } - return true; - } - - private Map> getRacksInDcs(Iterable nodes) { - Map> result = new HashMap<>(); - for (Node node : nodes) { - Set racks = result.computeIfAbsent(node.getDatacenter(), k -> new HashSet<>()); - racks.add(node.getRack()); - } - return result; - } - - private static Token getTokenWrapping(int i, List ring) { - return ring.get(i % ring.size()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java deleted file mode 100644 index 52e32fef522..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigInteger; -import net.jcip.annotations.Immutable; - -/** A token generated by {@code RandomPartitioner}. */ -@Immutable -public class RandomToken implements Token { - - private final BigInteger value; - - public RandomToken(BigInteger value) { - this.value = value; - } - - public BigInteger getValue() { - return value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof RandomToken) { - RandomToken that = (RandomToken) other; - return this.value.equals(that.value); - } else { - return false; - } - } - - @Override - public int hashCode() { - return value.hashCode(); - } - - @Override - public int compareTo(@NonNull Token other) { - Preconditions.checkArgument( - other instanceof RandomToken, "Can only compare tokens of the same type"); - RandomToken that = (RandomToken) other; - return this.value.compareTo(that.getValue()); - } - - @Override - public String toString() { - return "RandomToken(" + value + ")"; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java deleted file mode 100644 index 59f1bcc865b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class RandomTokenFactory implements TokenFactory { - - public static final String PARTITIONER_NAME = "org.apache.cassandra.dht.RandomPartitioner"; - - private static final BigInteger MIN_VALUE = BigInteger.ONE.negate(); - static final BigInteger MAX_VALUE = BigInteger.valueOf(2).pow(127); - public static final RandomToken MIN_TOKEN = new RandomToken(MIN_VALUE); - public static final RandomToken MAX_TOKEN = new RandomToken(MAX_VALUE); - - private final MessageDigest prototype; - private final boolean supportsClone; - - public RandomTokenFactory() { - prototype = createMessageDigest(); - boolean supportsClone; - try { - prototype.clone(); - supportsClone = true; - } catch (CloneNotSupportedException e) { - supportsClone = false; - } - this.supportsClone = supportsClone; - } - - @Override - public String getPartitionerName() { - return PARTITIONER_NAME; - } - - @Override - public Token hash(ByteBuffer partitionKey) { - 
return new RandomToken(md5(partitionKey)); - } - - @Override - public Token parse(String tokenString) { - return new RandomToken(new BigInteger(tokenString)); - } - - @Override - public String format(Token token) { - Preconditions.checkArgument( - token instanceof RandomToken, "Can only format RandomToken instances"); - return ((RandomToken) token).getValue().toString(); - } - - @Override - public Token minToken() { - return MIN_TOKEN; - } - - @Override - public TokenRange range(Token start, Token end) { - Preconditions.checkArgument( - start instanceof RandomToken && end instanceof RandomToken, - "Can only build ranges of RandomToken instances"); - return new RandomTokenRange((RandomToken) start, (RandomToken) end); - } - - private static MessageDigest createMessageDigest() { - try { - return MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException("MD5 doesn't seem to be available on this JVM", e); - } - } - - private BigInteger md5(ByteBuffer data) { - MessageDigest digest = newMessageDigest(); - digest.update(data.duplicate()); - return new BigInteger(digest.digest()).abs(); - } - - private MessageDigest newMessageDigest() { - if (supportsClone) { - try { - return (MessageDigest) prototype.clone(); - } catch (CloneNotSupportedException ignored) { - } - } - return createMessageDigest(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java deleted file mode 100644 index d1a98a185db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.internal.core.metadata.token.RandomTokenFactory.MAX_VALUE; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.math.BigInteger; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class RandomTokenRange extends TokenRangeBase { - - private static final BigInteger RING_LENGTH = MAX_VALUE.add(BigInteger.ONE); - - public RandomTokenRange(RandomToken start, RandomToken end) { - super(start, end, RandomTokenFactory.MIN_TOKEN); - } - - @Override - protected TokenRange newTokenRange(Token start, Token end) { - return new RandomTokenRange(((RandomToken) start), ((RandomToken) end)); - } - - @Override - protected List split(Token startToken, Token endToken, int numberOfSplits) { - // edge case: ]min, min] means the whole ring - if (startToken.equals(endToken) && startToken.equals(RandomTokenFactory.MIN_TOKEN)) { - endToken = RandomTokenFactory.MAX_TOKEN; - } - - BigInteger start = ((RandomToken) startToken).getValue(); - BigInteger end = ((RandomToken) endToken).getValue(); - - BigInteger range = end.subtract(start); - if (range.compareTo(BigInteger.ZERO) < 0) { - range = range.add(RING_LENGTH); - } 
- - List values = super.split(start, range, MAX_VALUE, RING_LENGTH, numberOfSplits); - List tokens = Lists.newArrayListWithExpectedSize(values.size()); - for (BigInteger value : values) { - tokens.add(new RandomToken(value)); - } - return tokens; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java deleted file mode 100644 index 966372da621..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java +++ /dev/null @@ -1,82 +0,0 @@ -package com.datastax.oss.driver.internal.core.metadata.token; -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -import java.util.Objects; - -// This class is a subset of server version at org.apache.cassandra.locator.ReplicationFactor -public class ReplicationFactor { - private final int allReplicas; - private final int fullReplicas; - private final int transientReplicas; - - public ReplicationFactor(int allReplicas, int transientReplicas) { - this.allReplicas = allReplicas; - this.transientReplicas = transientReplicas; - this.fullReplicas = allReplicas - transientReplicas; - } - - public ReplicationFactor(int allReplicas) { - this(allReplicas, 0); - } - - public int fullReplicas() { - return fullReplicas; - } - - public int transientReplicas() { - return transientReplicas; - } - - public boolean hasTransientReplicas() { - return allReplicas != fullReplicas; - } - - public static ReplicationFactor fromString(String s) { - if (s.contains("/")) { - - int slash = s.indexOf('/'); - String allPart = s.substring(0, slash); - String transientPart = s.substring(slash + 1); - return new ReplicationFactor(Integer.parseInt(allPart), Integer.parseInt(transientPart)); - } else { - return new ReplicationFactor(Integer.parseInt(s), 0); - } - } - - @Override - public String toString() { - return allReplicas + (hasTransientReplicas() ? 
"/" + transientReplicas() : ""); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ReplicationFactor)) { - return false; - } - ReplicationFactor that = (ReplicationFactor) o; - return allReplicas == that.allReplicas && fullReplicas == that.fullReplicas; - } - - @Override - public int hashCode() { - return Objects.hash(allReplicas, fullReplicas); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java deleted file mode 100644 index e16841e5107..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public interface ReplicationStrategy { - Map> computeReplicasByToken(Map tokenToPrimary, List ring); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java deleted file mode 100644 index 4f01d2ac920..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import java.util.Map; - -public interface ReplicationStrategyFactory { - ReplicationStrategy newInstance(Map replicationConfig); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java deleted file mode 100644 index db2c16112a1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class SimpleReplicationStrategy implements ReplicationStrategy { - - private final ReplicationFactor replicationFactor; - - SimpleReplicationStrategy(Map replicationConfig) { - this(extractReplicationFactor(replicationConfig)); - } - - @VisibleForTesting - SimpleReplicationStrategy(ReplicationFactor replicationFactor) { - this.replicationFactor = replicationFactor; - } - - @Override - public Map> computeReplicasByToken( - Map tokenToPrimary, List ring) { - - int rf = Math.min(replicationFactor.fullReplicas(), ring.size()); - - ImmutableMap.Builder> result = ImmutableMap.builder(); - CanonicalNodeSetBuilder replicasBuilder = new CanonicalNodeSetBuilder(); - - for (int i = 0; i < ring.size(); i++) { - replicasBuilder.clear(); - for (int j = 0; j < ring.size() && replicasBuilder.size() < rf; j++) { - replicasBuilder.add(tokenToPrimary.get(getTokenWrapping(i + j, ring))); - } - result.put(ring.get(i), replicasBuilder.build()); - } - return result.build(); - } - - private static Token getTokenWrapping(int i, List ring) { - return ring.get(i % ring.size()); - } - - private static ReplicationFactor extractReplicationFactor(Map replicationConfig) { - String factorString = replicationConfig.get("replication_factor"); - Preconditions.checkNotNull(factorString, "Missing replication factor in " + replicationConfig); - return ReplicationFactor.fromString(factorString); - } -} diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java deleted file mode 100644 index 8a1731be385..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import java.nio.ByteBuffer; - -/** Manages token instances for a partitioner implementation. */ -public interface TokenFactory { - - String getPartitionerName(); - - Token hash(ByteBuffer partitionKey); - - Token parse(String tokenString); - - String format(Token token); - - /** - * The minimum token is a special value that no key ever hashes to, it's used both as lower and - * upper bound. 
- */ - Token minToken(); - - TokenRange range(Token start, Token end); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java deleted file mode 100644 index f7e31da9870..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -/** A thin layer of indirection to make token factories pluggable. 
*/ -public interface TokenFactoryRegistry { - TokenFactory tokenFactoryFor(String partitioner); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java deleted file mode 100644 index f63f9dd1ab4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public abstract class TokenRangeBase implements TokenRange { - - private final Token start; - private final Token end; - private final Token minToken; - - protected TokenRangeBase(Token start, Token end, Token minToken) { - this.start = start; - this.end = end; - this.minToken = minToken; - } - - @NonNull - @Override - public Token getStart() { - return start; - } - - @NonNull - @Override - public Token getEnd() { - return end; - } - - @NonNull - @Override - public List splitEvenly(int numberOfSplits) { - if (numberOfSplits < 1) - throw new IllegalArgumentException( - String.format("numberOfSplits (%d) must be greater than 0.", numberOfSplits)); - if (isEmpty()) { - throw new IllegalArgumentException("Can't split empty range " + this); - } - - List tokenRanges = new ArrayList<>(); - List splitPoints = split(start, end, numberOfSplits); - Token splitStart = start; - for (Token splitEnd : splitPoints) { - tokenRanges.add(newTokenRange(splitStart, splitEnd)); - splitStart = splitEnd; - } - tokenRanges.add(newTokenRange(splitStart, end)); - return tokenRanges; - } - - protected abstract List split(Token start, Token end, int numberOfSplits); - - /** This is used by {@link #split(Token, Token, int)} implementations. 
*/ - protected List split( - BigInteger start, - BigInteger range, - BigInteger ringEnd, - BigInteger ringLength, - int numberOfSplits) { - BigInteger[] tmp = range.divideAndRemainder(BigInteger.valueOf(numberOfSplits)); - BigInteger divider = tmp[0]; - int remainder = tmp[1].intValue(); - - List results = Lists.newArrayListWithExpectedSize(numberOfSplits - 1); - BigInteger current = start; - BigInteger dividerPlusOne = - (remainder == 0) - ? null // won't be used - : divider.add(BigInteger.ONE); - - for (int i = 1; i < numberOfSplits; i++) { - current = current.add(remainder-- > 0 ? dividerPlusOne : divider); - if (ringEnd != null && current.compareTo(ringEnd) > 0) current = current.subtract(ringLength); - results.add(current); - } - return results; - } - - protected abstract TokenRange newTokenRange(Token start, Token end); - - @Override - public boolean isEmpty() { - return start.equals(end) && !start.equals(minToken); - } - - @Override - public boolean isWrappedAround() { - return start.compareTo(end) > 0 && !end.equals(minToken); - } - - @Override - public boolean isFullRing() { - return start.equals(minToken) && end.equals(minToken); - } - - @NonNull - @Override - public List unwrap() { - if (isWrappedAround()) { - return ImmutableList.of(newTokenRange(start, minToken), newTokenRange(minToken, end)); - } else { - return ImmutableList.of(this); - } - } - - @Override - public boolean intersects(@NonNull TokenRange that) { - // Empty ranges never intersect any other range - if (this.isEmpty() || that.isEmpty()) { - return false; - } - - return contains(this, that.getStart(), true) - || contains(this, that.getEnd(), false) - || contains(that, this.start, true) - || contains(that, this.end, false); - } - - @NonNull - @Override - public List intersectWith(@NonNull TokenRange that) { - if (!this.intersects(that)) { - throw new IllegalArgumentException( - "The two ranges do not intersect, use intersects() before calling this method"); - } - - List intersected = 
Lists.newArrayList(); - - // Compare the unwrapped ranges to one another. - List unwrappedForThis = this.unwrap(); - List unwrappedForThat = that.unwrap(); - for (TokenRange t1 : unwrappedForThis) { - for (TokenRange t2 : unwrappedForThat) { - if (t1.intersects(t2)) { - intersected.add( - newTokenRange( - contains(t1, t2.getStart(), true) ? t2.getStart() : t1.getStart(), - contains(t1, t2.getEnd(), false) ? t2.getEnd() : t1.getEnd())); - } - } - } - - // If two intersecting ranges were produced, merge them if they are adjacent. - // This could happen in the case that two wrapped ranges intersected. - if (intersected.size() == 2) { - TokenRange t1 = intersected.get(0); - TokenRange t2 = intersected.get(1); - if (t1.getEnd().equals(t2.getStart()) || t2.getEnd().equals(t1.getStart())) { - return ImmutableList.of(t1.mergeWith(t2)); - } - } - - return intersected; - } - - @Override - public boolean contains(@NonNull Token token) { - return contains(this, token, false); - } - - // isStart handles the case where the token is the start of another range, for example: - // * ]1,2] contains 2, but it does not contain the start of ]2,3] - // * ]1,2] does not contain 1, but it contains the start of ]1,3] - @VisibleForTesting - boolean contains(TokenRange range, Token token, boolean isStart) { - if (range.isEmpty()) { - return false; - } - if (range.getEnd().equals(minToken)) { - if (range.getStart().equals(minToken)) { // ]min, min] = full ring, contains everything - return true; - } else if (token.equals(minToken)) { - return !isStart; - } else { - return isStart - ? token.compareTo(range.getStart()) >= 0 - : token.compareTo(range.getStart()) > 0; - } - } else { - boolean isAfterStart = - isStart ? token.compareTo(range.getStart()) >= 0 : token.compareTo(range.getStart()) > 0; - boolean isBeforeEnd = - isStart ? token.compareTo(range.getEnd()) < 0 : token.compareTo(range.getEnd()) <= 0; - return range.isWrappedAround() - ? 
isAfterStart || isBeforeEnd // ####]----]#### - : isAfterStart && isBeforeEnd; // ----]####]---- - } - } - - @NonNull - @Override - public TokenRange mergeWith(@NonNull TokenRange that) { - if (this.equals(that)) { - return this; - } - - if (!(this.intersects(that) - || this.end.equals(that.getStart()) - || that.getEnd().equals(this.start))) { - throw new IllegalArgumentException( - String.format( - "Can't merge %s with %s because they neither intersect nor are adjacent", - this, that)); - } - - if (this.isEmpty()) { - return that; - } - - if (that.isEmpty()) { - return this; - } - - // That's actually "starts in or is adjacent to the end of" - boolean thisStartsInThat = contains(that, this.start, true) || this.start.equals(that.getEnd()); - boolean thatStartsInThis = - contains(this, that.getStart(), true) || that.getStart().equals(this.end); - - // This takes care of all the cases that return the full ring, so that we don't have to worry - // about them below - if (thisStartsInThat && thatStartsInThis) { - return fullRing(); - } - - // Starting at this.start, see how far we can go while staying in at least one of the ranges. - Token mergedEnd = - (thatStartsInThis && !contains(this, that.getEnd(), false)) ? that.getEnd() : this.end; - - // Repeat in the other direction. - Token mergedStart = thisStartsInThat ? 
that.getStart() : this.start; - - return newTokenRange(mergedStart, mergedEnd); - } - - private TokenRange fullRing() { - return newTokenRange(minToken, minToken); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TokenRange) { - TokenRange that = (TokenRange) other; - return this.start.equals(that.getStart()) && this.end.equals(that.getEnd()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(start, end); - } - - @Override - public int compareTo(@NonNull TokenRange that) { - if (this.equals(that)) { - return 0; - } else { - int compareStart = this.start.compareTo(that.getStart()); - return compareStart != 0 ? compareStart : this.end.compareTo(that.getEnd()); - } - } - - @Override - public String toString() { - return String.format("%s(%s, %s)", getClass().getSimpleName(), start, end); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java deleted file mode 100644 index 3d7dc50a7c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; -import com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.Timeout; -import java.time.Duration; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public abstract class AbstractMetricUpdater implements MetricUpdater { - - private static final Logger LOG = LoggerFactory.getLogger(AbstractMetricUpdater.class); - - // Not final for testing purposes - public static Duration MIN_EXPIRE_AFTER = 
Duration.ofMinutes(5); - - protected final InternalDriverContext context; - protected final Set enabledMetrics; - - private final AtomicReference metricsExpirationTimeoutRef = new AtomicReference<>(); - private final Duration expireAfter; - - protected AbstractMetricUpdater(InternalDriverContext context, Set enabledMetrics) { - this.context = context; - this.enabledMetrics = enabledMetrics; - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Duration expireAfter = config.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER); - if (expireAfter.compareTo(MIN_EXPIRE_AFTER) < 0) { - LOG.warn( - "[{}] Value too low for {}: {}. Forcing to {} instead.", - context.getSessionName(), - DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), - expireAfter, - MIN_EXPIRE_AFTER); - expireAfter = MIN_EXPIRE_AFTER; - } - this.expireAfter = expireAfter; - } - - @Override - public boolean isEnabled(MetricT metric, String profileName) { - return enabledMetrics.contains(metric); - } - - public Duration getExpireAfter() { - return expireAfter; - } - - protected int connectedNodes() { - int count = 0; - for (Node node : context.getMetadataManager().getMetadata().getNodes().values()) { - if (node.getOpenConnections() > 0) { - count++; - } - } - return count; - } - - protected int throttlingQueueSize() { - RequestThrottler requestThrottler = context.getRequestThrottler(); - if (requestThrottler instanceof ConcurrencyLimitingRequestThrottler) { - return ((ConcurrencyLimitingRequestThrottler) requestThrottler).getQueueSize(); - } - if (requestThrottler instanceof RateLimitingRequestThrottler) { - return ((RateLimitingRequestThrottler) requestThrottler).getQueueSize(); - } - LOG.warn( - "[{}] Metric {} does not support {}, it will always return 0", - context.getSessionName(), - DefaultSessionMetric.THROTTLING_QUEUE_SIZE.getPath(), - requestThrottler.getClass().getName()); - return 0; - } - - protected long preparedStatementCacheSize() { - Cache cache = 
getPreparedStatementCache(); - if (cache == null) { - LOG.warn( - "[{}] Metric {} is enabled in the config, " - + "but it looks like no CQL prepare processor is registered. " - + "The gauge will always return 0", - context.getSessionName(), - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath()); - return 0L; - } - return cache.size(); - } - - @Nullable - protected Cache getPreparedStatementCache() { - // By default, both the sync processor and the async ones are registered and they share the same - // cache. But with a custom processor registry, there could be only one of the two present. - for (RequestProcessor processor : context.getRequestProcessorRegistry().getProcessors()) { - if (processor instanceof CqlPrepareAsyncProcessor) { - return ((CqlPrepareAsyncProcessor) processor).getCache(); - } else if (processor instanceof CqlPrepareSyncProcessor) { - return ((CqlPrepareSyncProcessor) processor).getCache(); - } - } - return null; - } - - protected int availableStreamIds(Node node) { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 0 : pool.getAvailableIds(); - } - - protected int inFlightRequests(Node node) { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 0 : pool.getInFlight(); - } - - protected int orphanedStreamIds(Node node) { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 
0 : pool.getOrphanedIds(); - } - - protected void startMetricsExpirationTimeout() { - metricsExpirationTimeoutRef.accumulateAndGet( - newTimeout(), - (current, update) -> { - if (current == null) { - return update; - } else { - update.cancel(); - return current; - } - }); - } - - protected void cancelMetricsExpirationTimeout() { - Timeout t = metricsExpirationTimeoutRef.getAndSet(null); - if (t != null) { - t.cancel(); - } - } - - protected Timeout newTimeout() { - return context - .getNettyOptions() - .getTimer() - .newTimeout( - t -> { - clearMetrics(); - cancelMetricsExpirationTimeout(); - }, - expireAfter.toNanos(), - TimeUnit.NANOSECONDS); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java deleted file mode 100644 index c1c2e80e387..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public final class DefaultMetricId implements MetricId { - - private final String name; - private final ImmutableMap tags; - - public DefaultMetricId(String name, Map tags) { - this.name = Objects.requireNonNull(name, "name cannot be null"); - this.tags = ImmutableMap.copyOf(Objects.requireNonNull(tags, "tags cannot be null")); - } - - @NonNull - @Override - public String getName() { - return name; - } - - @NonNull - @Override - public Map getTags() { - return tags; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - DefaultMetricId that = (DefaultMetricId) o; - return name.equals(that.name) && tags.equals(that.tags); - } - - @Override - public int hashCode() { - return Objects.hash(name, tags); - } - - @Override - public String toString() { - return name + tags; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java deleted file mode 100644 index d4bacb35df9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * The default {@link MetricIdGenerator}. - * - *

This generator generates unique names, containing the session name, the node endpoint (for - * node metrics), and the metric prefix. It does not generate tags. - */ -public class DefaultMetricIdGenerator implements MetricIdGenerator { - - private final String sessionPrefix; - private final String nodePrefix; - - @SuppressWarnings("unused") - public DefaultMetricIdGenerator(DriverContext context) { - String sessionName = context.getSessionName(); - String prefix = - Objects.requireNonNull( - context - .getConfig() - .getDefaultProfile() - .getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")); - sessionPrefix = prefix.isEmpty() ? sessionName + '.' : prefix + '.' + sessionName + '.'; - nodePrefix = sessionPrefix + "nodes."; - } - - @NonNull - @Override - public MetricId sessionMetricId(@NonNull SessionMetric metric) { - return new DefaultMetricId(sessionPrefix + metric.getPath(), ImmutableMap.of()); - } - - @NonNull - @Override - public MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric) { - return new DefaultMetricId( - nodePrefix + node.getEndPoint().asMetricPrefix() + '.' + metric.getPath(), - ImmutableMap.of()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java deleted file mode 100644 index b15dc955760..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultMetrics implements Metrics { - - private final MetricRegistry registry; - private final DropwizardSessionMetricUpdater sessionUpdater; - - public DefaultMetrics(MetricRegistry registry, DropwizardSessionMetricUpdater sessionUpdater) { - this.registry = registry; - this.sessionUpdater = sessionUpdater; - } - - @NonNull - @Override - public MetricRegistry getRegistry() { - return registry; - } - - @NonNull - @Override - @SuppressWarnings("TypeParameterUnusedInFormals") - public Optional getSessionMetric( - @NonNull SessionMetric metric, String profileName) { - return Optional.ofNullable(sessionUpdater.getMetric(metric, profileName)); - } - - @NonNull - @Override - @SuppressWarnings("TypeParameterUnusedInFormals") - public Optional getNodeMetric( - @NonNull Node node, @NonNull NodeMetric metric, String profileName) { - NodeMetricUpdater nodeUpdater = ((DefaultNode) node).getMetricUpdater(); - return Optional.ofNullable( - ((DropwizardNodeMetricUpdater) 
nodeUpdater).getMetric(metric, profileName)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java deleted file mode 100644 index 7869f8a8af6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static com.datastax.oss.driver.internal.core.util.Dependency.DROPWIZARD; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DefaultMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultMetricsFactory.class); - - private final MetricsFactory delegate; - - @SuppressWarnings("unused") - public DefaultMetricsFactory(DriverContext context) { - if (DefaultDependencyChecker.isPresent(DROPWIZARD)) { - this.delegate = new DropwizardMetricsFactory(context); - } else { - this.delegate = new NoopMetricsFactory(context); - } - LOG.debug("[{}] Using {}", context.getSessionName(), delegate.getClass().getSimpleName()); - } - - @Override - public Optional getMetrics() { - return delegate.getMetrics(); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return delegate.getSessionUpdater(); - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - return delegate.newNodeUpdater(node); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java deleted file mode 100644 index 8332cdcca18..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import static com.datastax.oss.driver.internal.core.util.Dependency.DROPWIZARD; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.oracle.svm.core.annotate.Alias; -import com.oracle.svm.core.annotate.Delete; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import com.oracle.svm.core.annotate.TargetElement; -import java.util.function.BooleanSupplier; - -@SuppressWarnings("unused") -public class DefaultMetricsFactorySubstitutions { - - @TargetClass(value = DefaultMetricsFactory.class, onlyWith = DropwizardMissing.class) - public static final class DefaultMetricsFactoryDropwizardMissing { - - @Alias - @TargetElement(name = "delegate") - @SuppressWarnings({"FieldCanBeLocal", "FieldMayBeFinal"}) - private MetricsFactory delegate; - - @Substitute - @TargetElement(name = TargetElement.CONSTRUCTOR_NAME) - public DefaultMetricsFactoryDropwizardMissing(DriverContext context) { - this.delegate = new NoopMetricsFactory(context); - } - } - - @TargetClass(value = DropwizardMetricsFactory.class, onlyWith = DropwizardMissing.class) - @Delete - public static final class 
DeleteDropwizardMetricsFactory {} - - public static class DropwizardMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(DROPWIZARD); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java deleted file mode 100644 index 9377fb3a17e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.Counter; -import com.codahale.metrics.Histogram; -import com.codahale.metrics.Meter; -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.Reservoir; -import com.codahale.metrics.Timer; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public abstract class DropwizardMetricUpdater extends AbstractMetricUpdater { - - private static final Logger LOG = LoggerFactory.getLogger(DropwizardMetricUpdater.class); - - protected final MetricRegistry registry; - - protected final ConcurrentMap metrics = new ConcurrentHashMap<>(); - - protected final ConcurrentMap reservoirs = new ConcurrentHashMap<>(); - - protected DropwizardMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { - super(context, enabledMetrics); - this.registry = registry; - } - - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - public T getMetric( - MetricT metric, @SuppressWarnings("unused") String profileName) { - return (T) metrics.get(metric); - } - - @Override - public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateCounterFor(metric).inc(amount); - } - } - - @Override - public void updateHistogram(MetricT metric, @Nullable String profileName, long value) { - if 
(isEnabled(metric, profileName)) { - getOrCreateHistogramFor(metric).update(value); - } - } - - @Override - public void markMeter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateMeterFor(metric).mark(amount); - } - } - - @Override - public void updateTimer( - MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) { - if (isEnabled(metric, profileName)) { - getOrCreateTimerFor(metric).update(duration, unit); - } - } - - @Override - public void clearMetrics() { - for (MetricT metric : metrics.keySet()) { - MetricId id = getMetricId(metric); - registry.remove(id.getName()); - } - metrics.clear(); - reservoirs.clear(); - } - - protected abstract MetricId getMetricId(MetricT metric); - - protected void initializeGauge( - MetricT metric, DriverExecutionProfile profile, Supplier supplier) { - if (isEnabled(metric, profile.getName())) { - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - return registry.gauge(id.getName(), () -> supplier::get); - }); - } - } - - protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) { - if (isEnabled(metric, profile.getName())) { - getOrCreateCounterFor(metric); - } - } - - protected void initializeHdrTimer( - MetricT metric, - DriverExecutionProfile profile, - DriverOption highestLatency, - DriverOption significantDigits, - DriverOption interval) { - if (isEnabled(metric, profile.getName())) { - reservoirs.computeIfAbsent( - metric, m -> createHdrReservoir(m, profile, highestLatency, significantDigits, interval)); - getOrCreateTimerFor(metric); - } - } - - protected Counter getOrCreateCounterFor(MetricT metric) { - return (Counter) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - return registry.counter(id.getName()); - }); - } - - protected Meter getOrCreateMeterFor(MetricT metric) { - return (Meter) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = 
getMetricId(m); - return registry.meter(id.getName()); - }); - } - - protected Histogram getOrCreateHistogramFor(MetricT metric) { - return (Histogram) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - return registry.histogram(id.getName()); - }); - } - - protected Timer getOrCreateTimerFor(MetricT metric) { - return (Timer) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Reservoir reservoir = reservoirs.get(metric); - Timer timer = reservoir == null ? new Timer() : new Timer(reservoir); - return registry.timer(id.getName(), () -> timer); - }); - } - - protected HdrReservoir createHdrReservoir( - MetricT metric, - DriverExecutionProfile profile, - DriverOption highestLatencyOption, - DriverOption significantDigitsOption, - DriverOption intervalOption) { - MetricId id = getMetricId(metric); - Duration highestLatency = profile.getDuration(highestLatencyOption); - int significantDigits = profile.getInt(significantDigitsOption); - if (significantDigits < 0 || significantDigits > 5) { - LOG.warn( - "[{}] Configuration option {} is out of range (expected between 0 and 5, " - + "found {}); using 3 instead.", - id.getName(), - significantDigitsOption, - significantDigits); - significantDigits = 3; - } - Duration refreshInterval = profile.getDuration(intervalOption); - return new HdrReservoir(highestLatency, significantDigits, refreshInterval, id.getName()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java deleted file mode 100644 index 5f28f8f5060..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.EventExecutor; -import java.util.Optional; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class DropwizardMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(DropwizardMetricsFactory.class); - - private final 
InternalDriverContext context; - private final Set enabledNodeMetrics; - private final MetricRegistry registry; - @Nullable private final Metrics metrics; - private final SessionMetricUpdater sessionUpdater; - - public DropwizardMetricsFactory(DriverContext context) { - this.context = (InternalDriverContext) context; - String logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Set enabledSessionMetrics = - MetricPaths.parseSessionMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix); - this.enabledNodeMetrics = - MetricPaths.parseNodeMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix); - if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) { - LOG.debug("[{}] All metrics are disabled, Session.getMetrics will be empty", logPrefix); - this.registry = null; - this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE; - this.metrics = null; - } else { - // try to get the metric registry from the context - Object possibleMetricRegistry = this.context.getMetricRegistry(); - if (possibleMetricRegistry == null) { - // metrics are enabled, but a metric registry was not supplied to the context - // create a registry object - possibleMetricRegistry = new MetricRegistry(); - } - if (possibleMetricRegistry instanceof MetricRegistry) { - this.registry = (MetricRegistry) possibleMetricRegistry; - DropwizardSessionMetricUpdater dropwizardSessionUpdater = - new DropwizardSessionMetricUpdater(this.context, enabledSessionMetrics, registry); - this.sessionUpdater = dropwizardSessionUpdater; - this.metrics = new DefaultMetrics(registry, dropwizardSessionUpdater); - } else { - // Metrics are enabled, but the registry object is not an expected type - throw new IllegalArgumentException( - "Unexpected Metrics registry object. 
Expected registry object to be of type '" - + MetricRegistry.class.getName() - + "', but was '" - + possibleMetricRegistry.getClass().getName() - + "'"); - } - if (!enabledNodeMetrics.isEmpty()) { - EventExecutor adminEventExecutor = - this.context.getNettyOptions().adminEventExecutorGroup().next(); - this.context - .getEventBus() - .register( - NodeStateEvent.class, - RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent)); - } - } - } - - @Override - public Optional getMetrics() { - return Optional.ofNullable(metrics); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return sessionUpdater; - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - if (registry == null) { - return NoopNodeMetricUpdater.INSTANCE; - } else { - return new DropwizardNodeMetricUpdater(node, context, enabledNodeMetrics, registry); - } - } - - protected void processNodeStateEvent(NodeStateEvent event) { - if (event.newState == NodeState.DOWN - || event.newState == NodeState.FORCED_DOWN - || event.newState == null) { - // node is DOWN or REMOVED - ((DropwizardNodeMetricUpdater) event.node.getMetricUpdater()).startMetricsExpirationTimeout(); - } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) { - // node is UP or ADDED - ((DropwizardNodeMetricUpdater) event.node.getMetricUpdater()) - .cancelMetricsExpirationTimeout(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java deleted file mode 100644 index 2e5e6c8db3d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DropwizardNodeMetricUpdater extends DropwizardMetricUpdater - implements NodeMetricUpdater { - - private final Node node; - - public DropwizardNodeMetricUpdater( - Node node, - InternalDriverContext context, - Set enabledMetrics, - MetricRegistry registry) { - super(context, enabledMetrics, registry); - this.node = node; - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections); - initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> 
availableStreamIds(node)); - initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node)); - initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node)); - - initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile); - initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile); - initializeCounter(DefaultNodeMetric.RETRIES, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.IGNORES, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile); - initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile); - initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile); - - initializeHdrTimer( - DefaultNodeMetric.CQL_MESSAGES, - profile, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL); - initializeHdrTimer( - DseNodeMetric.GRAPH_MESSAGES, - profile, - 
DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL); - } - - @Override - protected MetricId getMetricId(NodeMetric metric) { - MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric); - if (!id.getTags().isEmpty()) { - throw new IllegalStateException("Cannot use metric tags with Dropwizard"); - } - return id; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java deleted file mode 100644 index 94e10ad6936..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DropwizardSessionMetricUpdater extends DropwizardMetricUpdater - implements SessionMetricUpdater { - - public DropwizardSessionMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { - super(context, enabledMetrics, registry); - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes); - initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize); - initializeGauge( - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize); - - initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile); - initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile); - initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile); - - initializeHdrTimer( - DefaultSessionMetric.CQL_REQUESTS, - profile, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL); - initializeHdrTimer( - DefaultSessionMetric.THROTTLING_DELAY, - profile, - DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, - DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, - 
DefaultDriverOption.METRICS_SESSION_THROTTLING_INTERVAL); - initializeHdrTimer( - DseSessionMetric.CONTINUOUS_CQL_REQUESTS, - profile, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL); - initializeHdrTimer( - DseSessionMetric.GRAPH_REQUESTS, - profile, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL); - } - - @Override - protected MetricId getMetricId(SessionMetric metric) { - MetricId id = context.getMetricIdGenerator().sessionMetricId(metric); - if (!id.getTags().isEmpty()) { - throw new IllegalStateException("Cannot use metric tags with Dropwizard"); - } - return id; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java deleted file mode 100644 index c66fe1dbf8a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.codahale.metrics.Reservoir; -import com.codahale.metrics.Snapshot; -import java.io.OutputStream; -import java.time.Duration; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.HdrHistogram.Histogram; -import org.HdrHistogram.Recorder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reservoir implementation backed by the HdrHistogram library. - * - *

It uses a {@link Recorder} to capture snapshots at a configurable interval: calls to {@link - * #update(long)} are recorded in a "live" histogram, while {@link #getSnapshot()} is based on a - * "cached", read-only histogram. Each time the cached histogram becomes older than the interval, - * the two histograms are switched (therefore statistics won't be available during the first - * interval after initialization, since we don't have a cached histogram yet). - * - *

Note that this class does not implement {@link #size()}. - * - * @see HdrHistogram - */ -@ThreadSafe -public class HdrReservoir implements Reservoir { - - private static final Logger LOG = LoggerFactory.getLogger(HdrReservoir.class); - - private final String logPrefix; - private final Recorder recorder; - private final long refreshIntervalNanos; - - // The lock only orchestrates `getSnapshot()` calls; `update()` is fed directly to the recorder, - // which is lock-free. `getSnapshot()` calls are comparatively rare, so locking is not a - // bottleneck. - private final ReadWriteLock cacheLock = new ReentrantReadWriteLock(); - - @GuardedBy("cacheLock") - private Histogram cachedHistogram; - - @GuardedBy("cacheLock") - private long cachedHistogramTimestampNanos; - - @GuardedBy("cacheLock") - private Snapshot cachedSnapshot; - - public HdrReservoir( - Duration highestTrackableLatency, - int numberOfSignificantValueDigits, - Duration refreshInterval, - String logPrefix) { - this.logPrefix = logPrefix; - // The Reservoir interface is supposed to be agnostic to the unit. However, the Metrics library - // heavily leans towards nanoseconds (for example, Timer feeds nanoseconds to update(); JmxTimer - // assumes that the snapshot results are in nanoseconds). - // In our case, microseconds are precise enough for request metrics, and we don't want to waste - // space unnecessarily. So we simply use microseconds for our internal storage, and do the - // conversion when needed. 
- this.recorder = - new Recorder(highestTrackableLatency.toNanos() / 1000, numberOfSignificantValueDigits); - this.refreshIntervalNanos = refreshInterval.toNanos(); - this.cachedHistogramTimestampNanos = System.nanoTime(); - this.cachedSnapshot = EMPTY_SNAPSHOT; - } - - @Override - public void update(long value) { - try { - recorder.recordValue(value / 1000); - } catch (ArrayIndexOutOfBoundsException e) { - LOG.warn("[{}] Recorded value ({}) is out of bounds, discarding", logPrefix, value); - } - } - - /** - * Not implemented: this reservoir implementation is intended for use with a {@link - * com.codahale.metrics.Histogram}, which doesn't use this method. - * - *

(original description: {@inheritDoc}) - */ - @Override - public int size() { - throw new UnsupportedOperationException("HdrReservoir does not implement size()"); - } - - /** - * {@inheritDoc} - * - *

Note that the snapshots returned from this method do not implement {@link - * Snapshot#getValues()} nor {@link Snapshot#dump(OutputStream)}. In addition, due to the way that - * internal data structures are recycled, you should not hold onto a snapshot for more than the - * refresh interval; one way to ensure this is to never cache the result of this method. - */ - @Override - public Snapshot getSnapshot() { - long now = System.nanoTime(); - - cacheLock.readLock().lock(); - try { - if (now - cachedHistogramTimestampNanos < refreshIntervalNanos) { - return cachedSnapshot; - } - } finally { - cacheLock.readLock().unlock(); - } - - cacheLock.writeLock().lock(); - try { - // Might have raced with another writer => re-check the timestamp - if (now - cachedHistogramTimestampNanos >= refreshIntervalNanos) { - LOG.debug("Cached snapshot is too old, refreshing"); - cachedHistogram = recorder.getIntervalHistogram(cachedHistogram); - cachedSnapshot = new HdrSnapshot(cachedHistogram); - cachedHistogramTimestampNanos = now; - } - return cachedSnapshot; - } finally { - cacheLock.writeLock().unlock(); - } - } - - private class HdrSnapshot extends Snapshot { - - private final Histogram histogram; - private final double meanNanos; - private final double stdDevNanos; - - private HdrSnapshot(Histogram histogram) { - this.histogram = histogram; - - // Cache those values because they rely on HdrHistogram's internal iterators, which are not - // safe if the snapshot is accessed by concurrent reporters. - // In contrast, getMin(), getMax() and getValue() are safe. - this.meanNanos = histogram.getMean() * 1000; - this.stdDevNanos = histogram.getStdDeviation() * 1000; - } - - @Override - public double getValue(double quantile) { - return histogram.getValueAtPercentile(quantile * 100) * 1000; - } - - /** - * Not implemented: this reservoir implementation is intended for use with a {@link - * com.codahale.metrics.Histogram}, which doesn't use this method. - * - *

(original description: {@inheritDoc}) - */ - @Override - public long[] getValues() { - // This can be implemented, but we ran into issues when accessed by concurrent reporters - // because HdrHistogram uses an unsafe shared iterator. - // So throwing instead since this method should be seldom used anyway. - throw new UnsupportedOperationException( - "HdrReservoir's snapshots do not implement getValues()"); - } - - @Override - public int size() { - long longSize = histogram.getTotalCount(); - // The Metrics API requires an int. It's very unlikely that we get an overflow here, unless - // the refresh interval is ridiculously high (at 10k requests/s, it would have to be more than - // 59 hours). However handle gracefully just in case. - int size; - if (longSize > Integer.MAX_VALUE) { - LOG.warn("[{}] Too many recorded values, truncating", logPrefix); - size = Integer.MAX_VALUE; - } else { - size = (int) longSize; - } - return size; - } - - @Override - public long getMax() { - return histogram.getMaxValue() * 1000; - } - - @Override - public double getMean() { - return meanNanos; - } - - @Override - public long getMin() { - return histogram.getMinValue() * 1000; - } - - @Override - public double getStdDev() { - return stdDevNanos; - } - - /** - * Not implemented: this reservoir implementation is intended for use with a {@link - * com.codahale.metrics.Histogram}, which doesn't use this method. - * - *

(original description: {@inheritDoc}) - */ - @Override - public void dump(OutputStream output) { - throw new UnsupportedOperationException("HdrReservoir's snapshots do not implement dump()"); - } - } - - private static final Snapshot EMPTY_SNAPSHOT = - new Snapshot() { - @Override - public double getValue(double quantile) { - return 0; - } - - @Override - public long[] getValues() { - return new long[0]; - } - - @Override - public int size() { - return 0; - } - - @Override - public long getMax() { - return 0; - } - - @Override - public double getMean() { - return 0; - } - - @Override - public long getMin() { - return 0; - } - - @Override - public double getStdDev() { - return 0; - } - - @Override - public void dump(OutputStream output) { - // nothing to do - } - }; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java deleted file mode 100644 index 039fb96d34b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** - * The identifier of a metric. - * - *

The driver will use the reported name and tags to register the described metric against the - * current metric registry. - * - *

A metric identifier is unique, that is, the combination of its name and its tags is expected - * to be unique for a given metric registry. - */ -public interface MetricId { - - /** - * Returns this metric name. - * - *

Metric names can be any non-empty string, but it is recommended to create metric names that - * have path-like structures separated by a dot, e.g. {@code path.to.my.custom.metric}. Driver - * built-in implementations of this interface abide by this rule. - * - * @return The metric name; cannot be empty nor null. - */ - @NonNull - String getName(); - - /** @return The metric tags, or empty if no tag is defined; cannot be null. */ - @NonNull - Map getTags(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java deleted file mode 100644 index 7a33a81b966..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A {@link MetricIdGenerator} is used to generate the unique identifiers by which a metric should - * be registered against the current metrics registry. - * - *

The driver ships with two implementations of this interface; {@code DefaultMetricIdGenerator} - * and {@code TaggingMetricIdGenerator}. - * - *

{@code DefaultMetricIdGenerator} is the default implementation; it generates metric - * identifiers with unique names and no tags. - * - *

{@code TaggingMetricIdGenerator} generates metric identifiers whose uniqueness stems from the - * combination of their names and tags. - * - *

See the driver's {@code reference.conf} file. - */ -public interface MetricIdGenerator { - - /** Generates a {@link MetricId} for the given {@link SessionMetric}. */ - @NonNull - MetricId sessionMetricId(@NonNull SessionMetric metric); - - /** Generates a {@link MetricId} for the given {@link Node} and {@link NodeMetric}. */ - @NonNull - MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java deleted file mode 100644 index 92b3fc569f7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MetricPaths { - - private static final Logger LOG = LoggerFactory.getLogger(MetricPaths.class); - - public static Set parseSessionMetricPaths(List paths, String logPrefix) { - Set result = new HashSet<>(); - for (String path : paths) { - try { - result.add(DefaultSessionMetric.fromPath(path)); - } catch (IllegalArgumentException e) { - try { - result.add(DseSessionMetric.fromPath(path)); - } catch (IllegalArgumentException e1) { - LOG.warn("[{}] Unknown session metric {}, skipping", logPrefix, path); - } - } - } - return Collections.unmodifiableSet(result); - } - - public static Set parseNodeMetricPaths(List paths, String logPrefix) { - Set result = new HashSet<>(); - for (String path : paths) { - try { - result.add(DefaultNodeMetric.fromPath(path)); - } catch (IllegalArgumentException e) { - try { - result.add(DseNodeMetric.fromPath(path)); - } catch (IllegalArgumentException e1) { - LOG.warn("[{}] Unknown node metric {}, skipping", logPrefix, path); - } - } - } - return Collections.unmodifiableSet(result); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java deleted file mode 100644 index c07d1b136af..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java 
+++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.concurrent.TimeUnit; - -/** - * Note about profiles names: they are included to keep the possibility to break up metrics per - * profile in the future, but right now the default updater implementations ignore them. The driver - * internals provide a profile name when it makes sense and is practical; in other cases, it passes - * {@code null}. 
- */ -public interface MetricUpdater { - - void incrementCounter(MetricT metric, @Nullable String profileName, long amount); - - default void incrementCounter(MetricT metric, @Nullable String profileName) { - incrementCounter(metric, profileName, 1); - } - - // note: currently unused - void updateHistogram(MetricT metric, @Nullable String profileName, long value); - - void markMeter(MetricT metric, @Nullable String profileName, long amount); - - default void markMeter(MetricT metric, @Nullable String profileName) { - markMeter(metric, profileName, 1); - } - - void updateTimer(MetricT metric, @Nullable String profileName, long duration, TimeUnit unit); - - boolean isEnabled(MetricT metric, @Nullable String profileName); - - void clearMetrics(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java deleted file mode 100644 index 6440b79fb75..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import java.util.Optional; - -public interface MetricsFactory { - - Optional getMetrics(); - - /** @return the unique instance for this session (this must return the same object every time). */ - SessionMetricUpdater getSessionUpdater(); - - NodeMetricUpdater newNodeUpdater(Node node); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java deleted file mode 100644 index 93d003f0a03..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.NodeMetric; - -public interface NodeMetricUpdater extends MetricUpdater {} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java deleted file mode 100644 index 59ebd3d314b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import java.util.List; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class NoopMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(NoopMetricsFactory.class); - - @SuppressWarnings("unused") - public NoopMetricsFactory(DriverContext context) { - String logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - List enabledSessionMetrics = - config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED); - List enabledNodeMetrics = - config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED); - if (!enabledSessionMetrics.isEmpty() || !enabledNodeMetrics.isEmpty()) { - LOG.warn( - "[{}] Some session-level or node-level metrics were enabled, " - + "but NoopMetricsFactory is being used: all metrics will be empty", - logPrefix); - } - } - - @Override - public Optional getMetrics() { - return Optional.empty(); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return NoopSessionMetricUpdater.INSTANCE; - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - return NoopNodeMetricUpdater.INSTANCE; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java deleted file mode 100644 index 8d216990331..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class NoopNodeMetricUpdater implements NodeMetricUpdater { - - public static NoopNodeMetricUpdater INSTANCE = new NoopNodeMetricUpdater(); - - private NoopNodeMetricUpdater() {} - - @Override - public void incrementCounter(NodeMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateHistogram(NodeMetric metric, String profileName, long value) { - // nothing to do - } - - @Override - public void markMeter(NodeMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateTimer(NodeMetric metric, String profileName, long duration, TimeUnit unit) { - // nothing to do - } - - @Override - public boolean isEnabled(NodeMetric metric, String profileName) { - // since methods don't do anything, return false - return false; - } - - @Override 
- public void clearMetrics() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java deleted file mode 100644 index 7099a8ddcac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class NoopSessionMetricUpdater implements SessionMetricUpdater { - - public static NoopSessionMetricUpdater INSTANCE = new NoopSessionMetricUpdater(); - - private NoopSessionMetricUpdater() {} - - @Override - public void incrementCounter(SessionMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateHistogram(SessionMetric metric, String profileName, long value) { - // nothing to do - } - - @Override - public void markMeter(SessionMetric metric, String profileName, long amount) { - // nothing to do - } - - @Override - public void updateTimer(SessionMetric metric, String profileName, long duration, TimeUnit unit) { - // nothing to do - } - - @Override - public boolean isEnabled(SessionMetric metric, String profileName) { - // since methods don't do anything, return false - return false; - } - - @Override - public void clearMetrics() {} -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java deleted file mode 100644 index b7fc51dd134..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.metrics.SessionMetric; - -public interface SessionMetricUpdater extends MetricUpdater {} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java deleted file mode 100644 index 393651929c9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -/** - * A {@link MetricIdGenerator} that generates metric identifiers using a combination of names and - * tags. - * - *

Session metric identifiers contain a name starting with "session." and ending with the metric - * path, and a tag with the key "session" and the value of the current session name. - * - *

Node metric identifiers contain a name starting with "nodes." and ending with the metric path, - * and two tags: one with the key "session" and the value of the current session name, the other - * with the key "node" and the value of the current node endpoint. - */ -public class TaggingMetricIdGenerator implements MetricIdGenerator { - - private final String sessionName; - private final String sessionPrefix; - private final String nodePrefix; - - @SuppressWarnings("unused") - public TaggingMetricIdGenerator(DriverContext context) { - sessionName = context.getSessionName(); - String prefix = - Objects.requireNonNull( - context - .getConfig() - .getDefaultProfile() - .getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")); - sessionPrefix = prefix.isEmpty() ? "session." : prefix + ".session."; - nodePrefix = prefix.isEmpty() ? "nodes." : prefix + ".nodes."; - } - - @NonNull - @Override - public MetricId sessionMetricId(@NonNull SessionMetric metric) { - return new DefaultMetricId( - sessionPrefix + metric.getPath(), ImmutableMap.of("session", sessionName)); - } - - @NonNull - @Override - public MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric) { - return new DefaultMetricId( - nodePrefix + metric.getPath(), - ImmutableMap.of("session", sessionName, "node", node.getEndPoint().toString())); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java deleted file mode 100644 index dffc23c4c8f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Locale; - -public class CpuInfo { - - /* Copied from equivalent op in jnr.ffi.Platform. We have to have this here as it has to be defined - * before its (multiple) uses in determineCpu() */ - private static final Locale LOCALE = Locale.ENGLISH; - - /* The remainder of this class is largely based on jnr.ffi.Platform in jnr-ffi version 2.1.10. - * We copy it manually here in order to avoid introducing an extra dependency merely for the sake of - * evaluating some system properties. - * - * jnr-ffi copyright notice follows: - * - * Copyright (C) 2008-2010 Wayne Meissner - * - * This file is part of the JNR project. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - /** The supported CPU architectures. 
*/ - public enum Cpu { - /* - * Note The names of the enum values are used in other parts of the - * code to determine where to find the native stub library. Do NOT rename. - */ - - /** 32 bit legacy Intel */ - I386, - - /** 64 bit AMD (aka EM64T/X64) */ - X86_64, - - /** 32 bit Power PC */ - PPC, - - /** 64 bit Power PC */ - PPC64, - - /** 64 bit Power PC little endian */ - PPC64LE, - - /** 32 bit Sun sparc */ - SPARC, - - /** 64 bit Sun sparc */ - SPARCV9, - - /** IBM zSeries S/390 */ - S390X, - - /** 32 bit MIPS (used by nestedvm) */ - MIPS32, - - /** 32 bit ARM */ - ARM, - - /** 64 bit ARM */ - AARCH64, - - /** - * Unknown CPU architecture. A best effort will be made to infer architecture specific values - * such as address and long size. - */ - UNKNOWN; - - @Override - public String toString() { - return name().toLowerCase(LOCALE); - } - } - - public static Cpu determineCpu() { - String archString = System.getProperty("os.arch"); - if (equalsIgnoreCase("x86", archString) - || equalsIgnoreCase("i386", archString) - || equalsIgnoreCase("i86pc", archString) - || equalsIgnoreCase("i686", archString)) { - return Cpu.I386; - } else if (equalsIgnoreCase("x86_64", archString) || equalsIgnoreCase("amd64", archString)) { - return Cpu.X86_64; - } else if (equalsIgnoreCase("ppc", archString) || equalsIgnoreCase("powerpc", archString)) { - return Cpu.PPC; - } else if (equalsIgnoreCase("ppc64", archString) || equalsIgnoreCase("powerpc64", archString)) { - if ("little".equals(System.getProperty("sun.cpu.endian"))) { - return Cpu.PPC64LE; - } - return Cpu.PPC64; - } else if (equalsIgnoreCase("ppc64le", archString) - || equalsIgnoreCase("powerpc64le", archString)) { - return Cpu.PPC64LE; - } else if (equalsIgnoreCase("s390", archString) || equalsIgnoreCase("s390x", archString)) { - return Cpu.S390X; - } else if (equalsIgnoreCase("aarch64", archString)) { - return Cpu.AARCH64; - } else if (equalsIgnoreCase("arm", archString) || equalsIgnoreCase("armv7l", archString)) { - return 
Cpu.ARM; - } - - // Try to find by lookup up in the CPU list - for (Cpu cpu : Cpu.values()) { - if (equalsIgnoreCase(cpu.name(), archString)) { - return cpu; - } - } - - return Cpu.UNKNOWN; - } - - private static boolean equalsIgnoreCase(String s1, String s2) { - return s1.equalsIgnoreCase(s2) - || s1.toUpperCase(LOCALE).equals(s2.toUpperCase(LOCALE)) - || s1.toLowerCase(LOCALE).equals(s2.toLowerCase(LOCALE)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java deleted file mode 100644 index 5b57a01564c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Optional; - -/** A no-op NativeImpl implementation; useful if we can't load one of the others */ -public class EmptyLibc implements Libc { - - @Override - public boolean available() { - return false; - } - - @Override - public Optional gettimeofday() { - return Optional.empty(); - } - - @Override - public Optional getpid() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java deleted file mode 100644 index fc9dd8d50c7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Collections; -import java.util.List; -import org.graalvm.nativeimage.c.CContext; -import org.graalvm.nativeimage.c.function.CFunction; - -@CContext(GraalGetpid.Directives.class) -public class GraalGetpid { - - static class Directives implements CContext.Directives { - - @Override - public List getHeaderFiles() { - - return Collections.singletonList(""); - } - } - - @CFunction - public static native int getpid(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java deleted file mode 100644 index a6535c2c653..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Locale; -import java.util.Optional; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class GraalLibc implements Libc { - - private static final Logger LOG = LoggerFactory.getLogger(GraalLibc.class); - - private static final Locale LOCALE = Locale.ENGLISH; - - private static final String MAC_PLATFORM_STR = "mac".toLowerCase(LOCALE); - private static final String DARWIN_PLATFORM_STR = "darwin".toLowerCase(LOCALE); - private static final String LINUX_PLATFORM_STR = "linux".toLowerCase(LOCALE); - - private final boolean available = checkAvailability(); - - /* This method is adapted from of jnr.ffi.Platform.determineOS() in jnr-ffi version 2.1.10. **/ - private boolean checkPlatform() { - - String osName = System.getProperty("os.name").split(" ", -1)[0]; - String compareStr = osName.toLowerCase(Locale.ENGLISH); - return compareStr.startsWith(MAC_PLATFORM_STR) - || compareStr.startsWith(DARWIN_PLATFORM_STR) - || compareStr.startsWith(LINUX_PLATFORM_STR); - } - - private boolean checkAvailability() { - - if (!checkPlatform()) { - return false; - } - - try { - getpidRaw(); - } catch (Throwable t) { - - LOG.debug("Error calling getpid()", t); - return false; - } - - try { - gettimeofdayRaw(); - } catch (Throwable t) { - - LOG.debug("Error calling gettimeofday()", t); - return false; - } - - return true; - } - - @Override - public boolean available() { - return this.available; - } - - /* Substrate includes a substitution for Linux + Darwin which redefines System.nanoTime() to use - * gettimeofday() (unless platform-specific higher-res clocks are available, which is even better). */ - @Override - public Optional gettimeofday() { - return this.available ? Optional.of(gettimeofdayRaw()) : Optional.empty(); - } - - private long gettimeofdayRaw() { - return Math.round(System.nanoTime() / 1_000d); - } - - @Override - public Optional getpid() { - return this.available ? 
Optional.of(getpidRaw()) : Optional.empty(); - } - - private int getpidRaw() { - return GraalGetpid.getpid(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java deleted file mode 100644 index 25236dee837..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Optional; -import java.util.function.Consumer; -import jnr.posix.POSIX; -import jnr.posix.POSIXFactory; -import jnr.posix.Timeval; -import jnr.posix.util.DefaultPOSIXHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class JnrLibc implements Libc { - - private static final Logger LOG = LoggerFactory.getLogger(JnrLibc.class); - - private final Optional posix; - - public JnrLibc() { - - this.posix = loadPosix(); - } - - @Override - public Optional gettimeofday() { - - return this.posix.flatMap(this::gettimeofdayImpl); - } - - @Override - public Optional getpid() { - - return this.posix.map(POSIX::getpid); - } - - @Override - public boolean available() { - return this.posix.isPresent(); - } - - private Optional loadPosix() { - - try { - return Optional.of(POSIXFactory.getPOSIX(new DefaultPOSIXHandler(), true)) - .flatMap(p -> catchAll(p, posix -> posix.getpid(), "Error calling getpid()")) - .flatMap(p -> catchAll(p, this::gettimeofdayImpl, "Error calling gettimeofday()")); - } catch (Throwable t) { - LOG.debug("Error loading POSIX", t); - return Optional.empty(); - } - } - - private Optional catchAll(POSIX posix, Consumer fn, String debugStr) { - try { - fn.accept(posix); - return Optional.of(posix); - } catch (Throwable t) { - - LOG.debug(debugStr, t); - return Optional.empty(); - } - } - - private Optional gettimeofdayImpl(POSIX posix) { - - Timeval tv = posix.allocateTimeval(); - int rv = posix.gettimeofday(tv); - if (rv != 0) { - LOG.debug("Expected 0 return value from gettimeofday(), observed " + rv); - return Optional.empty(); - } - return Optional.of(tv.sec() * 1_000_000 + tv.usec()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java deleted file mode 100644 index 532001498f4..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.Optional; - -/** - * Add an explicit Graal substitution for {@link JnrLibc}. If we don't implement something like this - * the analysis done at Graal native image build time will discover the jnr-posix references in - * JnrLibc even though they won't be used at runtime. By default jnr-ffi (used by jnr-posix to do - * it's work) will use {@link ClassLoader#defineClass(String, byte[], int, int)} which isn't - * supported by Graal. This behaviour can be changed with a system property but the cleanest - * solution is simply to remove the references to jnr-posix code via a Graal substitution. 
- */ -@TargetClass(JnrLibc.class) -@Substitute -final class JnrLibcSubstitution implements Libc { - - @Substitute - public JnrLibcSubstitution() {} - - @Substitute - @Override - public boolean available() { - return false; - } - - @Substitute - @Override - public Optional gettimeofday() { - return Optional.empty(); - } - - @Substitute - @Override - public Optional getpid() { - return Optional.empty(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java deleted file mode 100644 index f3bda6a8c88..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import java.util.Optional; - -public interface Libc { - - /* Maintained to allow Native.isXAvailable() functionality without trying to make a native call if - * the underlying support _is_ available. 
*/ - boolean available(); - - Optional gettimeofday(); - - Optional getpid(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java deleted file mode 100644 index e292914bb4b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** A gateway to perform system calls. */ -public class Native { - - private static final Logger LOG = LoggerFactory.getLogger(Native.class); - - private static class LibcLoader { - - /* These values come from Graal's imageinfo API which aims to offer the ability to detect - * when we're in the Graal build/run time via system props. The maintainers of Graal have - * agreed that this API will not change over time. We reference these props as literals - * to avoid introducing a dependency on Graal code for non-Graal users here. 
*/ - private static final String GRAAL_STATUS_PROP = "org.graalvm.nativeimage.imagecode"; - private static final String GRAAL_BUILDTIME_STATUS = "buildtime"; - private static final String GRAAL_RUNTIME_STATUS = "runtime"; - - public Libc load() { - try { - if (isGraal()) { - LOG.info("Using Graal-specific native functions"); - return new GraalLibc(); - } - return new JnrLibc(); - } catch (Throwable t) { - LOG.info( - "Unable to load JNR native implementation. This could be normal if JNR is excluded from the classpath", - t); - return new EmptyLibc(); - } - } - - private boolean isGraal() { - - String val = System.getProperty(GRAAL_STATUS_PROP); - return val != null - && (val.equals(GRAAL_RUNTIME_STATUS) || val.equalsIgnoreCase(GRAAL_BUILDTIME_STATUS)); - } - } - - private static final Libc LIBC = new LibcLoader().load(); - private static final CpuInfo.Cpu CPU = CpuInfo.determineCpu(); - - private static final String NATIVE_CALL_ERR_MSG = "Native call failed or was not available"; - - /** Whether {@link Native#currentTimeMicros()} is available on this system. */ - public static boolean isCurrentTimeMicrosAvailable() { - return LIBC.available(); - } - - /** - * The current time in microseconds, as returned by libc.gettimeofday(); can only be used if - * {@link #isCurrentTimeMicrosAvailable()} is true. - */ - public static long currentTimeMicros() { - return LIBC.gettimeofday().orElseThrow(() -> new IllegalStateException(NATIVE_CALL_ERR_MSG)); - } - - public static boolean isGetProcessIdAvailable() { - return LIBC.available(); - } - - public static int getProcessId() { - return LIBC.getpid().orElseThrow(() -> new IllegalStateException(NATIVE_CALL_ERR_MSG)); - } - - /** - * Returns the current processor architecture the JVM is running on. This value should match up to - * what's returned by jnr-ffi's Platform.getCPU() method. - * - * @return the current processor architecture. 
- */ - public static String getCpu() { - return CPU.toString(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java deleted file mode 100644 index 6b7d06045bd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.ClusterNameMismatchException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.Reconnection; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import 
com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The channel pool maintains a set of {@link DriverChannel} instances connected to a given node. - * - *

It allows clients to obtain a channel to execute their requests. - * - *

If one or more channels go down, a reconnection process starts in order to replace them; it - * runs until the channel count is back to its intended target. - */ -@ThreadSafe -public class ChannelPool implements AsyncAutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(ChannelPool.class); - - /** - * Initializes a new pool. - * - *

The returned completion stage will complete when all the underlying channels have finished - * their initialization. If one or more channels fail, a reconnection will be started immediately. - * Note that this method succeeds even if all channels fail, so you might get a pool that has no - * channels (i.e. {@link #next()} return {@code null}) and is reconnecting. - */ - public static CompletionStage init( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - InternalDriverContext context, - String sessionLogPrefix) { - ChannelPool pool = new ChannelPool(node, keyspaceName, distance, context, sessionLogPrefix); - return pool.connect(); - } - - // This is read concurrently, but only mutated on adminExecutor (by methods in SingleThreaded) - @VisibleForTesting final ChannelSet channels = new ChannelSet(); - - private final Node node; - private final CqlIdentifier initialKeyspaceName; - private final EventExecutor adminExecutor; - private final String sessionLogPrefix; - private final String logPrefix; - private final SingleThreaded singleThreaded; - private volatile boolean invalidKeyspace; - - private ChannelPool( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - InternalDriverContext context, - String sessionLogPrefix) { - this.node = node; - this.initialKeyspaceName = keyspaceName; - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.sessionLogPrefix = sessionLogPrefix; - this.logPrefix = sessionLogPrefix + "|" + node.getEndPoint(); - this.singleThreaded = new SingleThreaded(keyspaceName, distance, context); - } - - private CompletionStage connect() { - RunOrSchedule.on(adminExecutor, singleThreaded::connect); - return singleThreaded.connectFuture; - } - - public Node getNode() { - return node; - } - - /** - * The keyspace with which the pool was initialized (therefore a constant, it's not updated if the - * keyspace is switched later). 
- */ - public CqlIdentifier getInitialKeyspaceName() { - return initialKeyspaceName; - } - - /** - * Whether all channels failed due to an invalid keyspace. This is only used at initialization. We - * don't make the decision to close the pool here yet, that's done at the session level. - */ - public boolean isInvalidKeyspace() { - return invalidKeyspace; - } - - /** - * @return the channel that has the most available stream ids. This is called on the direct - * request path, and we want to avoid complex check-then-act semantics; therefore this might - * race and return a channel that is already closed, or {@code null}. In those cases, it is up - * to the caller to fail fast and move to the next node. - */ - public DriverChannel next() { - return channels.next(); - } - - /** @return the number of active channels in the pool. */ - public int size() { - return channels.size(); - } - - /** @return the number of available stream ids on all channels in the pool. */ - public int getAvailableIds() { - return channels.getAvailableIds(); - } - - /** - * @return the number of requests currently executing on all channels in this pool (including - * {@link #getOrphanedIds() orphaned ids}). - */ - public int getInFlight() { - return channels.getInFlight(); - } - - /** - * @return the number of stream ids for requests in all channels in this pool that have either - * timed out or been cancelled, but for which we can't release the stream id because a request - * might still come from the server. - */ - public int getOrphanedIds() { - return channels.getOrphanedIds(); - } - - /** - * Sets a new distance for the node this pool belongs to. This method returns immediately, the new - * distance will be set asynchronously. - * - * @param newDistance the new distance to set. - */ - public void resize(NodeDistance newDistance) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.resize(newDistance)); - } - - /** - * Changes the keyspace name on all the channels in this pool. - * - *

Note that this is not called directly by the user, but happens only on a SetKeyspace - * response after a successful "USE ..." query, so the name should be valid. If the keyspace - * switch fails on any channel, that channel is closed and a reconnection is started. - */ - public CompletionStage setKeyspace(CqlIdentifier newKeyspaceName) { - return RunOrSchedule.on(adminExecutor, () -> singleThreaded.setKeyspace(newKeyspaceName)); - } - - public void reconnectNow() { - RunOrSchedule.on(adminExecutor, singleThreaded::reconnectNow); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::forceClose); - return singleThreaded.closeFuture; - } - - /** Holds all administration tasks, that are confined to the admin executor. 
*/ - private class SingleThreaded { - - private final DriverConfig config; - private final ChannelFactory channelFactory; - private final EventBus eventBus; - // The channels that are currently connecting - private final List> pendingChannels = new ArrayList<>(); - private final Set closingChannels = new HashSet<>(); - private final Reconnection reconnection; - private final Object configListenerKey; - - private NodeDistance distance; - private int wantedCount; - private final CompletableFuture connectFuture = new CompletableFuture<>(); - private boolean isConnecting; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean isClosing; - private CompletableFuture setKeyspaceFuture; - - private CqlIdentifier keyspaceName; - - private SingleThreaded( - CqlIdentifier keyspaceName, NodeDistance distance, InternalDriverContext context) { - this.keyspaceName = keyspaceName; - this.config = context.getConfig(); - this.distance = distance; - this.wantedCount = getConfiguredSize(distance); - this.channelFactory = context.getChannelFactory(); - this.eventBus = context.getEventBus(); - ReconnectionPolicy reconnectionPolicy = context.getReconnectionPolicy(); - this.reconnection = - new Reconnection( - logPrefix, - adminExecutor, - () -> reconnectionPolicy.newNodeSchedule(node), - this::addMissingChannels, - () -> eventBus.fire(ChannelEvent.reconnectionStarted(node)), - () -> eventBus.fire(ChannelEvent.reconnectionStopped(node))); - this.configListenerKey = - eventBus.register( - ConfigChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onConfigChanged)); - } - - private void connect() { - assert adminExecutor.inEventLoop(); - if (isConnecting) { - return; - } - isConnecting = true; - CompletionStage initialChannels = - addMissingChannels() - .thenApply( - allConnected -> { - if (!allConnected) { - reconnection.start(); - } - return ChannelPool.this; - }); - CompletableFutures.completeFrom(initialChannels, connectFuture); - } - - private 
CompletionStage addMissingChannels() { - assert adminExecutor.inEventLoop(); - // We always wait for all attempts to succeed or fail before scheduling a reconnection - assert pendingChannels.isEmpty(); - - int missing = wantedCount - channels.size(); - LOG.debug("[{}] Trying to create {} missing channels", logPrefix, missing); - DriverChannelOptions options = - DriverChannelOptions.builder() - .withKeyspace(keyspaceName) - .withOwnerLogPrefix(sessionLogPrefix) - .build(); - for (int i = 0; i < missing; i++) { - CompletionStage channelFuture = channelFactory.connect(node, options); - pendingChannels.add(channelFuture); - } - return CompletableFutures.allDone(pendingChannels) - .thenApplyAsync(this::onAllConnected, adminExecutor); - } - - private boolean onAllConnected(@SuppressWarnings("unused") Void v) { - assert adminExecutor.inEventLoop(); - Throwable fatalError = null; - int invalidKeyspaceErrors = 0; - for (CompletionStage pendingChannel : pendingChannels) { - CompletableFuture future = pendingChannel.toCompletableFuture(); - assert future.isDone(); - if (future.isCompletedExceptionally()) { - Throwable error = CompletableFutures.getFailed(future); - ((DefaultNode) node) - .getMetricUpdater() - .incrementCounter( - error instanceof AuthenticationException - ? DefaultNodeMetric.AUTHENTICATION_ERRORS - : DefaultNodeMetric.CONNECTION_INIT_ERRORS, - null); - if (error instanceof ClusterNameMismatchException - || error instanceof UnsupportedProtocolVersionException) { - // This will likely be thrown by all channels, but finish the loop cleanly - fatalError = error; - } else if (error instanceof AuthenticationException) { - // Always warn because this is most likely something the operator needs to fix. - // Keep going to reconnect if it can be fixed without bouncing the client. 
- Loggers.warnWithException(LOG, "[{}] Authentication error", logPrefix, error); - } else if (error instanceof InvalidKeyspaceException) { - invalidKeyspaceErrors += 1; - } else { - if (config - .getDefaultProfile() - .getBoolean(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR)) { - Loggers.warnWithException( - LOG, "[{}] Error while opening new channel", logPrefix, error); - } else { - LOG.debug("[{}] Error while opening new channel", logPrefix, error); - } - } - } else { - DriverChannel channel = CompletableFutures.getCompleted(future); - if (isClosing) { - LOG.debug( - "[{}] New channel added ({}) but the pool was closed, closing it", - logPrefix, - channel); - channel.forceClose(); - } else { - LOG.debug("[{}] New channel added {}", logPrefix, channel); - channels.add(channel); - eventBus.fire(ChannelEvent.channelOpened(node)); - channel - .closeStartedFuture() - .addListener( - f -> - adminExecutor - .submit(() -> onChannelCloseStarted(channel)) - .addListener(UncaughtExceptions::log)); - channel - .closeFuture() - .addListener( - f -> - adminExecutor - .submit(() -> onChannelClosed(channel)) - .addListener(UncaughtExceptions::log)); - } - } - } - // If all channels failed, assume the keyspace is wrong - invalidKeyspace = - invalidKeyspaceErrors > 0 && invalidKeyspaceErrors == pendingChannels.size(); - - pendingChannels.clear(); - - if (fatalError != null) { - Loggers.warnWithException( - LOG, - "[{}] Fatal error while initializing pool, forcing the node down", - logPrefix, - fatalError); - // Note: getBroadcastRpcAddress() can only be empty for the control node (and not for modern - // C* versions anyway). If we already have a control connection open to that node, it's - // impossible to get a protocol version or cluster name mismatch error while creating the - // pool, so it's safe to ignore this case. 
- node.getBroadcastRpcAddress() - .ifPresent(address -> eventBus.fire(TopologyEvent.forceDown(address))); - // Don't bother continuing, the pool will get shut down soon anyway - return true; - } - - shrinkIfTooManyChannels(); // Can happen if the pool was shrinked during the reconnection - - int currentCount = channels.size(); - LOG.debug( - "[{}] Reconnection attempt complete, {}/{} channels", - logPrefix, - currentCount, - wantedCount); - // Stop reconnecting if we have the wanted count - return currentCount >= wantedCount; - } - - private void onChannelCloseStarted(DriverChannel channel) { - assert adminExecutor.inEventLoop(); - if (!isClosing) { - LOG.debug("[{}] Channel {} started graceful shutdown", logPrefix, channel); - channels.remove(channel); - closingChannels.add(channel); - eventBus.fire(ChannelEvent.channelClosed(node)); - reconnection.start(); - } - } - - private void onChannelClosed(DriverChannel channel) { - assert adminExecutor.inEventLoop(); - if (!isClosing) { - // Either it was closed abruptly and was still in the live set, or it was an orderly - // shutdown and it had moved to the closing set. 
- if (channels.remove(channel)) { - LOG.debug("[{}] Lost channel {}", logPrefix, channel); - eventBus.fire(ChannelEvent.channelClosed(node)); - reconnection.start(); - } else { - LOG.debug("[{}] Channel {} completed graceful shutdown", logPrefix, channel); - closingChannels.remove(channel); - } - } - } - - private void resize(NodeDistance newDistance) { - assert adminExecutor.inEventLoop(); - distance = newDistance; - int newChannelCount = getConfiguredSize(newDistance); - if (newChannelCount > wantedCount) { - LOG.debug("[{}] Growing ({} => {} channels)", logPrefix, wantedCount, newChannelCount); - wantedCount = newChannelCount; - reconnection.start(); - } else if (newChannelCount < wantedCount) { - LOG.debug("[{}] Shrinking ({} => {} channels)", logPrefix, wantedCount, newChannelCount); - wantedCount = newChannelCount; - if (!reconnection.isRunning()) { - shrinkIfTooManyChannels(); - } // else it will be handled at the end of the reconnection attempt - } - } - - private void shrinkIfTooManyChannels() { - assert adminExecutor.inEventLoop(); - int extraCount = channels.size() - wantedCount; - if (extraCount > 0) { - LOG.debug("[{}] Closing {} extra channels", logPrefix, extraCount); - Set toRemove = Sets.newHashSetWithExpectedSize(extraCount); - for (DriverChannel channel : channels) { - toRemove.add(channel); - if (--extraCount == 0) { - break; - } - } - for (DriverChannel channel : toRemove) { - channels.remove(channel); - channel.close(); - eventBus.fire(ChannelEvent.channelClosed(node)); - } - } - } - - private void onConfigChanged(@SuppressWarnings("unused") ConfigChangeEvent event) { - assert adminExecutor.inEventLoop(); - // resize re-reads the pool size from the configuration and does nothing if it hasn't changed, - // which is exactly what we want. 
- resize(distance); - } - - private CompletionStage setKeyspace(CqlIdentifier newKeyspaceName) { - assert adminExecutor.inEventLoop(); - if (setKeyspaceFuture != null && !setKeyspaceFuture.isDone()) { - return CompletableFutures.failedFuture( - new IllegalStateException( - "Can't call setKeyspace while a keyspace switch is already in progress")); - } - keyspaceName = newKeyspaceName; - setKeyspaceFuture = new CompletableFuture<>(); - - // Switch the keyspace on all live channels. - // We can read the size before iterating because mutations are confined to this thread: - int toSwitch = channels.size(); - if (toSwitch == 0) { - setKeyspaceFuture.complete(null); - } else { - AtomicInteger remaining = new AtomicInteger(toSwitch); - for (DriverChannel channel : channels) { - channel - .setKeyspace(newKeyspaceName) - .addListener( - f -> { - // Don't handle errors: if a channel fails to switch the keyspace, it closes - if (remaining.decrementAndGet() == 0) { - setKeyspaceFuture.complete(null); - } - }); - } - } - - // pending channels were scheduled with the old keyspace name, ensure they eventually switch - for (CompletionStage channelFuture : pendingChannels) { - // errors are swallowed here, this is fine because a setkeyspace error will close the - // channel, so it will eventually get reported - channelFuture.thenAccept(channel -> channel.setKeyspace(newKeyspaceName)); - } - - return setKeyspaceFuture; - } - - private void reconnectNow() { - assert adminExecutor.inEventLoop(); - // Don't force because if the reconnection is stopped, it means either we have enough channels - // or the pool is shutting down. 
- reconnection.reconnectNow(false); - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (isClosing) { - return; - } - isClosing = true; - - // If an attempt was in progress right now, it might open new channels but they will be - // handled in onAllConnected - reconnection.stop(); - - eventBus.unregister(configListenerKey, ConfigChangeEvent.class); - - // Close all channels, the pool future completes when all the channels futures have completed - int toClose = closingChannels.size() + channels.size(); - if (toClose == 0) { - closeFuture.complete(null); - } else { - AtomicInteger remaining = new AtomicInteger(toClose); - GenericFutureListener> channelCloseListener = - f -> { - if (!f.isSuccess()) { - Loggers.warnWithException(LOG, "[{}] Error closing channel", logPrefix, f.cause()); - } - if (remaining.decrementAndGet() == 0) { - closeFuture.complete(null); - } - }; - for (DriverChannel channel : channels) { - eventBus.fire(ChannelEvent.channelClosed(node)); - channel.close().addListener(channelCloseListener); - } - for (DriverChannel channel : closingChannels) { - // don't fire the close event, onChannelCloseStarted() already did it - channel.closeFuture().addListener(channelCloseListener); - } - } - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (!isClosing) { - close(); - } - for (DriverChannel channel : channels) { - channel.forceClose(); - } - for (DriverChannel channel : closingChannels) { - channel.forceClose(); - } - } - - private int getConfiguredSize(NodeDistance distance) { - return config - .getDefaultProfile() - .getInt( - (distance == NodeDistance.LOCAL) - ? 
DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE - : DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java deleted file mode 100644 index b854f4c326c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -/** Just a level of indirection to make testing easier. 
*/ -@ThreadSafe -public class ChannelPoolFactory { - public CompletionStage init( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - InternalDriverContext context, - String sessionLogPrefix) { - return ChannelPool.init(node, keyspaceName, distance, context, sessionLogPrefix); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java deleted file mode 100644 index b02e15819d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.Iterator; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Concurrent structure used to store the channels of a pool. - * - *

Its write semantics are similar to "copy-on-write" JDK collections, selection operations are - * expected to vastly outnumber mutations. - */ -@ThreadSafe -class ChannelSet implements Iterable { - - private static final Logger LOG = LoggerFactory.getLogger(ChannelSet.class); - /** - * The maximum number of iterations in the busy wait loop in {@link #next()} when there are - * multiple channels. This is a backstop to protect against thread starvation, in practice we've - * never observed more than 3 iterations in tests. - */ - private static final int MAX_ITERATIONS = 50; - - private volatile DriverChannel[] channels; - private final ReentrantLock lock = new ReentrantLock(); // must be held when mutating the array - - ChannelSet() { - this.channels = new DriverChannel[] {}; - } - - void add(DriverChannel toAdd) { - Preconditions.checkNotNull(toAdd); - lock.lock(); - try { - assert indexOf(channels, toAdd) < 0; - DriverChannel[] newChannels = Arrays.copyOf(channels, channels.length + 1); - newChannels[newChannels.length - 1] = toAdd; - channels = newChannels; - } finally { - lock.unlock(); - } - } - - boolean remove(DriverChannel toRemove) { - Preconditions.checkNotNull(toRemove); - lock.lock(); - try { - int index = indexOf(channels, toRemove); - if (index < 0) { - return false; - } else { - DriverChannel[] newChannels = new DriverChannel[channels.length - 1]; - int newI = 0; - for (int i = 0; i < channels.length; i++) { - if (i != index) { - newChannels[newI] = channels[i]; - newI += 1; - } - } - channels = newChannels; - return true; - } - } finally { - lock.unlock(); - } - } - - /** @return null if the set is empty or all are full */ - DriverChannel next() { - DriverChannel[] snapshot = this.channels; - switch (snapshot.length) { - case 0: - return null; - case 1: - DriverChannel onlyChannel = snapshot[0]; - return onlyChannel.preAcquireId() ? 
onlyChannel : null; - default: - for (int i = 0; i < MAX_ITERATIONS; i++) { - DriverChannel best = null; - int bestScore = 0; - for (DriverChannel channel : snapshot) { - int score = channel.getAvailableIds(); - if (score > bestScore) { - bestScore = score; - best = channel; - } - } - if (best == null) { - return null; - } else if (best.preAcquireId()) { - return best; - } - } - LOG.trace("Could not select a channel after {} iterations", MAX_ITERATIONS); - return null; - } - } - - /** @return the number of available stream ids on all channels in this channel set. */ - int getAvailableIds() { - int availableIds = 0; - DriverChannel[] snapshot = this.channels; - for (DriverChannel channel : snapshot) { - availableIds += channel.getAvailableIds(); - } - return availableIds; - } - - /** - * @return the number of requests currently executing on all channels in this channel set - * (including {@link #getOrphanedIds() orphaned ids}). - */ - int getInFlight() { - int inFlight = 0; - DriverChannel[] snapshot = this.channels; - for (DriverChannel channel : snapshot) { - inFlight += channel.getInFlight(); - } - return inFlight; - } - - /** - * @return the number of stream ids for requests in all channels in this channel set that have - * either timed out or been cancelled, but for which we can't release the stream id because a - * request might still come from the server. 
- */ - int getOrphanedIds() { - int orphanedIds = 0; - DriverChannel[] snapshot = this.channels; - for (DriverChannel channel : snapshot) { - orphanedIds += channel.getOrphanedIds(); - } - return orphanedIds; - } - - int size() { - return this.channels.length; - } - - @NonNull - @Override - public Iterator iterator() { - return Iterators.forArray(this.channels); - } - - private static int indexOf(DriverChannel[] channels, DriverChannel key) { - for (int i = 0; i < channels.length; i++) { - if (channels[i] == key) { - return i; - } - } - return -1; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java deleted file mode 100644 index 74270caef91..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.protocol.internal.Compressor; -import io.netty.buffer.ByteBuf; -import java.util.Locale; - -/** - * Provides a single entry point to create compressor instances in the driver. - * - *

Note that this class also serves as a convenient target for GraalVM substitutions, see {@link - * CompressorSubstitutions}. - */ -public class BuiltInCompressors { - - public static Compressor newInstance(String name, DriverContext context) { - switch (name.toLowerCase(Locale.ROOT)) { - case "lz4": - return new Lz4Compressor(context); - case "snappy": - return new SnappyCompressor(context); - case "none": - return Compressor.none(); - default: - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java deleted file mode 100644 index 95e6be07434..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Compressor; -import io.netty.buffer.ByteBuf; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class ByteBufCompressor implements Compressor { - - @Override - public ByteBuf compress(ByteBuf uncompressed) { - return uncompressed.isDirect() - ? compressDirect(uncompressed, true) - : compressHeap(uncompressed, true); - } - - @Override - public ByteBuf compressWithoutLength(ByteBuf uncompressed) { - return uncompressed.isDirect() - ? compressDirect(uncompressed, false) - : compressHeap(uncompressed, false); - } - - protected abstract ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength); - - protected abstract ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength); - - @Override - public ByteBuf decompress(ByteBuf compressed) { - return decompressWithoutLength(compressed, readUncompressedLength(compressed)); - } - - protected abstract int readUncompressedLength(ByteBuf compressed); - - @Override - public ByteBuf decompressWithoutLength(ByteBuf compressed, int uncompressedLength) { - return compressed.isDirect() - ? decompressDirect(compressed, uncompressedLength) - : decompressHeap(compressed, uncompressedLength); - } - - protected abstract ByteBuf decompressDirect(ByteBuf input, int uncompressedLength); - - protected abstract ByteBuf decompressHeap(ByteBuf input, int uncompressedLength); - - protected static ByteBuffer inputNioBuffer(ByteBuf buf) { - // Using internalNioBuffer(...) as we only hold the reference in this method and so can - // reduce Object allocations. - int index = buf.readerIndex(); - int len = buf.readableBytes(); - return buf.nioBufferCount() == 1 - ? 
buf.internalNioBuffer(index, len) - : buf.nioBuffer(index, len); - } - - protected static ByteBuffer outputNioBuffer(ByteBuf buf) { - int index = buf.writerIndex(); - int len = buf.writableBytes(); - return buf.nioBufferCount() == 1 - ? buf.internalNioBuffer(index, len) - : buf.nioBuffer(index, len); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java deleted file mode 100644 index 1371009f989..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.CompositeByteBuf; -import io.netty.util.CharsetUtil; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.zip.CRC32; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ByteBufPrimitiveCodec implements PrimitiveCodec { - - private final ByteBufAllocator allocator; - - public ByteBufPrimitiveCodec(ByteBufAllocator allocator) { - this.allocator = allocator; - } - - @Override - public ByteBuf allocate(int size) { - return allocator.ioBuffer(size, size); - } - - @Override - public void release(ByteBuf toRelease) { - toRelease.release(); - } - - @Override - public int sizeOf(ByteBuf toMeasure) { - return toMeasure.readableBytes(); - } - - @Override - public ByteBuf concat(ByteBuf left, ByteBuf right) { - if (!left.isReadable()) { - return right.duplicate(); - } else if (!right.isReadable()) { - return left.duplicate(); - } else { - CompositeByteBuf c = allocator.compositeBuffer(2); - c.addComponents(left, right); - // c.readerIndex() is 0, which is the first readable byte in left - c.writerIndex( - left.writerIndex() - left.readerIndex() + right.writerIndex() - right.readerIndex()); - return c; - } - } - - @Override - public void markReaderIndex(ByteBuf source) { - source.markReaderIndex(); - } - - @Override - public void resetReaderIndex(ByteBuf source) { - source.resetReaderIndex(); - } - - @Override - public byte readByte(ByteBuf source) { - return source.readByte(); - } - - @Override - public int readInt(ByteBuf source) { - return source.readInt(); - } - - @Override - public int readInt(ByteBuf source, int offset) { - return source.getInt(source.readerIndex() + offset); - } - - @Override - public InetAddress readInetAddr(ByteBuf source) { - 
int length = readByte(source) & 0xFF; - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return newInetAddress(bytes); - } - - @Override - public long readLong(ByteBuf source) { - return source.readLong(); - } - - @Override - public int readUnsignedShort(ByteBuf source) { - return source.readUnsignedShort(); - } - - @Override - public ByteBuffer readBytes(ByteBuf source) { - int length = readInt(source); - if (length < 0) return null; - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return ByteBuffer.wrap(bytes); - } - - @Override - public byte[] readShortBytes(ByteBuf source) { - try { - int length = readUnsignedShort(source); - byte[] bytes = new byte[length]; - source.readBytes(bytes); - return bytes; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read a byte array preceded by its 2 bytes length"); - } - } - - @Override - public String readString(ByteBuf source) { - int length = readUnsignedShort(source); - return readString(source, length); - } - - @Override - public String readLongString(ByteBuf source) { - int length = readInt(source); - return readString(source, length); - } - - @Override - public ByteBuf readRetainedSlice(ByteBuf source, int sliceLength) { - return source.readRetainedSlice(sliceLength); - } - - @Override - public void updateCrc(ByteBuf source, CRC32 crc) { - crc.update(source.internalNioBuffer(source.readerIndex(), source.readableBytes())); - } - - @Override - public void writeByte(byte b, ByteBuf dest) { - dest.writeByte(b); - } - - @Override - public void writeInt(int i, ByteBuf dest) { - dest.writeInt(i); - } - - @Override - public void writeInetAddr(InetAddress inetAddr, ByteBuf dest) { - byte[] bytes = inetAddr.getAddress(); - writeByte((byte) bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLong(long l, ByteBuf dest) { - dest.writeLong(l); - } - - @Override - public void writeUnsignedShort(int i, ByteBuf dest) { - 
dest.writeShort(i); - } - - @Override - public void writeString(String s, ByteBuf dest) { - byte[] bytes = s.getBytes(CharsetUtil.UTF_8); - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeLongString(String s, ByteBuf dest) { - byte[] bytes = s.getBytes(CharsetUtil.UTF_8); - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - - @Override - public void writeBytes(ByteBuffer bytes, ByteBuf dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.remaining(), dest); - dest.writeBytes(bytes.duplicate()); - } - } - - @Override - public void writeBytes(byte[] bytes, ByteBuf dest) { - if (bytes == null) { - writeInt(-1, dest); - } else { - writeInt(bytes.length, dest); - dest.writeBytes(bytes); - } - } - - @Override - public void writeShortBytes(byte[] bytes, ByteBuf dest) { - writeUnsignedShort(bytes.length, dest); - dest.writeBytes(bytes); - } - - private static String readString(ByteBuf source, int length) { - try { - String str = source.toString(source.readerIndex(), length, CharsetUtil.UTF_8); - source.readerIndex(source.readerIndex() + length); - return str; - } catch (IndexOutOfBoundsException e) { - throw new IllegalArgumentException( - "Not enough bytes to read an UTF-8 serialized string of size " + length, e); - } - } - - private InetAddress newInetAddress(byte[] bytes) { - try { - return InetAddress.getByAddress(bytes); - } catch (UnknownHostException e) { - // Per the Javadoc, the only way this can happen is if the length is illegal - throw new IllegalArgumentException( - String.format("Invalid address length: %d (%s)", bytes.length, Arrays.toString(bytes))); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java deleted file mode 100644 index 9b112559aab..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe -public class ByteBufSegmentBuilder extends SegmentBuilder { - - private static final Logger LOG = LoggerFactory.getLogger(ByteBufSegmentBuilder.class); - - private final ChannelHandlerContext context; - 
private final String logPrefix; - - public ByteBufSegmentBuilder( - @NonNull ChannelHandlerContext context, - @NonNull PrimitiveCodec primitiveCodec, - @NonNull FrameCodec frameCodec, - @NonNull String logPrefix) { - super(primitiveCodec, frameCodec); - this.context = context; - this.logPrefix = logPrefix; - } - - @Override - @NonNull - protected ChannelPromise mergeStates(@NonNull List framePromises) { - if (framePromises.size() == 1) { - return framePromises.get(0); - } - // We concatenate multiple frames into one segment. When the segment is written, all the frames - // are written. - ChannelPromise segmentPromise = context.newPromise(); - ImmutableList dependents = ImmutableList.copyOf(framePromises); - segmentPromise.addListener( - future -> { - if (future.isSuccess()) { - for (ChannelPromise framePromise : dependents) { - framePromise.setSuccess(); - } - } else { - Throwable cause = future.cause(); - for (ChannelPromise framePromise : dependents) { - framePromise.setFailure(cause); - } - } - }); - return segmentPromise; - } - - @Override - @NonNull - protected List splitState(@NonNull ChannelPromise framePromise, int sliceCount) { - // We split one frame into multiple slices. When all slices are written, the frame is written. 
- List slicePromises = new ArrayList<>(sliceCount); - for (int i = 0; i < sliceCount; i++) { - slicePromises.add(context.newPromise()); - } - GenericFutureListener> sliceListener = - new SliceWriteListener(framePromise, slicePromises); - for (int i = 0; i < sliceCount; i++) { - slicePromises.get(i).addListener(sliceListener); - } - return slicePromises; - } - - @Override - protected void processSegment( - @NonNull Segment segment, @NonNull ChannelPromise segmentPromise) { - context.write(segment, segmentPromise); - } - - @Override - protected void onLargeFrameSplit(@NonNull Frame frame, int frameLength, int sliceCount) { - LOG.trace( - "[{}] Frame {} is too large ({} > {}), splitting into {} segments", - logPrefix, - frame.streamId, - frameLength, - Segment.MAX_PAYLOAD_LENGTH, - sliceCount); - } - - @Override - protected void onSegmentFull( - @NonNull Frame frame, int frameLength, int currentPayloadLength, int currentFrameCount) { - LOG.trace( - "[{}] Current self-contained segment is full ({}/{} bytes, {} frames), processing now", - logPrefix, - currentPayloadLength, - Segment.MAX_PAYLOAD_LENGTH, - currentFrameCount); - } - - @Override - protected void onSmallFrameAdded( - @NonNull Frame frame, int frameLength, int currentPayloadLength, int currentFrameCount) { - LOG.trace( - "[{}] Added frame {} to current self-contained segment " - + "(bringing it to {}/{} bytes, {} frames)", - logPrefix, - frame.streamId, - currentPayloadLength, - Segment.MAX_PAYLOAD_LENGTH, - currentFrameCount); - } - - @Override - protected void onLastSegmentFlushed(int currentPayloadLength, int currentFrameCount) { - LOG.trace( - "[{}] Flushing last self-contained segment ({}/{} bytes, {} frames)", - logPrefix, - currentPayloadLength, - Segment.MAX_PAYLOAD_LENGTH, - currentFrameCount); - } - - @NotThreadSafe - static class SliceWriteListener implements GenericFutureListener> { - - private final ChannelPromise parentPromise; - private final List slicePromises; - - // All slices are written to 
the same channel, and the segment is built from the Flusher which - // also runs on the same event loop, so we don't need synchronization. - private int remainingSlices; - - SliceWriteListener(@NonNull ChannelPromise parentPromise, List slicePromises) { - this.parentPromise = parentPromise; - this.slicePromises = slicePromises; - this.remainingSlices = slicePromises.size(); - } - - @Override - public void operationComplete(@NonNull Future future) { - if (!parentPromise.isDone()) { - if (future.isSuccess()) { - remainingSlices -= 1; - if (remainingSlices == 0) { - parentPromise.setSuccess(); - } - } else { - // If any slice fails, we can immediately mark the whole frame as failed: - parentPromise.setFailure(future.cause()); - // Cancel any remaining slice, Netty will not send the bytes. - for (ChannelPromise slicePromise : slicePromises) { - slicePromise.cancel(/*Netty ignores this*/ false); - } - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java deleted file mode 100644 index 03125bd33a5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.connection.CrcMismatchException; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import java.nio.ByteOrder; -import net.jcip.annotations.NotThreadSafe; - -/** - * Decodes {@link Segment}s from a stream of bytes. - * - *

This works like a regular length-field-based decoder, but we override {@link - * #getUnadjustedFrameLength} to handle two peculiarities: the length is encoded on 17 bits, and we - * also want to check the header CRC before we use it. So we parse the whole segment header ahead of - * time, and store it until we're ready to build the segment. - */ -@NotThreadSafe -public class BytesToSegmentDecoder extends LengthFieldBasedFrameDecoder { - - private final SegmentCodec segmentCodec; - private SegmentCodec.Header header; - - public BytesToSegmentDecoder(@NonNull SegmentCodec segmentCodec) { - super( - // max length (Netty wants this to be the overall length including everything): - segmentCodec.headerLength() - + SegmentCodec.CRC24_LENGTH - + Segment.MAX_PAYLOAD_LENGTH - + SegmentCodec.CRC32_LENGTH, - // offset and size of the "length" field: that's the whole header - 0, - segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH, - // length adjustment: add the trailing CRC to the declared length - SegmentCodec.CRC32_LENGTH, - // bytes to skip: the header (we've already parsed it while reading the length) - segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH); - this.segmentCodec = segmentCodec; - } - - @Override - protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { - try { - ByteBuf payloadAndCrc = (ByteBuf) super.decode(ctx, in); - if (payloadAndCrc == null) { - return null; - } else { - assert header != null; - try { - Segment segment = segmentCodec.decode(header, payloadAndCrc); - header = null; - return segment; - } catch (com.datastax.oss.protocol.internal.CrcMismatchException e) { - throw new CrcMismatchException(e.getMessage()); - } - } - } catch (Exception e) { - // Don't hold on to a stale header if we failed to decode the rest of the segment - header = null; - throw e; - } - } - - @Override - protected long getUnadjustedFrameLength(ByteBuf buffer, int offset, int length, ByteOrder order) { - // The parent class calls this 
repeatedly for the same "frame" if there weren't enough - // accumulated bytes the first time. Only decode the header the first time: - if (header == null) { - try { - header = segmentCodec.decodeHeader(buffer.slice(offset, length)); - } catch (com.datastax.oss.protocol.internal.CrcMismatchException e) { - throw new CrcMismatchException(e.getMessage()); - } - } - return header.payloadLength; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java deleted file mode 100644 index 8a551a039db..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static com.datastax.oss.driver.internal.core.util.Dependency.LZ4; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.datastax.oss.protocol.internal.Compressor; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import io.netty.buffer.ByteBuf; -import java.util.Locale; -import java.util.function.BooleanSupplier; - -/** - * Handles GraalVM substitutions for compressors: LZ4 is only supported if we can find the native - * library in the classpath, and Snappy is never supported. - * - *

When a compressor is not supported, we delete its class, and modify {@link - * BuiltInCompressors#newInstance(String, DriverContext)} to throw an error if the user attempts to - * configure it. - */ -@SuppressWarnings("unused") -public class CompressorSubstitutions { - - @TargetClass(value = BuiltInCompressors.class, onlyWith = Lz4Present.class) - public static final class BuiltInCompressorsLz4Only { - @Substitute - public static Compressor newInstance(String name, DriverContext context) { - switch (name.toLowerCase(Locale.ROOT)) { - case "lz4": - return new Lz4Compressor(context); - case "snappy": - throw new UnsupportedOperationException( - "Snappy compression is not supported for native images"); - case "none": - return Compressor.none(); - default: - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } - } - - @TargetClass(value = BuiltInCompressors.class, onlyWith = Lz4Missing.class) - public static final class NoBuiltInCompressors { - @Substitute - public static Compressor newInstance(String name, DriverContext context) { - switch (name.toLowerCase(Locale.ROOT)) { - case "lz4": - throw new UnsupportedOperationException( - "This native image was not built with support for LZ4 compression"); - case "snappy": - throw new UnsupportedOperationException( - "Snappy compression is not supported for native images"); - case "none": - return Compressor.none(); - default: - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } - } - - public static class Lz4Present implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return GraalDependencyChecker.isPresent(LZ4); - } - } - - public static class Lz4Missing extends Lz4Present { - @Override - public boolean 
getAsBoolean() { - return !super.getAsBoolean(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java deleted file mode 100644 index 20816ba581b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import io.netty.handler.codec.TooLongFrameException; -import java.util.Collections; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@NotThreadSafe -public class FrameDecoder extends LengthFieldBasedFrameDecoder { - private static final Logger LOG = LoggerFactory.getLogger(FrameDecoder.class); - - // Where the length of the frame is located in the payload - private static final int LENGTH_FIELD_OFFSET = 5; - private static final int LENGTH_FIELD_LENGTH = 4; - - private final FrameCodec frameCodec; - private boolean isFirstResponse; - - public FrameDecoder(FrameCodec frameCodec, int maxFrameLengthInBytes) { - super(maxFrameLengthInBytes, LENGTH_FIELD_OFFSET, LENGTH_FIELD_LENGTH, 0, 0, true); - this.frameCodec = frameCodec; - } - - @Override - protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { - int startIndex = in.readerIndex(); - if (isFirstResponse) { - isFirstResponse = false; - - // Must read at least the protocol v1/v2 header (see below) - if (in.readableBytes() < 8) { - return null; - } - // Special case for obsolete protocol versions (< v3): the length field is at a different - // position, so we can't delegate to super.decode() which would read the wrong length. 
- int protocolVersion = (int) in.getByte(startIndex) & 0b0111_1111; - if (protocolVersion < 3) { - int streamId = in.getByte(startIndex + 2); - int length = in.getInt(startIndex + 4); - // We don't need a full-blown decoder, just to signal the protocol error. So discard the - // incoming data and spoof a server-side protocol error. - if (in.readableBytes() < 8 + length) { - return null; // keep reading until we can discard the whole message at once - } else { - in.readerIndex(startIndex + 8 + length); - } - return Frame.forResponse( - protocolVersion, - streamId, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - new Error( - ProtocolConstants.ErrorCode.PROTOCOL_ERROR, - "Invalid or unsupported protocol version")); - } - } - - try { - ByteBuf buffer = (ByteBuf) super.decode(ctx, in); - return (buffer == null) - ? null // did not receive whole frame yet, keep reading - : frameCodec.decode(buffer); - } catch (Exception e) { - // If decoding failed, try to read at least the stream id, so that the error can be - // propagated to the client request matching that id (otherwise we have to fail all - // pending requests on this channel) - int streamId; - try { - streamId = in.getShort(startIndex + 2); - } catch (Exception e1) { - // Should never happen, super.decode does not return a non-null buffer until the length - // field has been read, and the stream id comes before - Loggers.warnWithException(LOG, "Unexpected error while reading stream id", e1); - streamId = -1; - } - if (e instanceof TooLongFrameException) { - // Translate the Netty error to our own type - e = new FrameTooLongException(ctx.channel().remoteAddress(), e.getMessage()); - } - throw new FrameDecodingException(streamId, e); - } - } - - @Override - protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) { - return buffer.slice(index, length); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java deleted file mode 100644 index c209f3f263b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import io.netty.handler.codec.DecoderException; - -/** - * Wraps an error while decoding an incoming protocol frame. - * - *

This is only used internally, never exposed to the client. - */ -public class FrameDecodingException extends DecoderException { - public final int streamId; - - public FrameDecodingException(int streamId, Throwable cause) { - super("Error decoding frame for streamId " + streamId, cause); - this.streamId = streamId; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java deleted file mode 100644 index 6504ab29728..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageEncoder; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ChannelHandler.Sharable -@ThreadSafe -public class FrameEncoder extends MessageToMessageEncoder { - - private final FrameCodec frameCodec; - private final int maxFrameLength; - - public FrameEncoder(FrameCodec frameCodec, int maxFrameLength) { - super(Frame.class); - this.frameCodec = frameCodec; - this.maxFrameLength = maxFrameLength; - } - - @Override - protected void encode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { - ByteBuf buffer = frameCodec.encode(frame); - int actualLength = buffer.readableBytes(); - if (actualLength > maxFrameLength) { - throw new FrameTooLongException( - ctx.channel().remoteAddress(), - String.format("Outgoing frame length exceeds %d: %d", maxFrameLength, actualLength)); - } - out.add(buffer); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java deleted file mode 100644 index 46c872f4adc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; -import net.jcip.annotations.NotThreadSafe; - -@NotThreadSafe -public class FrameToSegmentEncoder extends ChannelOutboundHandlerAdapter { - - private final PrimitiveCodec primitiveCodec; - private final FrameCodec frameCodec; - private final String logPrefix; - - private ByteBufSegmentBuilder segmentBuilder; - - public FrameToSegmentEncoder( - @NonNull PrimitiveCodec primitiveCodec, - @NonNull FrameCodec frameCodec, - @NonNull String logPrefix) { - this.primitiveCodec = primitiveCodec; - this.frameCodec = frameCodec; - this.logPrefix = logPrefix; - } - - @Override - public void handlerAdded(@NonNull ChannelHandlerContext ctx) { - segmentBuilder = new ByteBufSegmentBuilder(ctx, primitiveCodec, frameCodec, logPrefix); - } - - @Override - public void write( - @NonNull ChannelHandlerContext ctx, @NonNull Object msg, @NonNull ChannelPromise promise) - throws Exception { - if (msg instanceof Frame) { - segmentBuilder.addFrame(((Frame) msg), 
promise); - } else { - super.write(ctx, msg, promise); - } - } - - @Override - public void flush(@NonNull ChannelHandlerContext ctx) throws Exception { - segmentBuilder.flush(); - super.flush(ctx); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java deleted file mode 100644 index d376cefc216..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static com.datastax.oss.driver.internal.core.util.Dependency.LZ4; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import io.netty.buffer.ByteBuf; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; -import net.jpountz.lz4.LZ4Compressor; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4FastDecompressor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class Lz4Compressor extends ByteBufCompressor { - - private static final Logger LOG = LoggerFactory.getLogger(Lz4Compressor.class); - - private final LZ4Compressor compressor; - private final LZ4FastDecompressor decompressor; - - public Lz4Compressor(DriverContext context) { - this(context.getSessionName()); - } - - @VisibleForTesting - Lz4Compressor(String sessionName) { - if (DefaultDependencyChecker.isPresent(LZ4)) { - LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); - LOG.info("[{}] Using {}", sessionName, lz4Factory.toString()); - this.compressor = lz4Factory.fastCompressor(); - this.decompressor = lz4Factory.fastDecompressor(); - } else { - throw new IllegalStateException( - "Could not find the LZ4 library on the classpath " - + "(the driver declares it as an optional dependency, " - + "so you need to declare it explicitly)"); - } - } - - @Override - public String algorithm() { - return "lz4"; - } - - @Override - protected ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength) { - int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); - // If the input is direct we will allocate a direct output buffer as well as this will allow us - // to use LZ4Compressor.compress and so eliminate memory copies. 
- ByteBuf output = - input.alloc().directBuffer((prependWithUncompressedLength ? 4 : 0) + maxCompressedLength); - try { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - if (prependWithUncompressedLength) { - output.writeInt(in.remaining()); - } - - ByteBuffer out = outputNioBuffer(output); - int written = - compressor.compress( - in, in.position(), in.remaining(), out, out.position(), out.remaining()); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } - - @Override - protected ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength) { - int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); - - // Not a direct buffer so use byte arrays... - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. - ByteBuf output = - input.alloc().heapBuffer((prependWithUncompressedLength ? 4 : 0) + maxCompressedLength); - try { - if (prependWithUncompressedLength) { - output.writeInt(len); - } - // calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = compressor.compress(in, inOffset, len, out, offset); - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. 
- output.release(); - throw e; - } - return output; - } - - @Override - protected int readUncompressedLength(ByteBuf compressed) { - return compressed.readInt(); - } - - @Override - protected ByteBuf decompressDirect(ByteBuf input, int uncompressedLength) { - // If the input is direct we will allocate a direct output buffer as well as this will allow us - // to use LZ4Compressor.decompress and so eliminate memory copies. - int readable = input.readableBytes(); - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - ByteBuf output = input.alloc().directBuffer(uncompressedLength); - try { - ByteBuffer out = outputNioBuffer(output); - int read = decompressor.decompress(in, in.position(), out, out.position(), out.remaining()); - if (read != readable) { - throw new IllegalArgumentException("Compressed lengths mismatch"); - } - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + uncompressedLength); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } - - @Override - protected ByteBuf decompressHeap(ByteBuf input, int uncompressedLength) { - // Not a direct buffer so use byte arrays... - byte[] in = input.array(); - int len = input.readableBytes(); - int inOffset = input.arrayOffset() + input.readerIndex(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. 
- ByteBuf output = input.alloc().heapBuffer(uncompressedLength); - try { - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength); - if (read != len) { - throw new IllegalArgumentException("Compressed lengths mismatch"); - } - - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + uncompressedLength); - } catch (Exception e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw e; - } - return output; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java deleted file mode 100644 index c7845545df4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageEncoder; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -@ChannelHandler.Sharable -public class SegmentToBytesEncoder extends MessageToMessageEncoder> { - - private final SegmentCodec segmentCodec; - - public SegmentToBytesEncoder(@NonNull SegmentCodec segmentCodec) { - this.segmentCodec = segmentCodec; - } - - @Override - protected void encode( - @NonNull ChannelHandlerContext ctx, - @NonNull Segment segment, - @NonNull List out) { - segmentCodec.encode(segment, out); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java deleted file mode 100644 index b15a17bb87f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.Segment; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.CompositeByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageDecoder; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Converts the segments decoded by {@link BytesToSegmentDecoder} into legacy frames understood by - * the rest of the driver. - */ -@NotThreadSafe -public class SegmentToFrameDecoder extends MessageToMessageDecoder> { - - private static final Logger LOG = LoggerFactory.getLogger(SegmentToFrameDecoder.class); - - private static final int UNKNOWN_LENGTH = Integer.MIN_VALUE; - - private final FrameCodec frameCodec; - private final String logPrefix; - - // Accumulated state when we are reading a sequence of slices - private int targetLength = UNKNOWN_LENGTH; - private final List accumulatedSlices = new ArrayList<>(); - private int accumulatedLength; - - public SegmentToFrameDecoder(@NonNull FrameCodec frameCodec, @NonNull String logPrefix) { - this.logPrefix = logPrefix; - this.frameCodec = frameCodec; - } - - @Override - protected void decode( - @NonNull ChannelHandlerContext ctx, - @NonNull Segment segment, - @NonNull List out) { - if (segment.isSelfContained) { - decodeSelfContained(segment, out); - } else { - decodeSlice(segment, ctx.alloc(), out); - } - } - - private void decodeSelfContained(Segment segment, List out) { - ByteBuf payload = segment.payload; - int frameCount = 0; - try { - do { - Frame frame = 
frameCodec.decode(payload); - LOG.trace( - "[{}] Decoded response frame {} from self-contained segment", - logPrefix, - frame.streamId); - out.add(frame); - frameCount += 1; - } while (payload.isReadable()); - } finally { - payload.release(); - } - LOG.trace("[{}] Done processing self-contained segment ({} frames)", logPrefix, frameCount); - } - - private void decodeSlice(Segment segment, ByteBufAllocator allocator, List out) { - assert targetLength != UNKNOWN_LENGTH ^ (accumulatedSlices.isEmpty() && accumulatedLength == 0); - ByteBuf slice = segment.payload; - if (targetLength == UNKNOWN_LENGTH) { - // First slice, read ahead to find the target length - targetLength = FrameCodec.V3_ENCODED_HEADER_SIZE + frameCodec.decodeBodySize(slice); - } - accumulatedSlices.add(slice); - accumulatedLength += slice.readableBytes(); - int accumulatedSlicesSize = accumulatedSlices.size(); - LOG.trace( - "[{}] Decoded slice {}, {}/{} bytes", - logPrefix, - accumulatedSlicesSize, - accumulatedLength, - targetLength); - assert accumulatedLength <= targetLength; - if (accumulatedLength == targetLength) { - // We've received enough data to reassemble the whole message - CompositeByteBuf encodedFrame = allocator.compositeBuffer(accumulatedSlicesSize); - encodedFrame.addComponents(true, accumulatedSlices); - Frame frame; - try { - frame = frameCodec.decode(encodedFrame); - } finally { - encodedFrame.release(); - // Reset our state - targetLength = UNKNOWN_LENGTH; - accumulatedSlices.clear(); - accumulatedLength = 0; - } - LOG.trace( - "[{}] Decoded response frame {} from {} slices", - logPrefix, - frame.streamId, - accumulatedSlicesSize); - out.add(frame); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java deleted file mode 100644 index 21165d808b9..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import com.datastax.oss.driver.internal.core.util.Dependency; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; -import org.xerial.snappy.Snappy; - -/** - * @implNote The Snappy protocol already encodes the uncompressed length in the compressed payload, - * so {@link #compress(ByteBuf)} and {@link #compressWithoutLength(ByteBuf)} produce the same - * output for this compressor. The corresponding parameters {@code - * prependWithUncompressedLength} and {@code uncompressedLength} are ignored by their respective - * methods. 
- */ -@ThreadSafe -public class SnappyCompressor extends ByteBufCompressor { - - public SnappyCompressor(@SuppressWarnings("unused") DriverContext context) { - if (!DefaultDependencyChecker.isPresent(Dependency.SNAPPY)) { - throw new IllegalStateException( - "Could not find the Snappy library on the classpath " - + "(the driver declares it as an optional dependency, " - + "so you need to declare it explicitly)"); - } - } - - @Override - public String algorithm() { - return "snappy"; - } - - @Override - protected ByteBuf compressDirect( - ByteBuf input, /*ignored*/ boolean prependWithUncompressedLength) { - int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); - // If the input is direct we will allocate a direct output buffer as well as this will allow us - // to use Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. - ByteBuf output = input.alloc().directBuffer(maxCompressedLength); - try { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuffer out = outputNioBuffer(output); - int written = Snappy.compress(in, out); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + written); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new RuntimeException(e); - } - } - - @Override - protected ByteBuf compressHeap(ByteBuf input, /*ignored*/ boolean prependWithUncompressedLength) { - int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. 
- ByteBuf output = input.alloc().heapBuffer(maxCompressedLength); - try { - // Calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = Snappy.compress(in, inOffset, len, out, offset); - - // Increase the writerIndex with the written bytes. - output.writerIndex(output.writerIndex() + written); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - output.release(); - throw new RuntimeException(e); - } - } - - @Override - protected int readUncompressedLength(ByteBuf compressed) { - // Since compress methods don't actually prepend with a length, we have nothing to read here. - // Return a bogus length (it will be ignored by the decompress methods, so the actual value - // doesn't matter). - return -1; - } - - @Override - protected ByteBuf decompressDirect(ByteBuf input, /*ignored*/ int uncompressedLength) { - ByteBuffer in = inputNioBuffer(input); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuf output = null; - try { - if (!Snappy.isValidCompressedBuffer(in)) { - throw new IllegalArgumentException( - "Provided frame does not appear to be Snappy compressed"); - } - // If the input is direct we will allocate a direct output buffer as well as this will allow - // us to use Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. - output = input.alloc().directBuffer(Snappy.uncompressedLength(in)); - ByteBuffer out = outputNioBuffer(output); - - int size = Snappy.uncompress(in, out); - // Set the writer index so the amount of written bytes is reflected - output.writerIndex(output.writerIndex() + size); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. 
- if (output != null) { - output.release(); - } - throw new RuntimeException(e); - } - } - - @Override - protected ByteBuf decompressHeap(ByteBuf input, /*ignored*/ int uncompressedLength) { - // Not a direct buffer so use byte arrays... - int inOffset = input.arrayOffset() + input.readerIndex(); - byte[] in = input.array(); - int len = input.readableBytes(); - // Increase reader index. - input.readerIndex(input.writerIndex()); - - ByteBuf output = null; - try { - if (!Snappy.isValidCompressedBuffer(in, inOffset, len)) { - throw new IllegalArgumentException( - "Provided frame does not appear to be Snappy compressed"); - } - // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and - // so can eliminate the overhead of allocate a new byte[]. - output = input.alloc().heapBuffer(Snappy.uncompressedLength(in, inOffset, len)); - // Calculate the correct offset. - int offset = output.arrayOffset() + output.writerIndex(); - byte[] out = output.array(); - int written = Snappy.uncompress(in, inOffset, len, out, offset); - - // Increase the writerIndex with the written bytes. - output.writerIndex(output.writerIndex() + written); - return output; - } catch (IOException e) { - // release output buffer so we not leak and rethrow exception. - if (output != null) { - output.release(); - } - throw new RuntimeException(e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java deleted file mode 100644 index 05da030eec3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Specialization of the native protocol layer for the driver, based on Netty. */ -package com.datastax.oss.driver.internal.core.protocol; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java deleted file mode 100644 index dbf534459a3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.retry; - -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH_LOG; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.SIMPLE; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.UNLOGGED_BATCH; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A retry policy that sometimes retries with a lower consistency level than the one initially - * requested. - * - *

BEWARE: this policy may retry queries using a lower consistency level than the one - * initially requested. By doing so, it may break consistency guarantees. In other words, if you use - * this retry policy, there are cases (documented below) where a read at {@code QUORUM} may - * not see a preceding write at {@code QUORUM}. Furthermore, this policy doesn't always respect - * datacenter locality; for example, it may downgrade {@code LOCAL_QUORUM} to {@code ONE}, and thus - * could accidentally send a write that was intended for the local datacenter to another - * datacenter.Do not use this policy unless you have understood the cases where this can happen and - * are ok with that. - * - *

This policy implements the same retries than the {@link DefaultRetryPolicy} policy. But on top - * of that, it also retries in the following cases: - * - *

    - *
  • On a read timeout: if the number of replicas that responded is greater than one, but lower - * than is required by the requested consistency level, the operation is retried at a lower - * consistency level. - *
  • On a write timeout: if the operation is a {@code WriteType.UNLOGGED_BATCH} and at least one - * replica acknowledged the write, the operation is retried at a lower consistency level. - * Furthermore, for other operations, if at least one replica acknowledged the write, the - * timeout is ignored. - *
  • On an unavailable exception: if at least one replica is alive, the operation is retried at - * a lower consistency level. - *
- * - * The lower consistency level to use for retries is determined by the following rules: - * - *
    - *
  • if more than 3 replicas responded, use {@code THREE}. - *
  • if 1, 2 or 3 replicas responded, use the corresponding level {@code ONE}, {@code TWO} or - * {@code THREE}. - *
- * - * Note that if the initial consistency level was {@code EACH_QUORUM}, Cassandra returns the number - * of live replicas in the datacenter that failed to reach consistency, not the overall - * number in the cluster. Therefore if this number is 0, we still retry at {@code ONE}, on the - * assumption that a host may still be up in another datacenter. - * - *

The reasoning behind this retry policy is the following one. If, based on the information the - * Cassandra coordinator node returns, retrying the operation with the initially requested - * consistency has a chance to succeed, do it. Otherwise, if based on this information, we know that - * the initially requested consistency level cannot be achieved currently, then: - * - *

    - *
  • For writes, ignore the exception (thus silently failing the consistency requirement) if we - * know the write has been persisted on at least one replica. - *
  • For reads, try reading at a lower consistency level (thus silently failing the consistency - * requirement). - *
- * - * In other words, this policy implements the idea that if the requested consistency level cannot be - * achieved, the next best thing for writes is to make sure the data is persisted, and that reading - * something is better than reading nothing, even if there is a risk of reading stale data. - */ -public class ConsistencyDowngradingRetryPolicy implements RetryPolicy { - - private static final Logger LOG = - LoggerFactory.getLogger(ConsistencyDowngradingRetryPolicy.class); - - @VisibleForTesting - public static final String VERDICT_ON_READ_TIMEOUT = - "[{}] Verdict on read timeout (consistency: {}, required responses: {}, " - + "received responses: {}, data retrieved: {}, retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_WRITE_TIMEOUT = - "[{}] Verdict on write timeout (consistency: {}, write type: {}, " - + "required acknowledgments: {}, received acknowledgments: {}, retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_UNAVAILABLE = - "[{}] Verdict on unavailable exception (consistency: {}, " - + "required replica: {}, alive replica: {}, retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_ABORTED = - "[{}] Verdict on aborted request (type: {}, message: '{}', retries: {}): {}"; - - @VisibleForTesting - public static final String VERDICT_ON_ERROR = - "[{}] Verdict on node error (type: {}, message: '{}', retries: {}): {}"; - - private final String logPrefix; - - @SuppressWarnings("unused") - public ConsistencyDowngradingRetryPolicy( - @NonNull DriverContext context, @NonNull String profileName) { - this(context.getSessionName() + "|" + profileName); - } - - public ConsistencyDowngradingRetryPolicy(@NonNull String logPrefix) { - this.logPrefix = logPrefix; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry. If less replicas responded than - * required by the consistency level (but at least one replica did respond), the operation is - * retried at a lower consistency level. If enough replicas responded but data was not retrieved, - * the operation is retried with the initial consistency level. Otherwise, an exception is thrown. - */ - @Override - public RetryVerdict onReadTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - RetryVerdict verdict; - if (retryCount != 0) { - verdict = RetryVerdict.RETHROW; - } else if (cl.isSerial()) { - // CAS reads are not all that useful in terms of visibility of the writes since CAS write - // supports the normal consistency levels on the committing phase. So the main use case for - // CAS reads is probably for when you've timed out on a CAS write and want to make sure what - // happened. Downgrading in that case would be always wrong so we just special-case to - // rethrow. - verdict = RetryVerdict.RETHROW; - } else if (received < blockFor) { - verdict = maybeDowngrade(received, cl); - } else if (!dataPresent) { - // Retry with same CL since this usually means that enough replica are alive to satisfy the - // consistency but the coordinator picked a dead one for data retrieval, not having detected - // that replica as dead yet. - verdict = RetryVerdict.RETRY_SAME; - } else { - // This usually means a digest mismatch, in which case it's pointless to retry since - // the inconsistency has to be repaired first. - verdict = RetryVerdict.RETHROW; - } - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_READ_TIMEOUT, - logPrefix, - cl, - blockFor, - received, - dataPresent, - retryCount, - verdict); - } - return verdict; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry. If {@code writeType == - * WriteType.BATCH_LOG}, the write is retried with the initial consistency level. If {@code - * writeType == WriteType.UNLOGGED_BATCH} and at least one replica acknowledged, the write is - * retried with a lower consistency level (with unlogged batch, a write timeout can always - * mean that part of the batch haven't been persisted at all, even if {@code receivedAcks > 0}). - * For other write types ({@code WriteType.SIMPLE} and {@code WriteType.BATCH}), if we know the - * write has been persisted on at least one replica, we ignore the exception. Otherwise, an - * exception is thrown. - */ - @Override - public RetryVerdict onWriteTimeoutVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - RetryVerdict verdict; - if (retryCount != 0) { - verdict = RetryVerdict.RETHROW; - } else if (SIMPLE.equals(writeType) || BATCH.equals(writeType)) { - // Since we provide atomicity, if at least one replica acknowledged the write, - // there is no point in retrying - verdict = received > 0 ? RetryVerdict.IGNORE : RetryVerdict.RETHROW; - } else if (UNLOGGED_BATCH.equals(writeType)) { - // Since only part of the batch could have been persisted, - // retry with whatever consistency should allow to persist all - verdict = maybeDowngrade(received, cl); - } else if (BATCH_LOG.equals(writeType)) { - verdict = RetryVerdict.RETRY_SAME; - } else { - verdict = RetryVerdict.RETHROW; - } - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - cl, - writeType, - blockFor, - received, - retryCount, - verdict); - } - return verdict; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry. If at least one replica is known to be - * alive, the operation is retried at a lower consistency level. - */ - @Override - public RetryVerdict onUnavailableVerdict( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - RetryVerdict verdict; - if (retryCount != 0) { - verdict = RetryVerdict.RETHROW; - } else if (cl.isSerial()) { - // JAVA-764: if the requested consistency level is serial, it means that the - // operation failed at the paxos phase of a LWT. - // Retry on the next host, on the assumption that the initial coordinator could be - // network-isolated. - verdict = RetryVerdict.RETRY_NEXT; - } else { - verdict = maybeDowngrade(alive, cl); - } - if (LOG.isTraceEnabled()) { - LOG.trace(VERDICT_ON_UNAVAILABLE, logPrefix, cl, required, alive, retryCount, verdict); - } - return verdict; - } - - @Override - public RetryVerdict onRequestAbortedVerdict( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - RetryVerdict verdict = - error instanceof ClosedConnectionException || error instanceof HeartbeatException - ? RetryVerdict.RETRY_NEXT - : RetryVerdict.RETHROW; - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_ABORTED, - logPrefix, - error.getClass().getSimpleName(), - error.getMessage(), - retryCount, - verdict); - } - return verdict; - } - - @Override - public RetryVerdict onErrorResponseVerdict( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - RetryVerdict verdict = - error instanceof WriteFailureException || error instanceof ReadFailureException - ? 
RetryVerdict.RETHROW - : RetryVerdict.RETRY_NEXT; - if (LOG.isTraceEnabled()) { - LOG.trace( - VERDICT_ON_ERROR, - logPrefix, - error.getClass().getSimpleName(), - error.getMessage(), - retryCount, - verdict); - } - return verdict; - } - - @Override - @Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - throw new UnsupportedOperationException("onReadTimeout"); - } - - @Override - @Deprecated - public RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - throw new UnsupportedOperationException("onWriteTimeout"); - } - - @Override - @Deprecated - public RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - throw new UnsupportedOperationException("onUnavailable"); - } - - @Override - @Deprecated - public RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - throw new UnsupportedOperationException("onRequestAborted"); - } - - @Override - @Deprecated - public RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - throw new UnsupportedOperationException("onErrorResponse"); - } - - @Override - public void close() {} - - private RetryVerdict maybeDowngrade(int alive, ConsistencyLevel current) { - if (alive >= 3) { - return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.THREE); - } - if (alive == 2) { - return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.TWO); - } - // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas - // so even if we get 0 alive replicas, there might be a node up in some other datacenter - if (alive == 1 || current.getProtocolCode() == 
ConsistencyLevel.EACH_QUORUM.getProtocolCode()) { - return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.ONE); - } - return RetryVerdict.RETHROW; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java deleted file mode 100644 index d78f80c7354..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.retry; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.session.Request; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class ConsistencyDowngradingRetryVerdict implements RetryVerdict { - - private final ConsistencyLevel consistencyLevel; - - public ConsistencyDowngradingRetryVerdict(@NonNull ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - } - - @NonNull - @Override - public RetryDecision getRetryDecision() { - return RetryDecision.RETRY_SAME; - } - - @NonNull - @Override - public RequestT getRetryRequest(@NonNull RequestT previous) { - if (previous instanceof Statement) { - Statement statement = (Statement) previous; - @SuppressWarnings("unchecked") - RequestT toRetry = (RequestT) statement.setConsistencyLevel(consistencyLevel); - return toRetry; - } - return previous; - } - - @Override - public String toString() { - return getRetryDecision() + " at consistency " + consistencyLevel; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java deleted file mode 100644 index 8cea1a564b5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.retry; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default retry policy. - * - *

This is a very conservative implementation: it triggers a maximum of one retry per request, - * and only in cases that have a high chance of success (see the method javadocs for detailed - * explanations of each case). - * - *

To activate this policy, modify the {@code advanced.retry-policy} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.retry-policy {
- *     class = DefaultRetryPolicy
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class DefaultRetryPolicy implements RetryPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultRetryPolicy.class); - - @VisibleForTesting - public static final String RETRYING_ON_READ_TIMEOUT = - "[{}] Retrying on read timeout on same host (consistency: {}, required responses: {}, " - + "received responses: {}, data retrieved: {}, retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_WRITE_TIMEOUT = - "[{}] Retrying on write timeout on same host (consistency: {}, write type: {}, " - + "required acknowledgments: {}, received acknowledgments: {}, retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_UNAVAILABLE = - "[{}] Retrying on unavailable exception on next host (consistency: {}, " - + "required replica: {}, alive replica: {}, retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_ABORTED = - "[{}] Retrying on aborted request on next host (retries: {})"; - - @VisibleForTesting - public static final String RETRYING_ON_ERROR = - "[{}] Retrying on node error on next host (retries: {})"; - - private final String logPrefix; - - public DefaultRetryPolicy(DriverContext context, String profileName) { - this.logPrefix = (context != null ? context.getSessionName() : null) + "|" + profileName; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry (to the same node), and only if enough - * replicas had responded to the read request but data was not retrieved amongst those. That - * usually means that enough replicas are alive to satisfy the consistency, but the coordinator - * picked a dead one for data retrieval, not having detected that replica as dead yet. The - * reasoning is that by the time we get the timeout, the dead replica will likely have been - * detected as dead and the retry has a high chance of success. - * - *

Otherwise, the exception is rethrown. - */ - @Override - @Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - - RetryDecision decision = - (retryCount == 0 && received >= blockFor && !dataPresent) - ? RetryDecision.RETRY_SAME - : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_SAME && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_READ_TIMEOUT, logPrefix, cl, blockFor, received, false, retryCount); - } - - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry (to the same node), and only for a - * {@code WriteType.BATCH_LOG} write. The reasoning is that the coordinator tries to write the - * distributed batch log against a small subset of nodes in the local datacenter; a timeout - * usually means that none of these nodes were alive but the coordinator hadn't detected them as - * dead yet. By the time we get the timeout, the dead nodes will likely have been detected as - * dead, and the retry has thus a high chance of success. - * - *

Otherwise, the exception is rethrown. - */ - @Override - @Deprecated - public RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - - RetryDecision decision = - (retryCount == 0 && writeType == DefaultWriteType.BATCH_LOG) - ? RetryDecision.RETRY_SAME - : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_SAME && LOG.isTraceEnabled()) { - LOG.trace( - RETRYING_ON_WRITE_TIMEOUT, logPrefix, cl, writeType, blockFor, received, retryCount); - } - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation triggers a maximum of one retry, to the next node in the query plan. The - * rationale is that the first coordinator might have been network-isolated from all other nodes - * (thinking they're down), but still able to communicate with the client; in that case, retrying - * on the same host has almost no chance of success, but moving to the next host might solve the - * issue. - * - *

Otherwise, the exception is rethrown. - */ - @Override - @Deprecated - public RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - - RetryDecision decision = (retryCount == 0) ? RetryDecision.RETRY_NEXT : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_NEXT && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_UNAVAILABLE, logPrefix, cl, required, alive, retryCount); - } - - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation retries on the next node if the connection was closed, and rethrows - * (assuming a driver bug) in all other cases. - */ - @Override - @Deprecated - public RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - - RetryDecision decision = - (error instanceof ClosedConnectionException || error instanceof HeartbeatException) - ? RetryDecision.RETRY_NEXT - : RetryDecision.RETHROW; - - if (decision == RetryDecision.RETRY_NEXT && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_ABORTED, logPrefix, retryCount, error); - } - - return decision; - } - - /** - * {@inheritDoc} - * - *

This implementation rethrows read and write failures, and retries other errors on the next - * node. - */ - @Override - @Deprecated - public RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - - RetryDecision decision = - (error instanceof ReadFailureException || error instanceof WriteFailureException) - ? RetryDecision.RETHROW - : RetryDecision.RETRY_NEXT; - - if (decision == RetryDecision.RETRY_NEXT && LOG.isTraceEnabled()) { - LOG.trace(RETRYING_ON_ERROR, logPrefix, retryCount, error); - } - - return decision; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java deleted file mode 100644 index e74651e30de..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.retry; - -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class DefaultRetryVerdict implements RetryVerdict { - - private final RetryDecision decision; - - public DefaultRetryVerdict(@NonNull RetryDecision decision) { - this.decision = decision; - } - - @NonNull - @Override - public RetryDecision getRetryDecision() { - return decision; - } - - @Override - public String toString() { - return getRetryDecision().name(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java deleted file mode 100644 index 7abe49a98c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.servererrors; - -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DefaultWriteTypeRegistry implements WriteTypeRegistry { - - private static final ImmutableList values = - ImmutableList.builder().add(DefaultWriteType.values()).build(); - - @Override - public WriteType fromName(String name) { - return DefaultWriteType.valueOf(name); - } - - @Override - public ImmutableList getValues() { - return values; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java deleted file mode 100644 index 537c3922f0f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.servererrors; - -import com.datastax.oss.driver.api.core.servererrors.WriteType; - -public interface WriteTypeRegistry { - WriteType fromName(String name); - - /** @return all the values known to this driver instance. */ - Iterable getValues(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java deleted file mode 100644 index dc6e6a295a1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.internal.core.util.Dependency.REACTIVE_STREAMS; -import static com.datastax.oss.driver.internal.core.util.Dependency.TINKERPOP; - -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestSyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; -import com.datastax.dse.driver.internal.core.cql.reactive.CqlRequestReactiveProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphRequestSyncProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphSupportChecker; -import com.datastax.dse.driver.internal.core.graph.reactive.ReactiveGraphRequestProcessor; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor; -import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class BuiltInRequestProcessors { - - private static final Logger LOG = LoggerFactory.getLogger(BuiltInRequestProcessors.class); - - public static List> createDefaultProcessors(DefaultDriverContext context) { - List> processors = new ArrayList<>(); - addBasicProcessors(processors, context); - if (DefaultDependencyChecker.isPresent(TINKERPOP)) { - addGraphProcessors(context, processors); - } else { - LOG.debug("Tinkerpop was not found on the 
classpath: graph extensions will not be available"); - } - if (DefaultDependencyChecker.isPresent(REACTIVE_STREAMS)) { - addReactiveProcessors(processors); - } else { - LOG.debug( - "Reactive Streams was not found on the classpath: reactive extensions will not be available"); - } - if (DefaultDependencyChecker.isPresent(REACTIVE_STREAMS) - && DefaultDependencyChecker.isPresent(TINKERPOP)) { - addGraphReactiveProcessors(context, processors); - } - return processors; - } - - public static void addBasicProcessors( - List> processors, DefaultDriverContext context) { - // regular requests (sync and async) - CqlRequestAsyncProcessor cqlRequestAsyncProcessor = new CqlRequestAsyncProcessor(); - CqlRequestSyncProcessor cqlRequestSyncProcessor = - new CqlRequestSyncProcessor(cqlRequestAsyncProcessor); - processors.add(cqlRequestAsyncProcessor); - processors.add(cqlRequestSyncProcessor); - - // prepare requests (sync and async) - CqlPrepareAsyncProcessor cqlPrepareAsyncProcessor = - new CqlPrepareAsyncProcessor(Optional.of(context)); - CqlPrepareSyncProcessor cqlPrepareSyncProcessor = - new CqlPrepareSyncProcessor(cqlPrepareAsyncProcessor); - processors.add(cqlPrepareAsyncProcessor); - processors.add(cqlPrepareSyncProcessor); - - // continuous requests (sync and async) - ContinuousCqlRequestAsyncProcessor continuousCqlRequestAsyncProcessor = - new ContinuousCqlRequestAsyncProcessor(); - ContinuousCqlRequestSyncProcessor continuousCqlRequestSyncProcessor = - new ContinuousCqlRequestSyncProcessor(continuousCqlRequestAsyncProcessor); - processors.add(continuousCqlRequestAsyncProcessor); - processors.add(continuousCqlRequestSyncProcessor); - } - - public static void addGraphProcessors( - DefaultDriverContext context, List> processors) { - GraphRequestAsyncProcessor graphRequestAsyncProcessor = - new GraphRequestAsyncProcessor(context, new GraphSupportChecker()); - GraphRequestSyncProcessor graphRequestSyncProcessor = - new GraphRequestSyncProcessor(graphRequestAsyncProcessor); - 
processors.add(graphRequestAsyncProcessor); - processors.add(graphRequestSyncProcessor); - } - - public static void addReactiveProcessors(List> processors) { - CqlRequestReactiveProcessor cqlRequestReactiveProcessor = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); - ContinuousCqlRequestReactiveProcessor continuousCqlRequestReactiveProcessor = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); - processors.add(cqlRequestReactiveProcessor); - processors.add(continuousCqlRequestReactiveProcessor); - } - - public static void addGraphReactiveProcessors( - DefaultDriverContext context, List> processors) { - ReactiveGraphRequestProcessor reactiveGraphRequestProcessor = - new ReactiveGraphRequestProcessor( - new GraphRequestAsyncProcessor(context, new GraphSupportChecker())); - processors.add(reactiveGraphRequestProcessor); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java deleted file mode 100644 index b8bca431228..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.internal.core.util.Dependency.REACTIVE_STREAMS; -import static com.datastax.oss.driver.internal.core.util.Dependency.TINKERPOP; - -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.ArrayList; -import java.util.List; -import java.util.function.BooleanSupplier; - -@SuppressWarnings("unused") -public class BuiltInRequestProcessorsSubstitutions { - - @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphMissingReactiveMissing.class) - public static final class BuiltInRequestProcessorsGraphMissingReactiveMissing { - - @Substitute - public static List> createDefaultProcessors( - DefaultDriverContext context) { - List> processors = new ArrayList<>(); - BuiltInRequestProcessors.addBasicProcessors(processors, context); - return processors; - } - } - - @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphMissingReactivePresent.class) - public static final class BuiltInRequestProcessorsGraphMissingReactivePresent { - - @Substitute - public static List> createDefaultProcessors( - DefaultDriverContext context) { - List> processors = new ArrayList<>(); - BuiltInRequestProcessors.addBasicProcessors(processors, context); - BuiltInRequestProcessors.addReactiveProcessors(processors); - return processors; - } - } - - 
@TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphPresentReactiveMissing.class) - public static final class BuiltInRequestProcessorsGraphPresentReactiveMissing { - - @Substitute - public static List> createDefaultProcessors( - DefaultDriverContext context) { - List> processors = new ArrayList<>(); - BuiltInRequestProcessors.addBasicProcessors(processors, context); - BuiltInRequestProcessors.addGraphProcessors(context, processors); - return processors; - } - } - - public static class GraphMissingReactiveMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(TINKERPOP) - && !GraalDependencyChecker.isPresent(REACTIVE_STREAMS); - } - } - - public static class GraphMissingReactivePresent implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return !GraalDependencyChecker.isPresent(TINKERPOP) - && GraalDependencyChecker.isPresent(REACTIVE_STREAMS); - } - } - - public static class GraphPresentReactiveMissing implements BooleanSupplier { - @Override - public boolean getAsBoolean() { - return GraalDependencyChecker.isPresent(TINKERPOP) - && !GraalDependencyChecker.isPresent(REACTIVE_STREAMS); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java deleted file mode 100644 index b795c30fce7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java +++ /dev/null @@ -1,711 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.LifecycleListener; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager.RefreshSchemaResult; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.NodeStateManager; -import 
com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.EventExecutor; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The session implementation. - * - *

It maintains a {@link ChannelPool} to each node that the {@link LoadBalancingPolicy} set to a - * non-ignored distance. It listens for distance events and node state events, in order to adjust - * the pools accordingly. - * - *

It executes requests by: - * - *

    - *
  • picking the appropriate processor to convert the request into a protocol message. - *
  • getting a query plan from the load balancing policy - *
  • trying to send the message on each pool, in the order of the query plan - *
- */ -@ThreadSafe -public class DefaultSession implements CqlSession { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultSession.class); - - private static final AtomicInteger INSTANCE_COUNT = new AtomicInteger(); - - public static CompletionStage init( - InternalDriverContext context, Set contactPoints, CqlIdentifier keyspace) { - return new DefaultSession(context, contactPoints).init(keyspace); - } - - private final InternalDriverContext context; - private final EventExecutor adminExecutor; - private final String logPrefix; - private final SingleThreaded singleThreaded; - private final MetadataManager metadataManager; - private final RequestProcessorRegistry processorRegistry; - private final PoolManager poolManager; - private final SessionMetricUpdater metricUpdater; - - private DefaultSession(InternalDriverContext context, Set contactPoints) { - int instanceCount = INSTANCE_COUNT.incrementAndGet(); - int threshold = - context.getConfig().getDefaultProfile().getInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD); - LOG.debug( - "Creating new session {} ({} live instances)", context.getSessionName(), instanceCount); - if (threshold > 0 && instanceCount > threshold) { - LOG.warn( - "You have too many session instances: {} active, expected less than {} " - + "(see '{}' in the configuration)", - instanceCount, - threshold, - DefaultDriverOption.SESSION_LEAK_THRESHOLD.getPath()); - } - - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - try { - this.context = context; - this.singleThreaded = new SingleThreaded(context, contactPoints); - this.metadataManager = context.getMetadataManager(); - this.processorRegistry = context.getRequestProcessorRegistry(); - this.poolManager = context.getPoolManager(); - this.metricUpdater = context.getMetricsFactory().getSessionUpdater(); - } catch (Throwable t) { - LOG.debug( - "Error creating session {} ({} live instances)", - 
context.getSessionName(), - INSTANCE_COUNT.decrementAndGet()); - // Rethrow but make sure we release any resources allocated by Netty. At this stage there are - // no scheduled tasks on the event loops so getNow() won't block. - try { - context.getNettyOptions().onClose().getNow(); - } catch (Throwable suppressed) { - Loggers.warnWithException( - LOG, - "[{}] Error while closing NettyOptions " - + "(suppressed because we're already handling an init failure)", - logPrefix, - suppressed); - } - throw t; - } - } - - private CompletionStage init(CqlIdentifier keyspace) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.init(keyspace)); - return singleThreaded.initFuture; - } - - @NonNull - @Override - public String getName() { - return context.getSessionName(); - } - - @NonNull - @Override - public Metadata getMetadata() { - return metadataManager.getMetadata(); - } - - @Override - public boolean isSchemaMetadataEnabled() { - return metadataManager.isSchemaEnabled(); - } - - @NonNull - @Override - public CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newValue) { - return metadataManager.setSchemaEnabled(newValue); - } - - @NonNull - @Override - public CompletionStage refreshSchemaAsync() { - return metadataManager - .refreshSchema(null, true, true) - .thenApply(RefreshSchemaResult::getMetadata); - } - - @NonNull - @Override - public CompletionStage checkSchemaAgreementAsync() { - return context.getTopologyMonitor().checkSchemaAgreement(); - } - - @NonNull - @Override - public DriverContext getContext() { - return context; - } - - @NonNull - @Override - public Optional getKeyspace() { - return Optional.ofNullable(poolManager.getKeyspace()); - } - - @NonNull - @Override - public Optional getMetrics() { - return context.getMetricsFactory().getMetrics(); - } - - /** - * INTERNAL USE ONLY -- switches the session to a new keyspace. - * - *

This is called by the driver when a {@code USE} query is successfully executed through the - * session. Calling it from anywhere else is highly discouraged, as an invalid keyspace would - * wreak havoc (close all connections and make the session unusable). - */ - @NonNull - public CompletionStage setKeyspace(@NonNull CqlIdentifier newKeyspace) { - return poolManager.setKeyspace(newKeyspace); - } - - @NonNull - public Map getPools() { - return poolManager.getPools(); - } - - @Nullable - @Override - public ResultT execute( - @NonNull RequestT request, @NonNull GenericType resultType) { - RequestProcessor processor = - processorRegistry.processorFor(request, resultType); - return isClosed() - ? processor.newFailure(new IllegalStateException("Session is closed")) - : processor.process(request, this, context, logPrefix); - } - - @Nullable - public DriverChannel getChannel(@NonNull Node node, @NonNull String logPrefix) { - ChannelPool pool = poolManager.getPools().get(node); - if (pool == null) { - LOG.trace("[{}] No pool to {}, skipping", logPrefix, node); - return null; - } else { - DriverChannel channel = pool.next(); - if (channel == null) { - LOG.trace("[{}] Pool returned no channel for {}, skipping", logPrefix, node); - return null; - } else if (channel.closeFuture().isDone()) { - LOG.trace("[{}] Pool returned closed connection to {}, skipping", logPrefix, node); - return null; - } else { - return channel; - } - } - } - - @NonNull - public ConcurrentMap getRepreparePayloads() { - return poolManager.getRepreparePayloads(); - } - - @NonNull - public SessionMetricUpdater getMetricUpdater() { - return metricUpdater; - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - return closeSafely(singleThreaded::close); - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return closeSafely(singleThreaded::forceClose); - } - - 
private CompletionStage closeSafely(Runnable action) { - // Protect against getting closed twice: with the default NettyOptions, closing shuts down - // adminExecutor, so we don't want to call RunOrSchedule the second time. - if (!singleThreaded.closeFuture.isDone()) { - try { - RunOrSchedule.on(adminExecutor, action); - } catch (RejectedExecutionException e) { - // Checking the future is racy, there is still a tiny window that could get us here. - LOG.warn( - "[{}] Ignoring terminated executor. " - + "This generally happens if you close the session multiple times concurrently, " - + "and can be safely ignored if the close() call returns normally.", - logPrefix, - e); - } - } - return singleThreaded.closeFuture; - } - - private class SingleThreaded { - - private final InternalDriverContext context; - private final Set initialContactPoints; - private final NodeStateManager nodeStateManager; - private final SchemaListenerNotifier schemaListenerNotifier; - private final CompletableFuture initFuture = new CompletableFuture<>(); - private boolean initWasCalled; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private boolean forceCloseWasCalled; - - private SingleThreaded(InternalDriverContext context, Set contactPoints) { - this.context = context; - this.nodeStateManager = new NodeStateManager(context); - this.initialContactPoints = contactPoints; - this.schemaListenerNotifier = - new SchemaListenerNotifier( - context.getSchemaChangeListener(), context.getEventBus(), adminExecutor); - context - .getEventBus() - .register( - NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onNodeStateChanged)); - CompletableFutures.propagateCancellation( - this.initFuture, context.getTopologyMonitor().initFuture()); - } - - private void init(CqlIdentifier keyspace) { - assert adminExecutor.inEventLoop(); - if (initWasCalled) { - return; - } - initWasCalled = true; - LOG.debug("[{}] Starting initialization", 
logPrefix); - - // Eagerly fetch user-facing policies right now, no need to start opening connections if - // something is wrong in the configuration. - try { - context.getLoadBalancingPolicies(); - context.getRetryPolicies(); - context.getSpeculativeExecutionPolicies(); - context.getReconnectionPolicy(); - context.getAddressTranslator(); - context.getNodeStateListener(); - context.getSchemaChangeListener(); - context.getRequestTracker(); - context.getRequestThrottler(); - context.getAuthProvider(); - context.getSslHandlerFactory(); - context.getTimestampGenerator(); - } catch (Throwable error) { - RunOrSchedule.on(adminExecutor, this::closePolicies); - context - .getNettyOptions() - .onClose() - .addListener( - f -> { - if (!f.isSuccess()) { - Loggers.warnWithException( - LOG, - "[{}] Error while closing NettyOptions " - + "(suppressed because we're already handling an init failure)", - logPrefix, - f.cause()); - } - initFuture.completeExceptionally(error); - }); - LOG.debug( - "Error initializing new session {} ({} live instances)", - context.getSessionName(), - INSTANCE_COUNT.decrementAndGet()); - return; - } - - closeFuture.whenComplete( - (v, error) -> - LOG.debug( - "Closing session {} ({} live instances)", - context.getSessionName(), - INSTANCE_COUNT.decrementAndGet())); - - MetadataManager metadataManager = context.getMetadataManager(); - metadataManager.addContactPoints(initialContactPoints); - context - .getTopologyMonitor() - .init() - .thenCompose(v -> metadataManager.refreshNodes()) - .thenCompose(v -> checkProtocolVersion()) - .thenCompose(v -> initialSchemaRefresh()) - .thenCompose(v -> initializePools(keyspace)) - .whenComplete( - (v, error) -> { - if (error == null) { - LOG.debug("[{}] Initialization complete, ready", logPrefix); - notifyListeners(); - initFuture.complete(DefaultSession.this); - } else { - LOG.debug("[{}] Initialization failed, force closing", logPrefix, error); - forceCloseAsync() - .whenComplete( - (v1, error1) -> { - if (error1 
!= null) { - error.addSuppressed(error1); - } - initFuture.completeExceptionally(error); - }); - } - }); - } - - private CompletionStage checkProtocolVersion() { - try { - boolean protocolWasForced = - context.getConfig().getDefaultProfile().isDefined(DefaultDriverOption.PROTOCOL_VERSION); - if (!protocolWasForced) { - ProtocolVersion currentVersion = context.getProtocolVersion(); - ProtocolVersion bestVersion = - context - .getProtocolVersionRegistry() - .highestCommon(metadataManager.getMetadata().getNodes().values()); - if (bestVersion.getCode() < currentVersion.getCode()) { - LOG.info( - "[{}] Negotiated protocol version {} for the initial contact point, " - + "but other nodes only support {}, downgrading", - logPrefix, - currentVersion, - bestVersion); - context.getChannelFactory().setProtocolVersion(bestVersion); - - // Note that, with the default topology monitor, the control connection is already - // connected with currentVersion at this point. This doesn't really matter because none - // of the control queries use any protocol-dependent feature. - // Keep going as-is, the control connection might switch to the "correct" version later - // if it reconnects to another node. 
- } else if (bestVersion.getCode() > currentVersion.getCode()) { - LOG.info( - "[{}] Negotiated protocol version {} for the initial contact point, " - + "but cluster seems to support {}, keeping the negotiated version", - logPrefix, - currentVersion, - bestVersion); - } - } - return CompletableFuture.completedFuture(null); - } catch (Throwable throwable) { - return CompletableFutures.failedFuture(throwable); - } - } - - private CompletionStage initialSchemaRefresh() { - try { - return metadataManager - .refreshSchema(null, false, true) - .exceptionally( - error -> { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema during initialization, " - + "proceeding without schema metadata", - logPrefix, - error); - return null; - }); - } catch (Throwable throwable) { - return CompletableFutures.failedFuture(throwable); - } - } - - private CompletionStage initializePools(CqlIdentifier keyspace) { - try { - nodeStateManager.markInitialized(); - context.getLoadBalancingPolicyWrapper().init(); - context.getConfigLoader().onDriverInit(context); - return poolManager.init(keyspace); - } catch (Throwable throwable) { - return CompletableFutures.failedFuture(throwable); - } - } - - private void notifyListeners() { - for (LifecycleListener lifecycleListener : context.getLifecycleListeners()) { - try { - lifecycleListener.onSessionReady(); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - lifecycleListener, - t); - } - } - try { - context.getNodeStateListener().onSessionReady(DefaultSession.this); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - context.getNodeStateListener(), - t); - } - try { - schemaListenerNotifier.onSessionReady(DefaultSession.this); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - 
schemaListenerNotifier, - t); - } - try { - context.getRequestTracker().onSessionReady(DefaultSession.this); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, - "[{}] Error while notifying {} of session ready", - logPrefix, - context.getRequestTracker(), - t); - } - } - - private void onNodeStateChanged(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - DefaultNode node = event.node; - if (node == null) { - LOG.debug( - "[{}] Node for this event was removed, ignoring state change: {}", logPrefix, event); - } else if (event.newState == null) { - context.getNodeStateListener().onRemove(node); - } else if (event.oldState == null && event.newState == NodeState.UNKNOWN) { - context.getNodeStateListener().onAdd(node); - } else if (event.newState == NodeState.UP) { - context.getNodeStateListener().onUp(node); - } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { - context.getNodeStateListener().onDown(node); - } - } - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Starting shutdown", logPrefix); - - closePolicies(); - - // clear metrics to prevent memory leak - for (Node n : metadataManager.getMetadata().getNodes().values()) { - NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); - if (updater != null) updater.clearMetrics(); - } - - if (metricUpdater != null) metricUpdater.clearMetrics(); - - List> childrenCloseStages = new ArrayList<>(); - for (AsyncAutoCloseable closeable : internalComponentsToClose()) { - childrenCloseStages.add(closeable.closeAsync()); - } - CompletableFutures.whenAllDone( - childrenCloseStages, () -> onChildrenClosed(childrenCloseStages), adminExecutor); - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (forceCloseWasCalled) { - return; - } - forceCloseWasCalled = true; - LOG.debug( - "[{}] Starting forced shutdown (was {}closed before)", - 
logPrefix, - (closeWasCalled ? "" : "not ")); - - // clear metrics to prevent memory leak - for (Node n : metadataManager.getMetadata().getNodes().values()) { - NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); - if (updater != null) updater.clearMetrics(); - } - - if (metricUpdater != null) metricUpdater.clearMetrics(); - - if (closeWasCalled) { - // onChildrenClosed has already been scheduled - for (AsyncAutoCloseable closeable : internalComponentsToClose()) { - closeable.forceCloseAsync(); - } - } else { - closePolicies(); - List> childrenCloseStages = new ArrayList<>(); - for (AsyncAutoCloseable closeable : internalComponentsToClose()) { - childrenCloseStages.add(closeable.forceCloseAsync()); - } - CompletableFutures.whenAllDone( - childrenCloseStages, () -> onChildrenClosed(childrenCloseStages), adminExecutor); - } - } - - private void onChildrenClosed(List> childrenCloseStages) { - assert adminExecutor.inEventLoop(); - for (CompletionStage stage : childrenCloseStages) { - warnIfFailed(stage); - } - context - .getNettyOptions() - .onClose() - .addListener( - f -> { - if (!f.isSuccess()) { - closeFuture.completeExceptionally(f.cause()); - } else { - closeFuture.complete(null); - } - }); - } - - private void warnIfFailed(CompletionStage stage) { - CompletableFuture future = stage.toCompletableFuture(); - assert future.isDone(); - if (future.isCompletedExceptionally()) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while closing", - logPrefix, - CompletableFutures.getFailed(future)); - } - } - - private void closePolicies() { - // This is a bit tricky: we might be closing the session because of an initialization error. - // This error might have been triggered by a policy failing to initialize. If we try to access - // the policy here to close it, it will fail again. So make sure we ignore that error and - // proceed to close the other policies. 
- List policies = new ArrayList<>(); - for (Supplier supplier : - ImmutableList.>of( - context::getReconnectionPolicy, - context::getLoadBalancingPolicyWrapper, - context::getAddressTranslator, - context::getConfigLoader, - context::getNodeStateListener, - context::getSchemaChangeListener, - context::getRequestTracker, - context::getRequestThrottler, - context::getTimestampGenerator)) { - try { - policies.add(supplier.get()); - } catch (Throwable t) { - // Assume the policy had failed to initialize, and we don't need to close it => ignore - } - } - try { - context.getAuthProvider().ifPresent(policies::add); - } catch (Throwable t) { - // ignore - } - try { - context.getSslHandlerFactory().ifPresent(policies::add); - } catch (Throwable t) { - // ignore - } - try { - policies.addAll(context.getRetryPolicies().values()); - } catch (Throwable t) { - // ignore - } - try { - policies.addAll(context.getSpeculativeExecutionPolicies().values()); - } catch (Throwable t) { - // ignore - } - policies.addAll(context.getLifecycleListeners()); - - // Finally we have a list of all the policies that initialized successfully, close them: - for (AutoCloseable policy : policies) { - try { - policy.close(); - } catch (Throwable t) { - Loggers.warnWithException(LOG, "[{}] Error while closing {}", logPrefix, policy, t); - } - } - } - - private List internalComponentsToClose() { - ImmutableList.Builder components = - ImmutableList.builder() - .add(poolManager, nodeStateManager, metadataManager); - - // Same as closePolicies(): make sure we don't trigger errors by accessing context components - // that had failed to initialize: - try { - components.add(context.getTopologyMonitor()); - } catch (Throwable t) { - // ignore - } - try { - components.add(context.getControlConnection()); - } catch (Throwable t) { - // ignore - } - return components.build(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java deleted file mode 100644 index 661be017461..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java +++ /dev/null @@ -1,536 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.AsyncAutoCloseable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.WeakHashMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; 
-import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains the connection pools of a session. - * - *

Logically this belongs to {@link DefaultSession}, but it's extracted here in order to be - * accessible from the context (notably for metrics). - */ -@ThreadSafe -public class PoolManager implements AsyncAutoCloseable { - - private static final Logger LOG = LoggerFactory.getLogger(PoolManager.class); - - // This is read concurrently, but only updated from adminExecutor - private volatile CqlIdentifier keyspace; - - private final ConcurrentMap pools = - new ConcurrentHashMap<>( - 16, - 0.75f, - // the map will only be updated from adminExecutor - 1); - - // The raw data to reprepare requests on the fly, if we hit a node that doesn't have them in - // its cache. - // This is raw protocol-level data, as opposed to the actual instances returned to the client - // (e.g. DefaultPreparedStatement) which are handled at the protocol level (e.g. - // CqlPrepareAsyncProcessor). We keep the two separate to avoid introducing a dependency from the - // session to a particular processor implementation. 
- private final ConcurrentMap repreparePayloads; - - private final String logPrefix; - private final EventExecutor adminExecutor; - private final DriverExecutionProfile config; - private final SingleThreaded singleThreaded; - - public PoolManager(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); - this.config = context.getConfig().getDefaultProfile(); - this.singleThreaded = new SingleThreaded(context); - - if (config.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) { - LOG.debug("[{}] Prepared statements cache configured to use weak values", logPrefix); - this.repreparePayloads = new MapMaker().weakValues().makeMap(); - } else { - LOG.debug("[{}] Prepared statements cache configured to use strong values", logPrefix); - this.repreparePayloads = new MapMaker().makeMap(); - } - } - - public CompletionStage init(CqlIdentifier keyspace) { - RunOrSchedule.on(adminExecutor, () -> singleThreaded.init(keyspace)); - return singleThreaded.initFuture; - } - - public CqlIdentifier getKeyspace() { - return keyspace; - } - - public CompletionStage setKeyspace(CqlIdentifier newKeyspace) { - CqlIdentifier oldKeyspace = this.keyspace; - if (Objects.equals(oldKeyspace, newKeyspace)) { - return CompletableFuture.completedFuture(null); - } - if (config.getBoolean(DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE)) { - LOG.warn( - "[{}] Detected a keyspace change at runtime ({} => {}). " - + "This is an anti-pattern that should be avoided in production " - + "(see '{}' in the configuration).", - logPrefix, - (oldKeyspace == null) ? 
"" : oldKeyspace.asInternal(), - newKeyspace.asInternal(), - DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE.getPath()); - } - this.keyspace = newKeyspace; - CompletableFuture result = new CompletableFuture<>(); - RunOrSchedule.on(adminExecutor, () -> singleThreaded.setKeyspace(newKeyspace, result)); - return result; - } - - public Map getPools() { - return pools; - } - - public ConcurrentMap getRepreparePayloads() { - return repreparePayloads; - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage closeAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::close); - return singleThreaded.closeFuture; - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - RunOrSchedule.on(adminExecutor, singleThreaded::forceClose); - return singleThreaded.closeFuture; - } - - private class SingleThreaded { - - private final InternalDriverContext context; - private final ChannelPoolFactory channelPoolFactory; - private final CompletableFuture initFuture = new CompletableFuture<>(); - private boolean initWasCalled; - private final CompletableFuture closeFuture = new CompletableFuture<>(); - private boolean closeWasCalled; - private boolean forceCloseWasCalled; - private final Object distanceListenerKey; - private final ReplayingEventFilter distanceEventFilter = - new ReplayingEventFilter<>(this::processDistanceEvent); - private final Object stateListenerKey; - private final ReplayingEventFilter stateEventFilter = - new ReplayingEventFilter<>(this::processStateEvent); - private final Object topologyListenerKey; - // The pools that we have opened but have not finished initializing yet - private final Map> pending = new HashMap<>(); - // If we receive events while a pool is initializing, the last one is stored here - private final Map pendingDistanceEvents = new WeakHashMap<>(); - private final Map pendingStateEvents = new WeakHashMap<>(); - - private 
SingleThreaded(InternalDriverContext context) { - this.context = context; - this.channelPoolFactory = context.getChannelPoolFactory(); - this.distanceListenerKey = - context - .getEventBus() - .register( - DistanceEvent.class, RunOrSchedule.on(adminExecutor, this::onDistanceEvent)); - this.stateListenerKey = - context - .getEventBus() - .register(NodeStateEvent.class, RunOrSchedule.on(adminExecutor, this::onStateEvent)); - this.topologyListenerKey = - context - .getEventBus() - .register( - TopologyEvent.class, RunOrSchedule.on(adminExecutor, this::onTopologyEvent)); - } - - private void init(CqlIdentifier keyspace) { - assert adminExecutor.inEventLoop(); - if (initWasCalled) { - return; - } - initWasCalled = true; - - LOG.debug("[{}] Starting initialization", logPrefix); - - PoolManager.this.keyspace = keyspace; - - // Make sure we don't miss any event while the pools are initializing - distanceEventFilter.start(); - stateEventFilter.start(); - - Collection nodes = context.getMetadataManager().getMetadata().getNodes().values(); - List> poolStages = new ArrayList<>(nodes.size()); - for (Node node : nodes) { - NodeDistance distance = node.getDistance(); - if (distance == NodeDistance.IGNORED) { - LOG.debug("[{}] Skipping {} because it is IGNORED", logPrefix, node); - } else if (node.getState() == NodeState.FORCED_DOWN) { - LOG.debug("[{}] Skipping {} because it is FORCED_DOWN", logPrefix, node); - } else { - LOG.debug("[{}] Creating a pool for {}", logPrefix, node); - poolStages.add(channelPoolFactory.init(node, keyspace, distance, context, logPrefix)); - } - } - CompletableFutures.whenAllDone(poolStages, () -> this.onPoolsInit(poolStages), adminExecutor); - } - - private void onPoolsInit(List> poolStages) { - assert adminExecutor.inEventLoop(); - LOG.debug("[{}] All pools have finished initializing", logPrefix); - // We will only propagate an invalid keyspace error if all pools get it - boolean allInvalidKeyspaces = poolStages.size() > 0; - for (CompletionStage 
poolStage : poolStages) { - // Note: pool init always succeeds - ChannelPool pool = CompletableFutures.getCompleted(poolStage.toCompletableFuture()); - boolean invalidKeyspace = pool.isInvalidKeyspace(); - if (invalidKeyspace) { - LOG.debug("[{}] Pool to {} reports an invalid keyspace", logPrefix, pool.getNode()); - } - allInvalidKeyspaces &= invalidKeyspace; - pools.put(pool.getNode(), pool); - } - if (allInvalidKeyspaces) { - initFuture.completeExceptionally( - new InvalidKeyspaceException("Invalid keyspace " + keyspace.asCql(true))); - forceClose(); - } else { - LOG.debug("[{}] Initialization complete, ready", logPrefix); - initFuture.complete(null); - distanceEventFilter.markReady(); - stateEventFilter.markReady(); - } - } - - private void onDistanceEvent(DistanceEvent event) { - assert adminExecutor.inEventLoop(); - distanceEventFilter.accept(event); - } - - private void onStateEvent(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - stateEventFilter.accept(event); - } - - private void processDistanceEvent(DistanceEvent event) { - assert adminExecutor.inEventLoop(); - // no need to check closeWasCalled, because we stop listening for events one closed - DefaultNode node = event.node; - NodeDistance newDistance = event.distance; - if (pending.containsKey(node)) { - pendingDistanceEvents.put(node, event); - } else if (newDistance == NodeDistance.IGNORED) { - ChannelPool pool = pools.remove(node); - if (pool != null) { - LOG.debug("[{}] {} became IGNORED, destroying pool", logPrefix, node); - pool.closeAsync() - .exceptionally( - error -> { - Loggers.warnWithException(LOG, "[{}] Error closing pool", logPrefix, error); - return null; - }); - } - } else { - NodeState state = node.getState(); - if (state == NodeState.FORCED_DOWN) { - LOG.warn( - "[{}] {} became {} but it is FORCED_DOWN, ignoring", logPrefix, node, newDistance); - return; - } - ChannelPool pool = pools.get(node); - if (pool == null) { - LOG.debug( - "[{}] {} became {} and no pool found, 
initializing it", logPrefix, node, newDistance); - CompletionStage poolFuture = - channelPoolFactory.init(node, keyspace, newDistance, context, logPrefix); - pending.put(node, poolFuture); - poolFuture - .thenAcceptAsync(this::onPoolInitialized, adminExecutor) - .exceptionally(UncaughtExceptions::log); - } else { - LOG.debug("[{}] {} became {}, resizing it", logPrefix, node, newDistance); - pool.resize(newDistance); - } - } - } - - private void processStateEvent(NodeStateEvent event) { - assert adminExecutor.inEventLoop(); - // no need to check closeWasCalled, because we stop listening for events once closed - DefaultNode node = event.node; - NodeState oldState = event.oldState; - NodeState newState = event.newState; - if (pending.containsKey(node)) { - pendingStateEvents.put(node, event); - } else if (newState == null || newState == NodeState.FORCED_DOWN) { - ChannelPool pool = pools.remove(node); - if (pool != null) { - LOG.debug( - "[{}] {} was {}, destroying pool", - logPrefix, - node, - newState == null ? 
"removed" : newState.name()); - pool.closeAsync() - .exceptionally( - error -> { - Loggers.warnWithException(LOG, "[{}] Error closing pool", logPrefix, error); - return null; - }); - } - } else if (oldState == NodeState.FORCED_DOWN - && newState == NodeState.UP - && node.getDistance() != NodeDistance.IGNORED) { - LOG.debug("[{}] {} was forced back UP, initializing pool", logPrefix, node); - createOrReconnectPool(node); - } - } - - private void onTopologyEvent(TopologyEvent event) { - assert adminExecutor.inEventLoop(); - if (event.type == TopologyEvent.Type.SUGGEST_UP) { - context - .getMetadataManager() - .getMetadata() - .findNode(event.broadcastRpcAddress) - .ifPresent( - node -> { - if (node.getDistance() != NodeDistance.IGNORED) { - LOG.debug( - "[{}] Received a SUGGEST_UP event for {}, reconnecting pool now", - logPrefix, - node); - ChannelPool pool = pools.get(node); - if (pool != null) { - pool.reconnectNow(); - } - } - }); - } - } - - private void createOrReconnectPool(Node node) { - ChannelPool pool = pools.get(node); - if (pool == null) { - CompletionStage poolFuture = - channelPoolFactory.init(node, keyspace, node.getDistance(), context, logPrefix); - pending.put(node, poolFuture); - poolFuture - .thenAcceptAsync(this::onPoolInitialized, adminExecutor) - .exceptionally(UncaughtExceptions::log); - } else { - pool.reconnectNow(); - } - } - - private void onPoolInitialized(ChannelPool pool) { - assert adminExecutor.inEventLoop(); - Node node = pool.getNode(); - if (closeWasCalled) { - LOG.debug( - "[{}] Session closed while a pool to {} was initializing, closing it", logPrefix, node); - pool.forceCloseAsync(); - } else { - LOG.debug("[{}] New pool to {} initialized", logPrefix, node); - if (Objects.equals(keyspace, pool.getInitialKeyspaceName())) { - reprepareStatements(pool); - } else { - // The keyspace changed while the pool was being initialized, switch it now. 
- pool.setKeyspace(keyspace) - .handleAsync( - (result, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, "Error while switching keyspace to " + keyspace, error); - } - reprepareStatements(pool); - return null; - }, - adminExecutor); - } - } - } - - private void reprepareStatements(ChannelPool pool) { - assert adminExecutor.inEventLoop(); - if (config.getBoolean(DefaultDriverOption.REPREPARE_ENABLED)) { - new ReprepareOnUp( - logPrefix + "|" + pool.getNode().getEndPoint(), - pool, - adminExecutor, - repreparePayloads, - context, - () -> RunOrSchedule.on(adminExecutor, () -> onPoolReady(pool))) - .start(); - } else { - LOG.debug("[{}] Reprepare on up is disabled, skipping", logPrefix); - onPoolReady(pool); - } - } - - private void onPoolReady(ChannelPool pool) { - assert adminExecutor.inEventLoop(); - Node node = pool.getNode(); - pending.remove(node); - pools.put(node, pool); - DistanceEvent distanceEvent = pendingDistanceEvents.remove(node); - NodeStateEvent stateEvent = pendingStateEvents.remove(node); - if (stateEvent != null && stateEvent.newState == NodeState.FORCED_DOWN) { - LOG.debug( - "[{}] Received {} while the pool was initializing, processing it now", - logPrefix, - stateEvent); - processStateEvent(stateEvent); - } else if (distanceEvent != null) { - LOG.debug( - "[{}] Received {} while the pool was initializing, processing it now", - logPrefix, - distanceEvent); - processDistanceEvent(distanceEvent); - } - } - - private void setKeyspace(CqlIdentifier newKeyspace, CompletableFuture doneFuture) { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - doneFuture.complete(null); - return; - } - LOG.debug("[{}] Switching to keyspace {}", logPrefix, newKeyspace); - List> poolReadyFutures = Lists.newArrayListWithCapacity(pools.size()); - for (ChannelPool pool : pools.values()) { - poolReadyFutures.add(pool.setKeyspace(newKeyspace)); - } - CompletableFutures.completeFrom(CompletableFutures.allDone(poolReadyFutures), doneFuture); - 
} - - private void close() { - assert adminExecutor.inEventLoop(); - if (closeWasCalled) { - return; - } - closeWasCalled = true; - LOG.debug("[{}] Starting shutdown", logPrefix); - - // Stop listening for events - context.getEventBus().unregister(distanceListenerKey, DistanceEvent.class); - context.getEventBus().unregister(stateListenerKey, NodeStateEvent.class); - context.getEventBus().unregister(topologyListenerKey, TopologyEvent.class); - - List> closePoolStages = new ArrayList<>(pools.size()); - for (ChannelPool pool : pools.values()) { - closePoolStages.add(pool.closeAsync()); - } - CompletableFutures.whenAllDone( - closePoolStages, () -> onAllPoolsClosed(closePoolStages), adminExecutor); - } - - private void forceClose() { - assert adminExecutor.inEventLoop(); - if (forceCloseWasCalled) { - return; - } - forceCloseWasCalled = true; - LOG.debug( - "[{}] Starting forced shutdown (was {}closed before)", - logPrefix, - (closeWasCalled ? "" : "not ")); - - if (closeWasCalled) { - for (ChannelPool pool : pools.values()) { - pool.forceCloseAsync(); - } - } else { - List> closePoolStages = new ArrayList<>(pools.size()); - for (ChannelPool pool : pools.values()) { - closePoolStages.add(pool.forceCloseAsync()); - } - CompletableFutures.whenAllDone( - closePoolStages, () -> onAllPoolsClosed(closePoolStages), adminExecutor); - } - } - - private void onAllPoolsClosed(List> closePoolStages) { - assert adminExecutor.inEventLoop(); - Throwable firstError = null; - for (CompletionStage closePoolStage : closePoolStages) { - CompletableFuture closePoolFuture = closePoolStage.toCompletableFuture(); - assert closePoolFuture.isDone(); - if (closePoolFuture.isCompletedExceptionally()) { - Throwable error = CompletableFutures.getFailed(closePoolFuture); - if (firstError == null) { - firstError = error; - } else { - firstError.addSuppressed(error); - } - } - } - if (firstError != null) { - closeFuture.completeExceptionally(firstError); - } else { - LOG.debug("[{}] Shutdown 
complete", logPrefix); - closeFuture.complete(null); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java deleted file mode 100644 index ee979473fd1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandler; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.util.concurrent.EventExecutor; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.HashSet; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ensures that a newly added or restarted node knows all the prepared statements created from this - * driver instance. - * - *

See the comments in {@code reference.conf} for more explanations about this process. If any - * prepare request fail, we ignore the error because it will be retried on the fly (see {@link - * CqlRequestHandler}). - * - *

Logically this code belongs to {@link DefaultSession}, but it was extracted for modularity and - * testability. - */ -@ThreadSafe -class ReprepareOnUp { - - private static final Logger LOG = LoggerFactory.getLogger(ReprepareOnUp.class); - private static final Query QUERY_SERVER_IDS = - new Query("SELECT prepared_id FROM system.prepared_statements"); - - private final String logPrefix; - private final ChannelPool pool; - private final EventExecutor adminExecutor; - private final Map repreparePayloads; - private final Runnable whenPrepared; - private final boolean checkSystemTable; - private final int maxStatements; - private final int maxParallelism; - private final Duration timeout; - private final RequestThrottler throttler; - private final SessionMetricUpdater metricUpdater; - - // After the constructor, everything happens on adminExecutor, so these fields do not need any - // synchronization. - private Set serverKnownIds; - private Queue toReprepare; - private int runningWorkers; - - ReprepareOnUp( - String logPrefix, - ChannelPool pool, - EventExecutor adminExecutor, - Map repreparePayloads, - InternalDriverContext context, - Runnable whenPrepared) { - - this.logPrefix = logPrefix; - this.pool = pool; - this.adminExecutor = adminExecutor; - this.repreparePayloads = repreparePayloads; - this.whenPrepared = whenPrepared; - this.throttler = context.getRequestThrottler(); - - DriverConfig config = context.getConfig(); - this.checkSystemTable = - config.getDefaultProfile().getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE); - this.timeout = config.getDefaultProfile().getDuration(DefaultDriverOption.REPREPARE_TIMEOUT); - this.maxStatements = - config.getDefaultProfile().getInt(DefaultDriverOption.REPREPARE_MAX_STATEMENTS); - this.maxParallelism = - config.getDefaultProfile().getInt(DefaultDriverOption.REPREPARE_MAX_PARALLELISM); - - this.metricUpdater = context.getMetricsFactory().getSessionUpdater(); - } - - void start() { - if 
(repreparePayloads.isEmpty()) { - LOG.debug("[{}] No statements to reprepare, done", logPrefix); - whenPrepared.run(); - } else { - // Check log level because ConcurrentMap.size is not a constant operation - if (LOG.isDebugEnabled()) { - LOG.debug( - "[{}] {} statements to reprepare on newly added/up node", - logPrefix, - repreparePayloads.size()); - } - if (checkSystemTable) { - LOG.debug("[{}] Checking which statements the server knows about", logPrefix); - queryAsync(QUERY_SERVER_IDS, Collections.emptyMap(), "QUERY system.prepared_statements") - .whenCompleteAsync(this::gatherServerIds, adminExecutor); - } else { - LOG.debug( - "[{}] {} is disabled, repreparing directly", - logPrefix, - DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE.getPath()); - RunOrSchedule.on( - adminExecutor, - () -> { - serverKnownIds = Collections.emptySet(); - gatherPayloadsToReprepare(); - }); - } - } - } - - private void gatherServerIds(AdminResult rows, Throwable error) { - assert adminExecutor.inEventLoop(); - if (serverKnownIds == null) { - serverKnownIds = new HashSet<>(); - } - if (error != null) { - LOG.debug( - "[{}] Error querying system.prepared_statements ({}), proceeding without server ids", - logPrefix, - error.toString()); - gatherPayloadsToReprepare(); - } else { - for (AdminRow row : rows) { - serverKnownIds.add(row.getByteBuffer("prepared_id")); - } - if (rows.hasNextPage()) { - LOG.debug("[{}] system.prepared_statements has more pages", logPrefix); - rows.nextPage().whenCompleteAsync(this::gatherServerIds, adminExecutor); - } else { - LOG.debug("[{}] Gathered {} server ids, proceeding", logPrefix, serverKnownIds.size()); - gatherPayloadsToReprepare(); - } - } - } - - private void gatherPayloadsToReprepare() { - assert adminExecutor.inEventLoop(); - toReprepare = new ArrayDeque<>(); - for (RepreparePayload payload : repreparePayloads.values()) { - if (serverKnownIds.contains(payload.id)) { - LOG.trace( - "[{}] Skipping statement {} because it is already known to the 
server", - logPrefix, - Bytes.toHexString(payload.id)); - } else { - if (maxStatements > 0 && toReprepare.size() == maxStatements) { - LOG.debug( - "[{}] Limiting number of statements to reprepare to {} as configured, " - + "but there are more", - logPrefix, - maxStatements); - break; - } else { - toReprepare.add(payload); - } - } - } - if (toReprepare.isEmpty()) { - LOG.debug( - "[{}] No statements to reprepare that are not known by the server already, done", - logPrefix); - whenPrepared.run(); - } else { - startWorkers(); - } - } - - private void startWorkers() { - assert adminExecutor.inEventLoop(); - runningWorkers = Math.min(maxParallelism, toReprepare.size()); - LOG.debug( - "[{}] Repreparing {} statements with {} parallel workers", - logPrefix, - toReprepare.size(), - runningWorkers); - for (int i = 0; i < runningWorkers; i++) { - startWorker(); - } - } - - private void startWorker() { - assert adminExecutor.inEventLoop(); - if (toReprepare.isEmpty()) { - runningWorkers -= 1; - if (runningWorkers == 0) { - LOG.debug("[{}] All workers finished, done", logPrefix); - whenPrepared.run(); - } - } else { - RepreparePayload payload = toReprepare.poll(); - prepareAsync( - new Prepare( - payload.query, (payload.keyspace == null ? 
null : payload.keyspace.asInternal())), - payload.customPayload) - .handleAsync( - (result, error) -> { - // Don't log, AdminRequestHandler does already - startWorker(); - return null; - }, - adminExecutor); - } - } - - @VisibleForTesting - protected CompletionStage queryAsync( - Message message, Map customPayload, String debugString) { - DriverChannel channel = pool.next(); - if (channel == null) { - return CompletableFutures.failedFuture( - new BusyConnectionException("Found no channel to execute reprepare query")); - } else { - ThrottledAdminRequestHandler reprepareHandler = - ThrottledAdminRequestHandler.query( - channel, - false, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - debugString); - return reprepareHandler.start(); - } - } - - @VisibleForTesting - protected CompletionStage prepareAsync( - Message message, Map customPayload) { - DriverChannel channel = pool.next(); - if (channel == null) { - return CompletableFutures.failedFuture( - new BusyConnectionException("Found no channel to execute reprepare query")); - } else { - ThrottledAdminRequestHandler reprepareHandler = - ThrottledAdminRequestHandler.prepare( - channel, false, message, customPayload, timeout, throttler, metricUpdater, logPrefix); - return reprepareHandler.start(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java deleted file mode 100644 index 7c4b10442a7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.cql.DefaultPreparedStatement; -import com.datastax.oss.protocol.internal.request.Prepare; -import java.nio.ByteBuffer; -import java.util.Map; -import net.jcip.annotations.Immutable; - -/** - * The information that's necessary to reprepare an already prepared statement, in case we hit a - * node that doesn't have it in its cache. - * - *

Make sure the object that's returned to the client (e.g. {@link DefaultPreparedStatement} for - * CQL statements) keeps a reference to this. - */ -@Immutable -public class RepreparePayload { - public final ByteBuffer id; - public final String query; - - /** The keyspace that is set independently from the query string (see CASSANDRA-10145) */ - public final CqlIdentifier keyspace; - - public final Map customPayload; - - public RepreparePayload( - ByteBuffer id, String query, CqlIdentifier keyspace, Map customPayload) { - this.id = id; - this.query = query; - this.keyspace = keyspace; - this.customPayload = customPayload; - } - - public Prepare toMessage() { - return new Prepare(query, keyspace == null ? null : keyspace.asInternal()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java deleted file mode 100644 index 49599667d70..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; - -/** - * Handles a type of request in the driver. - * - *

By default, the driver supports CQL {@link Statement queries} and {@link PrepareRequest - * preparation requests}. New processors can be plugged in to handle new types of requests. - * - * @param the type of request accepted. - * @param the type of result when a request is processed. - */ -public interface RequestProcessor { - - /** - * Whether the processor can produce the given result from the given request. - * - *

Processors will be tried in the order they were registered. The first processor for which - * this method returns true will be used. - */ - boolean canProcess(Request request, GenericType resultType); - - /** Processes the given request, producing a result. */ - ResultT process( - RequestT request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix); - - /** Builds a failed result to directly report the given error. */ - ResultT newFailure(RuntimeException error); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java deleted file mode 100644 index b993365d201..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class RequestProcessorRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(RequestProcessorRegistry.class); - - private final String logPrefix; - // Effectively immutable: the contents are never modified after construction - private final RequestProcessor[] processors; - - public RequestProcessorRegistry(String logPrefix, RequestProcessor... processors) { - this.logPrefix = logPrefix; - this.processors = processors; - } - - public RequestProcessor processorFor( - RequestT request, GenericType resultType) { - - for (RequestProcessor processor : processors) { - if (processor.canProcess(request, resultType)) { - LOG.trace("[{}] Using {} to process {}", logPrefix, processor, request); - // The cast is safe provided that the processor implements canProcess correctly - @SuppressWarnings("unchecked") - RequestProcessor result = - (RequestProcessor) processor; - return result; - } else { - LOG.trace("[{}] {} cannot process {}, trying next", logPrefix, processor, request); - } - } - throw new IllegalArgumentException("No request processor found for " + request); - } - - /** This creates a defensive copy on every call, do not overuse. 
*/ - public Iterable> getProcessors() { - return ImmutableList.copyOf(processors); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java deleted file mode 100644 index 51ba4d30624..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.metadata.schema.events.AggregateChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.FunctionChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.ViewChangeEvent; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import io.netty.util.concurrent.EventExecutor; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class SchemaListenerNotifier { - - private final SchemaChangeListener listener; - private final EventExecutor adminExecutor; - - // It is technically possible that a schema change could happen in the middle of session - // initialization. Don't forward events in this case, it would likely do more harm than good if a - // listener implementation doesn't expect it. 
- private boolean sessionReady; - - SchemaListenerNotifier( - SchemaChangeListener listener, EventBus eventBus, EventExecutor adminExecutor) { - this.listener = listener; - this.adminExecutor = adminExecutor; - - // No need to unregister at shutdown, this component has the same lifecycle as the cluster - eventBus.register( - AggregateChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onAggregateChangeEvent)); - eventBus.register( - FunctionChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onFunctionChangeEvent)); - eventBus.register( - KeyspaceChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onKeyspaceChangeEvent)); - eventBus.register( - TableChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTableChangeEvent)); - eventBus.register( - TypeChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTypeChangeEvent)); - eventBus.register( - ViewChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onViewChangeEvent)); - } - - void onSessionReady(Session session) { - RunOrSchedule.on( - adminExecutor, - () -> { - sessionReady = true; - listener.onSessionReady(session); - }); - } - - private void onAggregateChangeEvent(AggregateChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onAggregateCreated(event.newAggregate); - break; - case UPDATED: - listener.onAggregateUpdated(event.newAggregate, event.oldAggregate); - break; - case DROPPED: - listener.onAggregateDropped(event.oldAggregate); - break; - } - } - } - - private void onFunctionChangeEvent(FunctionChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onFunctionCreated(event.newFunction); - break; - case UPDATED: - listener.onFunctionUpdated(event.newFunction, event.oldFunction); - break; - case DROPPED: - listener.onFunctionDropped(event.oldFunction); - break; - } - } - } - - private void 
onKeyspaceChangeEvent(KeyspaceChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onKeyspaceCreated(event.newKeyspace); - break; - case UPDATED: - listener.onKeyspaceUpdated(event.newKeyspace, event.oldKeyspace); - break; - case DROPPED: - listener.onKeyspaceDropped(event.oldKeyspace); - break; - } - } - } - - private void onTableChangeEvent(TableChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onTableCreated(event.newTable); - break; - case UPDATED: - listener.onTableUpdated(event.newTable, event.oldTable); - break; - case DROPPED: - listener.onTableDropped(event.oldTable); - break; - } - } - } - - private void onTypeChangeEvent(TypeChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onUserDefinedTypeCreated(event.newType); - break; - case UPDATED: - listener.onUserDefinedTypeUpdated(event.newType, event.oldType); - break; - case DROPPED: - listener.onUserDefinedTypeDropped(event.oldType); - break; - } - } - } - - private void onViewChangeEvent(ViewChangeEvent event) { - assert adminExecutor.inEventLoop(); - if (sessionReady) { - switch (event.changeType) { - case CREATED: - listener.onViewCreated(event.newView); - break; - case UPDATED: - listener.onViewUpdated(event.newView, event.oldView); - break; - case DROPPED: - listener.onViewDropped(event.oldView); - break; - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java deleted file mode 100644 index 1a1270b41c8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * 
or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Optional; -import java.util.concurrent.CompletionStage; -import net.jcip.annotations.ThreadSafe; - -/** - * Utility class to wrap a session. - * - *

This will typically be used to mix in a convenience interface from a 3rd-party extension: - * - *

{@code
- * class ReactiveSessionWrapper extends SessionWrapper implements ReactiveSession {
- *   public ReactiveSessionWrapper(Session delegate) {
- *     super(delegate);
- *   }
- * }
- * }
- */ -@ThreadSafe -public class SessionWrapper implements Session { - - private final Session delegate; - - public SessionWrapper(@NonNull Session delegate) { - this.delegate = delegate; - } - - @NonNull - public Session getDelegate() { - return delegate; - } - - @NonNull - @Override - public String getName() { - return delegate.getName(); - } - - @NonNull - @Override - public Metadata getMetadata() { - return delegate.getMetadata(); - } - - @Override - public boolean isSchemaMetadataEnabled() { - return delegate.isSchemaMetadataEnabled(); - } - - @NonNull - @Override - public CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newValue) { - return delegate.setSchemaMetadataEnabled(newValue); - } - - @NonNull - @Override - public CompletionStage refreshSchemaAsync() { - return delegate.refreshSchemaAsync(); - } - - @NonNull - @Override - public CompletionStage checkSchemaAgreementAsync() { - return delegate.checkSchemaAgreementAsync(); - } - - @NonNull - @Override - public DriverContext getContext() { - return delegate.getContext(); - } - - @NonNull - @Override - public Optional getKeyspace() { - return delegate.getKeyspace(); - } - - @NonNull - @Override - public Optional getMetrics() { - return delegate.getMetrics(); - } - - @Nullable - @Override - public ResultT execute( - @NonNull RequestT request, @NonNull GenericType resultType) { - return delegate.execute(request, resultType); - } - - @NonNull - @Override - public CompletionStage closeFuture() { - return delegate.closeFuture(); - } - - @NonNull - @Override - public CompletionStage closeAsync() { - return delegate.closeAsync(); - } - - @NonNull - @Override - public CompletionStage forceCloseAsync() { - return delegate.forceCloseAsync(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java deleted file 
mode 100644 index 8146c5b113a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Deque; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A request throttler that limits the number of concurrent requests. 
- * - *

To activate this throttler, modify the {@code advanced.throttler} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.throttler {
- *     class = ConcurrencyLimitingRequestThrottler
- *     max-concurrent-requests = 10000
- *     max-queue-size = 10000
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ConcurrencyLimitingRequestThrottler implements RequestThrottler { - - private static final Logger LOG = - LoggerFactory.getLogger(ConcurrencyLimitingRequestThrottler.class); - - private final String logPrefix; - private final int maxConcurrentRequests; - private final int maxQueueSize; - private final AtomicInteger concurrentRequests = new AtomicInteger(0); - // CLQ is not O(1) for size(), as it forces a full iteration of the queue. So, we track - // the size of the queue explicitly. - private final Deque queue = new ConcurrentLinkedDeque<>(); - private final AtomicInteger queueSize = new AtomicInteger(0); - private volatile boolean closed = false; - - public ConcurrencyLimitingRequestThrottler(DriverContext context) { - this.logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.maxConcurrentRequests = - config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS); - this.maxQueueSize = config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE); - LOG.debug( - "[{}] Initializing with maxConcurrentRequests = {}, maxQueueSize = {}", - logPrefix, - maxConcurrentRequests, - maxQueueSize); - } - - @Override - public void register(@NonNull Throttled request) { - if (closed) { - LOG.trace("[{}] Rejecting request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - return; - } - - // Implementation note: Technically the "concurrent requests" or "queue size" - // could read transiently over the limit, but the queue itself will never grow - // beyond the limit since we always check for that condition and revert if - // over-limit. We do this instead of a CAS-loop to avoid the potential loop. 
- - // If no backlog exists AND we get capacity, we can execute immediately - if (queueSize.get() == 0) { - // Take a claim first, and then check if we are OK to proceed - int newConcurrent = concurrentRequests.incrementAndGet(); - if (newConcurrent <= maxConcurrentRequests) { - LOG.trace("[{}] Starting newly registered request", logPrefix); - request.onThrottleReady(false); - return; - } else { - // We exceeded the limit, decrement the count and fall through to the queuing logic - concurrentRequests.decrementAndGet(); - } - } - - // If we have a backlog, or we failed to claim capacity, try to enqueue - int newQueueSize = queueSize.incrementAndGet(); - if (newQueueSize <= maxQueueSize) { - LOG.trace("[{}] Enqueuing request", logPrefix); - queue.offer(request); - - // Double-check that we were still supposed to be enqueued; it is possible - // that the session was closed while we were enqueuing, it's also possible - // that it is right now removing the request, so we need to check both - if (closed) { - if (queue.remove(request)) { - queueSize.decrementAndGet(); - LOG.trace("[{}] Rejecting late request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - } - } - } else { - LOG.trace("[{}] Rejecting request because of full queue", logPrefix); - queueSize.decrementAndGet(); - fail( - request, - String.format( - "The session has reached its maximum capacity " - + "(concurrent requests: %d, queue size: %d)", - maxConcurrentRequests, maxQueueSize)); - } - } - - @Override - public void signalSuccess(@NonNull Throttled request) { - Throttled nextRequest = onRequestDoneAndDequeNext(); - if (nextRequest != null) { - nextRequest.onThrottleReady(true); - } - } - - @Override - public void signalError(@NonNull Throttled request, @NonNull Throwable error) { - signalSuccess(request); // not treated differently - } - - @Override - public void signalTimeout(@NonNull Throttled request) { - Throttled nextRequest = null; - if (!closed) { - if 
(queue.remove(request)) { // The request timed out before it was active - queueSize.decrementAndGet(); - LOG.trace("[{}] Removing timed out request from the queue", logPrefix); - } else { - nextRequest = onRequestDoneAndDequeNext(); - } - } - - if (nextRequest != null) { - nextRequest.onThrottleReady(true); - } - } - - @Override - public void signalCancel(@NonNull Throttled request) { - Throttled nextRequest = null; - if (!closed) { - if (queue.remove(request)) { // The request has been cancelled before it was active - queueSize.decrementAndGet(); - LOG.trace("[{}] Removing cancelled request from the queue", logPrefix); - } else { - nextRequest = onRequestDoneAndDequeNext(); - } - } - - if (nextRequest != null) { - nextRequest.onThrottleReady(true); - } - } - - @Nullable - private Throttled onRequestDoneAndDequeNext() { - if (!closed) { - Throttled nextRequest = queue.poll(); - if (nextRequest == null) { - concurrentRequests.decrementAndGet(); - } else { - queueSize.decrementAndGet(); - LOG.trace("[{}] Starting dequeued request", logPrefix); - return nextRequest; - } - } - - // no next task was dequeued - return null; - } - - @Override - public void close() { - closed = true; - - LOG.debug("[{}] Rejecting {} queued requests after shutdown", logPrefix, queueSize.get()); - Throttled request; - while ((request = queue.poll()) != null) { - queueSize.decrementAndGet(); - fail(request, "The session is shutting down"); - } - } - - public int getQueueSize() { - return queueSize.get(); - } - - @VisibleForTesting - int getConcurrentRequests() { - return concurrentRequests.get(); - } - - @VisibleForTesting - Deque getQueue() { - return queue; - } - - private static void fail(Throttled request, String message) { - request.onThrottleFailure(new RequestThrottlingException(message)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java 
b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java deleted file mode 100644 index 9a25059caef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -/** A thin wrapper around {@link System#nanoTime()}, to simplify testing. */ -interface NanoClock { - long nanoTime(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java deleted file mode 100644 index 2210e4b26f1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import net.jcip.annotations.ThreadSafe; - -/** - * A request throttler that does not enforce any kind of limitation: requests are always executed - * immediately. - * - *

To activate this throttler, modify the {@code advanced.throttler} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.throttler {
- *     class = PassThroughRequestThrottler
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class PassThroughRequestThrottler implements RequestThrottler { - - @SuppressWarnings("unused") - public PassThroughRequestThrottler(DriverContext context) { - // nothing to do - } - - @Override - public void register(@NonNull Throttled request) { - request.onThrottleReady(false); - } - - @Override - public void signalSuccess(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void signalError(@NonNull Throttled request, @NonNull Throwable error) { - // nothing to do - } - - @Override - public void signalTimeout(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void signalCancel(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void close() throws IOException { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java deleted file mode 100644 index 03a693dc0fe..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.EventExecutor; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A request throttler that limits the rate of requests per second. - * - *

To activate this throttler, modify the {@code advanced.throttler} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.throttler {
- *     class = RateLimitingRequestThrottler
- *     max-requests-per-second = 10000
- *     max-queue-size = 10000
- *     drain-interval = 10 milliseconds
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class RateLimitingRequestThrottler implements RequestThrottler { - - private static final Logger LOG = LoggerFactory.getLogger(RateLimitingRequestThrottler.class); - - private final String logPrefix; - private final NanoClock clock; - private final int maxRequestsPerSecond; - private final int maxQueueSize; - private final long drainIntervalNanos; - private final EventExecutor scheduler; - - private final ReentrantLock lock = new ReentrantLock(); - - @GuardedBy("lock") - private long lastUpdateNanos; - - @GuardedBy("lock") - private int storedPermits; - - @GuardedBy("lock") - private final Deque queue = new ArrayDeque<>(); - - @GuardedBy("lock") - private boolean closed; - - @SuppressWarnings("unused") - public RateLimitingRequestThrottler(DriverContext context) { - this(context, System::nanoTime); - } - - @VisibleForTesting - RateLimitingRequestThrottler(DriverContext context, NanoClock clock) { - this.logPrefix = context.getSessionName(); - this.clock = clock; - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - - this.maxRequestsPerSecond = - config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND); - this.maxQueueSize = config.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE); - Duration drainInterval = - config.getDuration(DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL); - this.drainIntervalNanos = drainInterval.toNanos(); - - this.lastUpdateNanos = clock.nanoTime(); - // Start with one second worth of permits to avoid delaying initial requests - this.storedPermits = maxRequestsPerSecond; - - this.scheduler = - ((InternalDriverContext) context).getNettyOptions().adminEventExecutorGroup().next(); - - LOG.debug( - "[{}] Initializing with maxRequestsPerSecond = {}, maxQueueSize = {}, drainInterval = {}", - logPrefix, - maxRequestsPerSecond, - maxQueueSize, - drainInterval); - } - - 
@Override - public void register(@NonNull Throttled request) { - long now = clock.nanoTime(); - lock.lock(); - try { - if (closed) { - LOG.trace("[{}] Rejecting request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - } else if (queue.isEmpty() && acquire(now, 1) == 1) { - LOG.trace("[{}] Starting newly registered request", logPrefix); - request.onThrottleReady(false); - } else if (queue.size() < maxQueueSize) { - LOG.trace("[{}] Enqueuing request", logPrefix); - if (queue.isEmpty()) { - scheduler.schedule(this::drain, drainIntervalNanos, TimeUnit.NANOSECONDS); - } - queue.add(request); - } else { - LOG.trace("[{}] Rejecting request because of full queue", logPrefix); - fail( - request, - String.format( - "The session has reached its maximum capacity " - + "(requests/s: %d, queue size: %d)", - maxRequestsPerSecond, maxQueueSize)); - } - } finally { - lock.unlock(); - } - } - - // Runs periodically when the queue is not empty. It tries to dequeue as much as possible while - // staying under the target rate. If it does not completely drain the queue, it reschedules - // itself. 
- private void drain() { - assert scheduler.inEventLoop(); - long now = clock.nanoTime(); - lock.lock(); - try { - if (closed || queue.isEmpty()) { - return; - } - int toDequeue = acquire(now, queue.size()); - LOG.trace("[{}] Dequeuing {}/{} elements", logPrefix, toDequeue, queue.size()); - for (int i = 0; i < toDequeue; i++) { - LOG.trace("[{}] Starting dequeued request", logPrefix); - queue.poll().onThrottleReady(true); - } - if (!queue.isEmpty()) { - LOG.trace( - "[{}] {} elements remaining in queue, rescheduling drain task", - logPrefix, - queue.size()); - scheduler.schedule(this::drain, drainIntervalNanos, TimeUnit.NANOSECONDS); - } - } finally { - lock.unlock(); - } - } - - @Override - public void signalSuccess(@NonNull Throttled request) { - // nothing to do - } - - @Override - public void signalError(@NonNull Throttled request, @NonNull Throwable error) { - // nothing to do - } - - @Override - public void signalTimeout(@NonNull Throttled request) { - lock.lock(); - try { - if (!closed && queue.remove(request)) { // The request timed out before it was active - LOG.trace("[{}] Removing timed out request from the queue", logPrefix); - } - } finally { - lock.unlock(); - } - } - - @Override - public void signalCancel(@NonNull Throttled request) { - lock.lock(); - try { - if (!closed && queue.remove(request)) { // The request has been cancelled before it was active - LOG.trace("[{}] Removing cancelled request from the queue", logPrefix); - } - } finally { - lock.unlock(); - } - } - - @Override - public void close() { - lock.lock(); - try { - closed = true; - LOG.debug("[{}] Rejecting {} queued requests after shutdown", logPrefix, queue.size()); - for (Throttled request : queue) { - fail(request, "The session is shutting down"); - } - } finally { - lock.unlock(); - } - } - - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private int acquire(long currentTimeNanos, int wantedPermits) { - assert lock.isHeldByCurrentThread() && 
!closed; - - long elapsedNanos = currentTimeNanos - lastUpdateNanos; - - if (elapsedNanos >= 1_000_000_000) { - // created more than the max, so whatever was stored, the sum will be capped to the max - storedPermits = maxRequestsPerSecond; - lastUpdateNanos = currentTimeNanos; - } else if (elapsedNanos > 0) { - int createdPermits = (int) (elapsedNanos * maxRequestsPerSecond / 1_000_000_000); - if (createdPermits > 0) { - // Only reset interval if we've generated permits, otherwise we might continually reset - // before we get the chance to generate anything. - lastUpdateNanos = currentTimeNanos; - } - storedPermits = Math.min(storedPermits + createdPermits, maxRequestsPerSecond); - } - - int returned = (storedPermits >= wantedPermits) ? wantedPermits : storedPermits; - storedPermits = Math.max(storedPermits - wantedPermits, 0); - return returned; - } - - public int getQueueSize() { - lock.lock(); - try { - return queue.size(); - } finally { - lock.unlock(); - } - } - - @VisibleForTesting - int getStoredPermits() { - lock.lock(); - try { - return storedPermits; - } finally { - lock.unlock(); - } - } - - @VisibleForTesting - Deque getQueue() { - lock.lock(); - try { - return queue; - } finally { - lock.unlock(); - } - } - - private static void fail(Throttled request, String message) { - request.onThrottleFailure(new RequestThrottlingException(message)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java deleted file mode 100644 index 5e84f6b1002..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.specex; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.ThreadSafe; - -/** - * A policy that schedules a configurable number of speculative executions, separated by a fixed - * delay. - * - *

To activate this policy, modify the {@code advanced.speculative-execution-policy} section in - * the driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.speculative-execution-policy {
- *     class = ConstantSpeculativeExecutionPolicy
- *     max-executions = 3
- *     delay = 100 milliseconds
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ConstantSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - - private final int maxExecutions; - private final long constantDelayMillis; - - public ConstantSpeculativeExecutionPolicy(DriverContext context, String profileName) { - DriverExecutionProfile config = context.getConfig().getProfile(profileName); - this.maxExecutions = config.getInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX); - if (this.maxExecutions < 1) { - throw new IllegalArgumentException("Max must be at least 1"); - } - this.constantDelayMillis = - config.getDuration(DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY).toMillis(); - if (this.constantDelayMillis < 0) { - throw new IllegalArgumentException("Delay must be positive or 0"); - } - } - - @Override - public long nextExecution( - @NonNull @SuppressWarnings("unused") Node node, - @Nullable @SuppressWarnings("unused") CqlIdentifier keyspace, - @NonNull @SuppressWarnings("unused") Request request, - int runningExecutions) { - assert runningExecutions >= 1; - return (runningExecutions < maxExecutions) ? constantDelayMillis : -1; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java deleted file mode 100644 index 2f6b17286e5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.specex; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.ThreadSafe; - -/** - * A policy that never triggers speculative executions. - * - *

To activate this policy, modify the {@code advanced.speculative-execution-policy} section in - * the driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.speculative-execution-policy {
- *     class = NoSpeculativeExecutionPolicy
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class NoSpeculativeExecutionPolicy implements SpeculativeExecutionPolicy { - - public NoSpeculativeExecutionPolicy( - @SuppressWarnings("unused") DriverContext context, - @SuppressWarnings("unused") String profileName) { - // nothing to do - } - - @Override - @SuppressWarnings("unused") - public long nextExecution( - @NonNull Node node, - @Nullable CqlIdentifier keyspace, - @NonNull Request request, - int runningExecutions) { - // never start speculative executions - return -1; - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java deleted file mode 100644 index 343d3f9e4e7..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.time.Duration; -import java.util.List; -import java.util.Optional; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; -import javax.net.ssl.TrustManagerFactory; -import net.jcip.annotations.ThreadSafe; - -/** - * Default SSL implementation. - * - *

To activate this class, add an {@code advanced.ssl-engine-factory} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.ssl-engine-factory {
- *     class = DefaultSslEngineFactory
- *     cipher-suites = [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ]
- *     hostname-validation = false
- *     truststore-path = /path/to/client.truststore
- *     truststore-password = password123
- *     keystore-path = /path/to/client.keystore
- *     keystore-password = password123
- *     keystore-reload-interval = 30 minutes
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class DefaultSslEngineFactory implements SslEngineFactory { - - private final SSLContext sslContext; - private final String[] cipherSuites; - private final boolean requireHostnameValidation; - private final boolean allowDnsReverseLookupSan; - private ReloadingKeyManagerFactory kmf; - - /** Builds a new instance from the driver configuration. */ - public DefaultSslEngineFactory(DriverContext driverContext) { - DriverExecutionProfile config = driverContext.getConfig().getDefaultProfile(); - try { - this.sslContext = buildContext(config); - } catch (Exception e) { - throw new IllegalStateException("Cannot initialize SSL Context", e); - } - if (config.isDefined(DefaultDriverOption.SSL_CIPHER_SUITES)) { - List list = config.getStringList(DefaultDriverOption.SSL_CIPHER_SUITES); - String tmp[] = new String[list.size()]; - this.cipherSuites = list.toArray(tmp); - } else { - this.cipherSuites = null; - } - this.requireHostnameValidation = - config.getBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, true); - this.allowDnsReverseLookupSan = - config.getBoolean(DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, true); - } - - @VisibleForTesting - protected String hostname(InetSocketAddress addr) { - return allowDnsReverseLookupSan ? hostMaybeFromDnsReverseLookup(addr) : hostNoLookup(addr); - } - - @VisibleForTesting - protected String hostMaybeFromDnsReverseLookup(InetSocketAddress addr) { - // See java.net.InetSocketAddress.getHostName: - // "This method may trigger a name service reverse lookup if the address was created with a - // literal IP address." 
- return addr.getHostName(); - } - - @VisibleForTesting - protected String hostNoLookup(InetSocketAddress addr) { - // See java.net.InetSocketAddress.getHostString: - // "This has the benefit of not attempting a reverse lookup" - return addr.getHostString(); - } - - @NonNull - @Override - public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { - SSLEngine engine; - SocketAddress remoteAddress = remoteEndpoint.resolve(); - if (remoteAddress instanceof InetSocketAddress) { - InetSocketAddress socketAddress = (InetSocketAddress) remoteAddress; - engine = sslContext.createSSLEngine(hostname(socketAddress), socketAddress.getPort()); - } else { - engine = sslContext.createSSLEngine(); - } - engine.setUseClientMode(true); - if (cipherSuites != null) { - engine.setEnabledCipherSuites(cipherSuites); - } - if (requireHostnameValidation) { - SSLParameters parameters = engine.getSSLParameters(); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - engine.setSSLParameters(parameters); - } - return engine; - } - - protected SSLContext buildContext(DriverExecutionProfile config) throws Exception { - if (config.isDefined(DefaultDriverOption.SSL_KEYSTORE_PATH) - || config.isDefined(DefaultDriverOption.SSL_TRUSTSTORE_PATH)) { - SSLContext context = SSLContext.getInstance("SSL"); - - // initialize truststore if configured. - TrustManagerFactory tmf = null; - if (config.isDefined(DefaultDriverOption.SSL_TRUSTSTORE_PATH)) { - try (InputStream tsf = - Files.newInputStream( - Paths.get(config.getString(DefaultDriverOption.SSL_TRUSTSTORE_PATH)))) { - KeyStore ts = KeyStore.getInstance("JKS"); - char[] password = - config.isDefined(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD) - ? config.getString(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD).toCharArray() - : null; - ts.load(tsf, password); - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ts); - } - } - - // initialize keystore if configured. 
- if (config.isDefined(DefaultDriverOption.SSL_KEYSTORE_PATH)) { - kmf = buildReloadingKeyManagerFactory(config); - } - - context.init( - kmf != null ? kmf.getKeyManagers() : null, - tmf != null ? tmf.getTrustManagers() : null, - new SecureRandom()); - return context; - } else { - // if both keystore and truststore aren't configured, use default SSLContext. - return SSLContext.getDefault(); - } - } - - private ReloadingKeyManagerFactory buildReloadingKeyManagerFactory(DriverExecutionProfile config) - throws Exception { - Path keystorePath = Paths.get(config.getString(DefaultDriverOption.SSL_KEYSTORE_PATH)); - String password = config.getString(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, null); - Optional reloadInterval = - Optional.ofNullable( - config.getDuration(DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, null)); - - return ReloadingKeyManagerFactory.create(keystorePath, password, reloadInterval); - } - - @Override - public void close() throws Exception { - if (kmf != null) kmf.close(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java deleted file mode 100644 index 7661325005e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import io.netty.channel.Channel; -import io.netty.handler.ssl.SslHandler; -import javax.net.ssl.SSLEngine; -import net.jcip.annotations.ThreadSafe; - -/** SSL handler factory used when JDK-based SSL was configured through the driver's public API. */ -@ThreadSafe -public class JdkSslHandlerFactory implements SslHandlerFactory { - private final SslEngineFactory sslEngineFactory; - - public JdkSslHandlerFactory(SslEngineFactory sslEngineFactory) { - this.sslEngineFactory = sslEngineFactory; - } - - @Override - public SslHandler newSslHandler(Channel channel, EndPoint remoteEndpoint) { - SSLEngine engine = sslEngineFactory.newSslEngine(remoteEndpoint); - return new SslHandler(engine); - } - - @Override - public void close() throws Exception { - sslEngineFactory.close(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java deleted file mode 100644 index 8a9e11bb2e9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.Socket; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.security.Principal; -import java.security.PrivateKey; -import java.security.Provider; -import java.security.UnrecoverableKeyException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.Duration; -import java.util.Arrays; -import java.util.Optional; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.KeyManagerFactorySpi; -import javax.net.ssl.ManagerFactoryParameters; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.X509ExtendedKeyManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ReloadingKeyManagerFactory extends KeyManagerFactory implements AutoCloseable { - private static final 
Logger logger = LoggerFactory.getLogger(ReloadingKeyManagerFactory.class); - private static final String KEYSTORE_TYPE = "JKS"; - private Path keystorePath; - private String keystorePassword; - private ScheduledExecutorService executor; - private final Spi spi; - - // We're using a single thread executor so this shouldn't need to be volatile, since all updates - // to lastDigest should come from the same thread - private volatile byte[] lastDigest; - - /** - * Create a new {@link ReloadingKeyManagerFactory} with the given keystore file and password, - * reloading from the file's content at the given interval. This function will do an initial - * reload before returning, to confirm that the file exists and is readable. - * - * @param keystorePath the keystore file to reload - * @param keystorePassword the keystore password - * @param reloadInterval the duration between reload attempts. Set to {@link Optional#empty()} to - * disable scheduled reloading. - * @return - */ - static ReloadingKeyManagerFactory create( - Path keystorePath, String keystorePassword, Optional reloadInterval) - throws UnrecoverableKeyException, KeyStoreException, NoSuchAlgorithmException, - CertificateException, IOException { - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - - KeyStore ks; - try (InputStream ksf = Files.newInputStream(keystorePath)) { - ks = KeyStore.getInstance(KEYSTORE_TYPE); - ks.load(ksf, keystorePassword.toCharArray()); - } - kmf.init(ks, keystorePassword.toCharArray()); - - ReloadingKeyManagerFactory reloadingKeyManagerFactory = new ReloadingKeyManagerFactory(kmf); - reloadingKeyManagerFactory.start(keystorePath, keystorePassword, reloadInterval); - return reloadingKeyManagerFactory; - } - - @VisibleForTesting - protected ReloadingKeyManagerFactory(KeyManagerFactory initial) { - this( - new Spi((X509ExtendedKeyManager) initial.getKeyManagers()[0]), - initial.getProvider(), - initial.getAlgorithm()); - } - - private 
ReloadingKeyManagerFactory(Spi spi, Provider provider, String algorithm) { - super(spi, provider, algorithm); - this.spi = spi; - } - - private void start( - Path keystorePath, String keystorePassword, Optional reloadInterval) { - this.keystorePath = keystorePath; - this.keystorePassword = keystorePassword; - - // Ensure that reload is called once synchronously, to make sure the file exists etc. - reload(); - - if (!reloadInterval.isPresent() || reloadInterval.get().isZero()) { - final String msg = - "KeyStore reloading is disabled. If your Cassandra cluster requires client certificates, " - + "client application restarts are infrequent, and client certificates have short lifetimes, then your client " - + "may fail to re-establish connections to Cassandra hosts. To enable KeyStore reloading, see " - + "`advanced.ssl-engine-factory.keystore-reload-interval` in reference.conf."; - logger.info(msg); - } else { - logger.info("KeyStore reloading is enabled with interval {}", reloadInterval.get()); - - this.executor = - Executors.newScheduledThreadPool( - 1, - runnable -> { - Thread t = Executors.defaultThreadFactory().newThread(runnable); - t.setName(String.format("%s-%%d", this.getClass().getSimpleName())); - t.setDaemon(true); - return t; - }); - this.executor.scheduleWithFixedDelay( - this::reload, - reloadInterval.get().toMillis(), - reloadInterval.get().toMillis(), - TimeUnit.MILLISECONDS); - } - } - - @VisibleForTesting - void reload() { - try { - reload0(); - } catch (Exception e) { - String msg = - "Failed to reload KeyStore. 
If this continues to happen, your client may use stale identity" - + " certificates and fail to re-establish connections to Cassandra hosts."; - logger.warn(msg, e); - } - } - - private synchronized void reload0() - throws NoSuchAlgorithmException, IOException, KeyStoreException, CertificateException, - UnrecoverableKeyException { - logger.debug("Checking KeyStore file {} for updates", keystorePath); - - final byte[] keyStoreBytes = Files.readAllBytes(keystorePath); - final byte[] newDigest = digest(keyStoreBytes); - if (lastDigest != null && Arrays.equals(lastDigest, digest(keyStoreBytes))) { - logger.debug("KeyStore file content has not changed; skipping update"); - return; - } - - final KeyStore keyStore = KeyStore.getInstance(KEYSTORE_TYPE); - try (InputStream inputStream = new ByteArrayInputStream(keyStoreBytes)) { - keyStore.load(inputStream, keystorePassword.toCharArray()); - } - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(keyStore, keystorePassword.toCharArray()); - logger.info("Detected updates to KeyStore file {}", keystorePath); - - this.spi.keyManager.set((X509ExtendedKeyManager) kmf.getKeyManagers()[0]); - this.lastDigest = newDigest; - } - - @Override - public void close() throws Exception { - if (executor != null) { - executor.shutdown(); - } - } - - private static byte[] digest(byte[] payload) throws NoSuchAlgorithmException { - final MessageDigest digest = MessageDigest.getInstance("SHA-256"); - return digest.digest(payload); - } - - private static class Spi extends KeyManagerFactorySpi { - DelegatingKeyManager keyManager; - - Spi(X509ExtendedKeyManager initial) { - this.keyManager = new DelegatingKeyManager(initial); - } - - @Override - protected void engineInit(KeyStore ks, char[] password) { - throw new UnsupportedOperationException(); - } - - @Override - protected void engineInit(ManagerFactoryParameters spec) { - throw new UnsupportedOperationException(); - } - - @Override - 
protected KeyManager[] engineGetKeyManagers() { - return new KeyManager[] {keyManager}; - } - } - - private static class DelegatingKeyManager extends X509ExtendedKeyManager { - AtomicReference delegate; - - DelegatingKeyManager(X509ExtendedKeyManager initial) { - delegate = new AtomicReference<>(initial); - } - - void set(X509ExtendedKeyManager keyManager) { - delegate.set(keyManager); - } - - @Override - public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) { - return delegate.get().chooseEngineClientAlias(keyType, issuers, engine); - } - - @Override - public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) { - return delegate.get().chooseEngineServerAlias(keyType, issuers, engine); - } - - @Override - public String[] getClientAliases(String keyType, Principal[] issuers) { - return delegate.get().getClientAliases(keyType, issuers); - } - - @Override - public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) { - return delegate.get().chooseClientAlias(keyType, issuers, socket); - } - - @Override - public String[] getServerAliases(String keyType, Principal[] issuers) { - return delegate.get().getServerAliases(keyType, issuers); - } - - @Override - public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) { - return delegate.get().chooseServerAlias(keyType, issuers, socket); - } - - @Override - public X509Certificate[] getCertificateChain(String alias) { - return delegate.get().getCertificateChain(alias); - } - - @Override - public PrivateKey getPrivateKey(String alias) { - return delegate.get().getPrivateKey(alias); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java deleted file mode 100644 index 4d2cb69fbfc..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.internal.core.metadata.SniEndPoint; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetSocketAddress; -import java.util.concurrent.CopyOnWriteArrayList; -import javax.net.ssl.SNIHostName; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; - -public class SniSslEngineFactory implements SslEngineFactory { - - // An offset that gets added to our "fake" ports (see below). We pick this value because it is the - // start of the ephemeral port range. 
- private static final int FAKE_PORT_OFFSET = 49152; - - private final SSLContext sslContext; - private final CopyOnWriteArrayList fakePorts = new CopyOnWriteArrayList<>(); - private final boolean allowDnsReverseLookupSan; - - public SniSslEngineFactory(SSLContext sslContext) { - this(sslContext, true); - } - - public SniSslEngineFactory(SSLContext sslContext, boolean allowDnsReverseLookupSan) { - this.sslContext = sslContext; - this.allowDnsReverseLookupSan = allowDnsReverseLookupSan; - } - - @NonNull - @Override - public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { - if (!(remoteEndpoint instanceof SniEndPoint)) { - throw new IllegalArgumentException( - String.format( - "Configuration error: can only use %s with SNI end points", - this.getClass().getSimpleName())); - } - SniEndPoint sniEndPoint = (SniEndPoint) remoteEndpoint; - InetSocketAddress address = sniEndPoint.resolve(); - String sniServerName = sniEndPoint.getServerName(); - - // When hostname verification is enabled (with setEndpointIdentificationAlgorithm), the SSL - // engine will try to match the server's certificate against the SNI host name; if that doesn't - // work, it will fall back to the "advisory peer host" passed to createSSLEngine. - // - // In our case, the first check will never succeed because our SNI host name is not the DNS name - // (we use the Cassandra host_id instead). So we *must* set the advisory peer information. - // - // However if we use the address as-is, this leads to another issue: the advisory peer - // information is also used to cache SSL sessions internally. All of our nodes share the same - // proxy address, so the JDK tries to reuse SSL sessions across nodes. But it doesn't update the - // SNI host name every time, so it ends up opening connections to the wrong node. - // - // To avoid that, we create a unique "fake" port for every node. We still get session reuse for - // a given node, but not across nodes. 
This is safe because the advisory port is only used for - // session caching. - String peerHost = allowDnsReverseLookupSan ? address.getHostName() : address.getHostString(); - SSLEngine engine = sslContext.createSSLEngine(peerHost, getFakePort(sniServerName)); - engine.setUseClientMode(true); - SSLParameters parameters = engine.getSSLParameters(); - parameters.setServerNames(ImmutableList.of(new SNIHostName(sniServerName))); - parameters.setEndpointIdentificationAlgorithm("HTTPS"); - engine.setSSLParameters(parameters); - return engine; - } - - private int getFakePort(String sniServerName) { - fakePorts.addIfAbsent(sniServerName); - return FAKE_PORT_OFFSET + fakePorts.indexOf(sniServerName); - } - - @Override - public void close() { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java deleted file mode 100644 index 87bea563796..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.ssl; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import io.netty.channel.Channel; -import io.netty.handler.ssl.SslHandler; - -/** - * Low-level SSL extension point. - * - *

SSL is separated into two interfaces to avoid exposing Netty classes in our public API: - * - *

    - *
  • "normal" (JDK-based) SSL is part of the public API, and can be configured via an instance - * of {@link com.datastax.oss.driver.api.core.ssl.SslEngineFactory} defined in the driver - * configuration. - *
  • this interface deals with Netty handlers directly. It can be used for more advanced cases, - * like using Netty's native OpenSSL integration instead of the JDK. This is considered expert - * level, and therefore part of our internal API. - *
- * - * @see DefaultDriverContext#buildSslHandlerFactory() - */ -public interface SslHandlerFactory extends AutoCloseable { - SslHandler newSslHandler(Channel channel, EndPoint remoteEndpoint); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java deleted file mode 100644 index 351ed96d66f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; - -/** - * A timestamp generator that guarantees monotonically increasing timestamps across all client - * threads, and logs warnings when timestamps drift in the future. - * - *

To activate this generator, modify the {@code advanced.timestamp-generator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.timestamp-generator {
- *     class = AtomicTimestampGenerator
- *     drift-warning {
- *       threshold = 1 second
- *       interval = 10 seconds
- *     }
- *     force-java-clock = false
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class AtomicTimestampGenerator extends MonotonicTimestampGenerator { - - private final AtomicLong lastRef = new AtomicLong(0); - - public AtomicTimestampGenerator(DriverContext context) { - super(context); - } - - @VisibleForTesting - AtomicTimestampGenerator(Clock clock, InternalDriverContext context) { - super(clock, context); - } - - @Override - public long next() { - while (true) { - long last = lastRef.get(); - long next = computeNext(last); - if (lastRef.compareAndSet(last, next)) { - return next; - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java deleted file mode 100644 index e576b13a74b..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.internal.core.os.Native; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A small abstraction around system clock that aims to provide microsecond precision with the best - * accuracy possible. - */ -public interface Clock { - Logger LOG = LoggerFactory.getLogger(Clock.class); - - /** - * Returns the best implementation for the current platform. - * - *

Usage with non-blocking threads: beware that this method may block the calling thread on its - * very first invocation, because native libraries used by the driver will be loaded at that - * moment. If that is a problem, consider invoking this method once from a thread that is allowed - * to block. Subsequent invocations are guaranteed not to block. - */ - static Clock getInstance(boolean forceJavaClock) { - if (forceJavaClock) { - LOG.info("Using Java system clock because this was explicitly required in the configuration"); - return new JavaClock(); - } else if (!Native.isCurrentTimeMicrosAvailable()) { - LOG.info( - "Could not access native clock (see debug logs for details), " - + "falling back to Java system clock"); - return new JavaClock(); - } else { - LOG.info("Using native clock for microsecond precision"); - return new NativeClock(); - } - } - - /** - * Returns the difference, measured in microseconds, between the current time and and the - * Epoch (that is, midnight, January 1, 1970 UTC). - */ - long currentTimeMicros(); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java deleted file mode 100644 index b6dfbebcdb0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class JavaClock implements Clock { - @Override - public long currentTimeMicros() { - return System.currentTimeMillis() * 1000; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java deleted file mode 100644 index 99a520d02b1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import java.time.Duration; -import java.util.concurrent.atomic.AtomicLong; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A timestamp generator that guarantees monotonicity, and logs warnings when timestamps drift in - * the future. - */ -@ThreadSafe -abstract class MonotonicTimestampGenerator implements TimestampGenerator { - - private static final Logger LOG = LoggerFactory.getLogger(MonotonicTimestampGenerator.class); - - private final Clock clock; - private final long warningThresholdMicros; - private final long warningIntervalMillis; - private final AtomicLong lastDriftWarning = new AtomicLong(Long.MIN_VALUE); - - protected MonotonicTimestampGenerator(DriverContext context) { - this(buildClock(context), context); - } - - protected MonotonicTimestampGenerator(Clock clock, DriverContext context) { - this.clock = clock; - - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - this.warningThresholdMicros = - config - .getDuration( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ZERO) - .toNanos() - / 1000; - - if (this.warningThresholdMicros == 0) { - this.warningIntervalMillis = 0; - } else { - this.warningIntervalMillis = - config - .getDuration(DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL) - .toMillis(); - } - } - - /** - * Compute the next timestamp, given the current clock tick and the last timestamp returned. - * - *

If timestamps have to drift ahead of the current clock tick to guarantee monotonicity, a - * warning will be logged according to the rules defined in the configuration. - */ - protected long computeNext(long last) { - long currentTick = clock.currentTimeMicros(); - if (last >= currentTick) { - maybeLog(currentTick, last); - return last + 1; - } - return currentTick; - } - - @Override - public void close() throws Exception { - // nothing to do - } - - private void maybeLog(long currentTick, long last) { - if (warningThresholdMicros != 0 - && LOG.isWarnEnabled() - && last > currentTick + warningThresholdMicros) { - long now = System.currentTimeMillis(); - long lastWarning = lastDriftWarning.get(); - if (now > lastWarning + warningIntervalMillis - && lastDriftWarning.compareAndSet(lastWarning, now)) { - LOG.warn( - "Clock skew detected: current tick ({}) was {} microseconds behind the last generated timestamp ({}), " - + "returned timestamps will be artificially incremented to guarantee monotonicity.", - currentTick, - last - currentTick, - last); - } - } - } - - private static Clock buildClock(DriverContext context) { - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - boolean forceJavaClock = - config.getBoolean(DefaultDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, false); - return Clock.getInstance(forceJavaClock); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java deleted file mode 100644 index 51265ead820..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; - -import com.datastax.oss.driver.internal.core.os.Native; -import java.util.concurrent.atomic.AtomicReference; -import net.jcip.annotations.ThreadSafe; - -/** - * Provides the current time with microseconds precision with some reasonable accuracy through the - * use of {@link Native#currentTimeMicros()}. - * - *

Because calling JNR methods is slightly expensive, we only call it once per second and add the - * number of nanoseconds since the last call to get the current time, which is good enough an - * accuracy for our purpose (see CASSANDRA-6106). - * - *

This reduces the cost of the call to {@link NativeClock#currentTimeMicros()} to levels - * comparable to those of a call to {@link System#nanoTime()}. - */ -@ThreadSafe -public class NativeClock implements Clock { - - private static final long ONE_SECOND_NS = NANOSECONDS.convert(1, SECONDS); - private static final long ONE_MILLISECOND_NS = NANOSECONDS.convert(1, MILLISECONDS); - - // Records a time in micros along with the System.nanoTime() value at the time the time is - // fetched. - private static class FetchedTime { - - private final long timeInMicros; - private final long nanoTimeAtCheck; - - private FetchedTime(long timeInMicros, long nanoTimeAtCheck) { - this.timeInMicros = timeInMicros; - this.nanoTimeAtCheck = nanoTimeAtCheck; - } - } - - private final AtomicReference lastFetchedTime = - new AtomicReference<>(fetchTimeMicros()); - - @Override - public long currentTimeMicros() { - FetchedTime spec = lastFetchedTime.get(); - long curNano = System.nanoTime(); - if (curNano > spec.nanoTimeAtCheck + ONE_SECOND_NS) { - lastFetchedTime.compareAndSet(spec, spec = fetchTimeMicros()); - } - return spec.timeInMicros + ((curNano - spec.nanoTimeAtCheck) / 1000); - } - - private static FetchedTime fetchTimeMicros() { - // To compensate for the fact that the Native.currentTimeMicros call could take some time, - // instead of picking the nano time before the call or after the call, we take the average of - // both. - long start = System.nanoTime(); - long micros = Native.currentTimeMicros(); - long end = System.nanoTime(); - // If it turns out the call took us more than 1 millisecond (can happen while the JVM warms up, - // unlikely otherwise, but no reasons to take risks), fall back to System.currentTimeMillis() - // temporarily. 
- if ((end - start) > ONE_MILLISECOND_NS) { - return new FetchedTime(System.currentTimeMillis() * 1000, System.nanoTime()); - } else { - return new FetchedTime(micros, (end + start) / 2); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java deleted file mode 100644 index 0df056deb04..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import net.jcip.annotations.ThreadSafe; - -/** - * A timestamp generator that never sends a timestamp with any query, therefore letting Cassandra - * assign a server-side timestamp. - * - *

To activate this generator, modify the {@code advanced.timestamp-generator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.timestamp-generator {
- *     class = ServerSideTimestampGenerator
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ServerSideTimestampGenerator implements TimestampGenerator { - - public ServerSideTimestampGenerator(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @Override - public long next() { - return Statement.NO_DEFAULT_TIMESTAMP; - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java deleted file mode 100644 index 598ae5cbbc2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import net.jcip.annotations.ThreadSafe; - -/** - * A timestamp generator that guarantees monotonically increasing timestamps within each thread, and - * logs warnings when timestamps drift in the future. - * - *

Beware that there is a risk of timestamp collision with this generator when accessed by more - * than one thread at a time; only use it when threads are not in direct competition for timestamp - * ties (i.e., they are executing independent statements). - * - *

To activate this generator, modify the {@code advanced.timestamp-generator} section in the - * driver configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.timestamp-generator {
- *     class = ThreadLocalTimestampGenerator
- *     drift-warning {
- *       threshold = 1 second
- *       interval = 10 seconds
- *     }
- *     force-java-clock = false
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - */ -@ThreadSafe -public class ThreadLocalTimestampGenerator extends MonotonicTimestampGenerator { - - private final ThreadLocal lastRef = ThreadLocal.withInitial(() -> 0L); - - public ThreadLocalTimestampGenerator(DriverContext context) { - super(context); - } - - @VisibleForTesting - ThreadLocalTimestampGenerator(Clock clock, DriverContext context) { - super(clock, context); - } - - @Override - public long next() { - Long last = this.lastRef.get(); - long next = computeNext(last); - this.lastRef.set(next); - return next; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java deleted file mode 100644 index 6fe2ba059bd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.util.Loggers; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Combines multiple request trackers into a single one. - * - *

Any exception thrown by a child tracker is caught and logged. - */ -@ThreadSafe -public class MultiplexingRequestTracker implements RequestTracker { - - private static final Logger LOG = LoggerFactory.getLogger(MultiplexingRequestTracker.class); - - private final List trackers = new CopyOnWriteArrayList<>(); - - public MultiplexingRequestTracker() {} - - public MultiplexingRequestTracker(RequestTracker... trackers) { - this(Arrays.asList(trackers)); - } - - public MultiplexingRequestTracker(Collection trackers) { - addTrackers(trackers); - } - - private void addTrackers(Collection source) { - for (RequestTracker tracker : source) { - addTracker(tracker); - } - } - - private void addTracker(RequestTracker toAdd) { - Objects.requireNonNull(toAdd, "tracker cannot be null"); - if (toAdd instanceof MultiplexingRequestTracker) { - addTrackers(((MultiplexingRequestTracker) toAdd).trackers); - } else { - trackers.add(toAdd); - } - } - - public void register(@NonNull RequestTracker tracker) { - addTracker(tracker); - } - - @Override - public void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onSuccess( - request, latencyNanos, executionProfile, node, sessionRequestLogPrefix), - sessionRequestLogPrefix, - "onSuccess"); - } - - @Override - public void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @Nullable Node node, - @NonNull String sessionRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onError( - request, error, latencyNanos, executionProfile, node, sessionRequestLogPrefix), - sessionRequestLogPrefix, - "onError"); - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull 
String nodeRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onNodeSuccess( - request, latencyNanos, executionProfile, node, nodeRequestLogPrefix), - nodeRequestLogPrefix, - "onNodeSuccess"); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - invokeTrackers( - tracker -> - tracker.onNodeError( - request, error, latencyNanos, executionProfile, node, nodeRequestLogPrefix), - nodeRequestLogPrefix, - "onNodeError"); - } - - @Override - public void onSessionReady(@NonNull Session session) { - invokeTrackers(tracker -> tracker.onSessionReady(session), session.getName(), "onSessionReady"); - } - - @Override - public void close() throws Exception { - for (RequestTracker tracker : trackers) { - try { - tracker.close(); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "Unexpected error while closing request tracker {}.", tracker, e); - } - } - } - - private void invokeTrackers( - @NonNull Consumer action, String logPrefix, String event) { - for (RequestTracker tracker : trackers) { - try { - action.accept(tracker); - } catch (Exception e) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while notifying request tracker {} of an {} event.", - logPrefix, - tracker, - event, - e); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java deleted file mode 100644 index 3821c6ace2d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** - * Default request tracker implementation with empty methods. This implementation is used when no - * trackers were registered, neither programmatically nor through the configuration. 
- */ -@ThreadSafe -public class NoopRequestTracker implements RequestTracker { - - public NoopRequestTracker(@SuppressWarnings("unused") DriverContext context) { - // nothing to do - } - - @Override - public void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - // nothing to do - } - - @Override - public void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - Node node, - @NonNull String sessionRequestLogPrefix) { - // nothing to do - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // nothing to do - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java deleted file mode 100644 index 808d08e228d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.internal.core.util.NanoTime; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class RequestLogFormatter { - - private static final String FURTHER_VALUES_TRUNCATED = "...]"; - private static final String TRUNCATED = "..."; - - private final DriverContext context; - - public RequestLogFormatter(DriverContext context) { - this.context = context; - } - - public StringBuilder logBuilder(String logPrefix, Node node) { - return new StringBuilder("[").append(logPrefix).append("][").append(node).append("] "); - } - - public void appendSuccessDescription(StringBuilder builder) { - builder.append("Success "); 
- } - - public void appendSlowDescription(StringBuilder builder) { - builder.append("Slow "); - } - - public void appendErrorDescription(StringBuilder builder) { - builder.append("Error "); - } - - public void appendLatency(long latencyNanos, StringBuilder builder) { - builder.append('(').append(NanoTime.format(latencyNanos)).append(") "); - } - - public void appendRequest( - Request request, - int maxQueryLength, - boolean showValues, - int maxValues, - int maxValueLength, - StringBuilder builder) { - appendStats(request, builder); - appendQueryString(request, maxQueryLength, builder); - if (showValues) { - appendValues(request, maxValues, maxValueLength, true, builder); - } - } - - protected void appendStats(Request request, StringBuilder builder) { - int valueCount = countBoundValues(request); - if (request instanceof BatchStatement) { - BatchStatement statement = (BatchStatement) request; - builder - .append('[') - .append(statement.size()) - .append(" statements, ") - .append(valueCount) - .append(" values] "); - } else { - builder.append('[').append(valueCount).append(" values] "); - } - } - - protected int countBoundValues(Request request) { - if (request instanceof BatchStatement) { - int count = 0; - for (BatchableStatement child : (BatchStatement) request) { - count += countBoundValues(child); - } - return count; - } else if (request instanceof BoundStatement) { - return ((BoundStatement) request).getPreparedStatement().getVariableDefinitions().size(); - } else if (request instanceof SimpleStatement) { - SimpleStatement statement = (SimpleStatement) request; - return Math.max(statement.getPositionalValues().size(), statement.getNamedValues().size()); - } else { - return 0; - } - } - - protected int appendQueryString(Request request, int limit, StringBuilder builder) { - if (request instanceof BatchStatement) { - BatchStatement batch = (BatchStatement) request; - limit = append("BEGIN", limit, builder); - if (batch.getBatchType() == 
DefaultBatchType.UNLOGGED) { - limit = append(" UNLOGGED", limit, builder); - } else if (batch.getBatchType() == DefaultBatchType.COUNTER) { - limit = append(" COUNTER", limit, builder); - } - limit = append(" BATCH ", limit, builder); - for (BatchableStatement child : batch) { - limit = appendQueryString(child, limit, builder); - if (limit < 0) { - break; - } - limit = append("; ", limit, builder); - } - limit = append("APPLY BATCH", limit, builder); - return limit; - } else if (request instanceof BoundStatement) { - BoundStatement statement = (BoundStatement) request; - return append(statement.getPreparedStatement().getQuery(), limit, builder); - } else if (request instanceof SimpleStatement) { - SimpleStatement statement = (SimpleStatement) request; - return append(statement.getQuery(), limit, builder); - } else { - return append(request.toString(), limit, builder); - } - } - - /** - * @return the number of values that can still be appended after this, or -1 if the max was - * reached by this call. 
- */ - protected int appendValues( - Request request, - int maxValues, - int maxValueLength, - boolean addSeparator, - StringBuilder builder) { - if (request instanceof BatchStatement) { - BatchStatement batch = (BatchStatement) request; - for (BatchableStatement child : batch) { - maxValues = appendValues(child, maxValues, maxValueLength, addSeparator, builder); - if (addSeparator) { - addSeparator = false; - } - if (maxValues < 0) { - return -1; - } - } - } else if (request instanceof BoundStatement) { - BoundStatement statement = (BoundStatement) request; - ColumnDefinitions definitions = statement.getPreparedStatement().getVariableDefinitions(); - List values = statement.getValues(); - assert definitions.size() == values.size(); - if (definitions.size() > 0) { - if (addSeparator) { - builder.append(' '); - } - builder.append('['); - for (int i = 0; i < definitions.size(); i++) { - if (i > 0) { - builder.append(", "); - } - maxValues -= 1; - if (maxValues < 0) { - builder.append(FURTHER_VALUES_TRUNCATED); - return -1; - } - builder.append(definitions.get(i).getName().asCql(true)).append('='); - if (!statement.isSet(i)) { - builder.append(""); - } else { - ByteBuffer value = values.get(i); - DataType type = definitions.get(i).getType(); - appendValue(value, type, maxValueLength, builder); - } - } - builder.append(']'); - } - } else if (request instanceof SimpleStatement) { - SimpleStatement statement = (SimpleStatement) request; - if (!statement.getPositionalValues().isEmpty()) { - if (addSeparator) { - builder.append(' '); - } - builder.append('['); - int i = 0; - for (Object value : statement.getPositionalValues()) { - if (i > 0) { - builder.append(", "); - } - maxValues -= 1; - if (maxValues < 0) { - builder.append(FURTHER_VALUES_TRUNCATED); - return -1; - } - builder.append('v').append(i).append('='); - appendValue(value, maxValueLength, builder); - i += 1; - } - builder.append(']'); - } else if (!statement.getNamedValues().isEmpty()) { - if (addSeparator) { 
- builder.append(' '); - } - builder.append('['); - int i = 0; - for (Map.Entry entry : statement.getNamedValues().entrySet()) { - if (i > 0) { - builder.append(", "); - } - maxValues -= 1; - if (maxValues < 0) { - builder.append(FURTHER_VALUES_TRUNCATED); - return -1; - } - builder.append(entry.getKey().asCql(true)).append('='); - appendValue(entry.getValue(), maxValueLength, builder); - i += 1; - } - builder.append(']'); - } - } - return maxValues; - } - - protected void appendValue(ByteBuffer raw, DataType type, int maxLength, StringBuilder builder) { - TypeCodec codec = context.getCodecRegistry().codecFor(type); - if (type.equals(DataTypes.BLOB)) { - // For very large buffers, apply the limit before converting into a string - int maxBufferLength = Math.max((maxLength - 2) / 2, 0); - boolean bufferTooLarge = raw.remaining() > maxBufferLength; - if (bufferTooLarge) { - raw = (ByteBuffer) raw.duplicate().limit(maxBufferLength); - } - Object value = codec.decode(raw, context.getProtocolVersion()); - append(codec.format(value), maxLength, builder); - if (bufferTooLarge) { - builder.append(TRUNCATED); - } - } else { - Object value = codec.decode(raw, context.getProtocolVersion()); - append(codec.format(value), maxLength, builder); - } - } - - protected void appendValue(Object value, int maxLength, StringBuilder builder) { - TypeCodec codec = context.getCodecRegistry().codecFor(value); - if (value instanceof ByteBuffer) { - // For very large buffers, apply the limit before converting into a string - ByteBuffer buffer = (ByteBuffer) value; - int maxBufferLength = Math.max((maxLength - 2) / 2, 0); - boolean bufferTooLarge = buffer.remaining() > maxBufferLength; - if (bufferTooLarge) { - buffer = (ByteBuffer) buffer.duplicate().limit(maxBufferLength); - } - append(codec.format(buffer), maxLength, builder); - if (bufferTooLarge) { - builder.append(TRUNCATED); - } - } else { - append(codec.format(value), maxLength, builder); - } - } - - /** - * @return the number of 
characters that can still be appended after this, or -1 if this call hit - * the limit. - */ - protected int append(String value, int limit, StringBuilder builder) { - if (limit < 0) { - // Small simplification to avoid having to check the limit every time when we do a sequence of - // simple calls, like BEGIN... UNLOGGED... BATCH. If the first call hits the limit, the next - // ones will be ignored. - return limit; - } else if (value.length() <= limit) { - builder.append(value); - return limit - value.length(); - } else { - builder.append(value.substring(0, limit)).append(TRUNCATED); - return -1; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java deleted file mode 100644 index f242ff89c54..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A request tracker that logs the requests executed through the session, according to a set of - * configurable options. - * - *

To activate this tracker, modify the {@code advanced.request-tracker} section in the driver - * configuration, for example: - * - *

- * datastax-java-driver {
- *   advanced.request-tracker {
- *     classes = [RequestLogger]
- *     logs {
- *       success { enabled = true }
- *       slow { enabled = true, threshold = 1 second }
- *       error { enabled = true }
- *       max-query-length = 500
- *       show-values = true
- *       max-value-length = 50
- *       max-values = 50
- *       show-stack-traces = true
- *     }
- *   }
- * }
- * 
- * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Note that if a tracker is specified programmatically with {@link - * SessionBuilder#addRequestTracker(RequestTracker)}, the configuration is ignored. - */ -@ThreadSafe -public class RequestLogger implements RequestTracker { - - private static final Logger LOG = LoggerFactory.getLogger(RequestLogger.class); - - public static final int DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH = 500; - public static final boolean DEFAULT_REQUEST_LOGGER_SHOW_VALUES = true; - public static final int DEFAULT_REQUEST_LOGGER_MAX_VALUES = 50; - public static final int DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH = 50; - - private final RequestLogFormatter formatter; - - public RequestLogger(DriverContext context) { - this(new RequestLogFormatter(context)); - } - - protected RequestLogger(RequestLogFormatter formatter) { - this.formatter = formatter; - } - - @Override - public void onSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String sessionRequestLogPrefix) { - - boolean successEnabled = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, false); - boolean slowEnabled = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, false); - if (!successEnabled && !slowEnabled) { - return; - } - - long slowThresholdNanos = - executionProfile - .getDuration(DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, Duration.ofSeconds(1)) - .toNanos(); - boolean isSlow = latencyNanos > slowThresholdNanos; - if ((isSlow && !slowEnabled) || (!isSlow && !successEnabled)) { - return; - } - - int maxQueryLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH); - boolean showValues = - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, DEFAULT_REQUEST_LOGGER_SHOW_VALUES); - int maxValues = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, 
DEFAULT_REQUEST_LOGGER_MAX_VALUES); - int maxValueLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH); - - logSuccess( - request, - latencyNanos, - isSlow, - node, - maxQueryLength, - showValues, - maxValues, - maxValueLength, - sessionRequestLogPrefix); - } - - @Override - public void onError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - Node node, - @NonNull String sessionRequestLogPrefix) { - - if (!executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, false)) { - return; - } - - int maxQueryLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH); - boolean showValues = - executionProfile.getBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, DEFAULT_REQUEST_LOGGER_SHOW_VALUES); - int maxValues = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, DEFAULT_REQUEST_LOGGER_MAX_VALUES); - - int maxValueLength = - executionProfile.getInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH); - boolean showStackTraces = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, false); - - logError( - request, - error, - latencyNanos, - node, - maxQueryLength, - showValues, - maxValues, - maxValueLength, - showStackTraces, - sessionRequestLogPrefix); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - // Nothing to do - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - 
// Nothing to do - } - - @Override - public void close() throws Exception { - // nothing to do - } - - protected void logSuccess( - Request request, - long latencyNanos, - boolean isSlow, - Node node, - int maxQueryLength, - boolean showValues, - int maxValues, - int maxValueLength, - String logPrefix) { - - StringBuilder builder = formatter.logBuilder(logPrefix, node); - if (isSlow) { - formatter.appendSlowDescription(builder); - } else { - formatter.appendSuccessDescription(builder); - } - formatter.appendLatency(latencyNanos, builder); - formatter.appendRequest( - request, maxQueryLength, showValues, maxValues, maxValueLength, builder); - LOG.info(builder.toString()); - } - - protected void logError( - Request request, - Throwable error, - long latencyNanos, - Node node, - int maxQueryLength, - boolean showValues, - int maxValues, - int maxValueLength, - boolean showStackTraces, - String logPrefix) { - - StringBuilder builder = formatter.logBuilder(logPrefix, node); - formatter.appendErrorDescription(builder); - formatter.appendLatency(latencyNanos, builder); - formatter.appendRequest( - request, maxQueryLength, showValues, maxValues, maxValueLength, builder); - if (showStackTraces) { - LOG.error(builder.toString(), error); - } else { - LOG.error("{} [{}]", builder.toString(), error.toString()); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java deleted file mode 100644 index cc07d6717f4..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class UuidRequestIdGenerator implements RequestIdGenerator { - public UuidRequestIdGenerator(DriverContext context) {} - - /** Generates a random v4 UUID. 
*/ - @Override - public String getSessionRequestId() { - return Uuids.random().toString(); - } - - /** - * {session-request-id}-{random-uuid} All node requests for a session request will have the same - * session request id - */ - @Override - public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { - return parentId + "-" + Uuids.random(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java deleted file mode 100644 index fe15b93bc8e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.shaded.guava.common.io.BaseEncoding; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.security.SecureRandom; -import java.util.Random; - -public class W3CContextRequestIdGenerator implements RequestIdGenerator { - - private final Random random = new SecureRandom(); - private final BaseEncoding baseEncoding = BaseEncoding.base16().lowerCase(); - private final String payloadKey; - - public W3CContextRequestIdGenerator(DriverContext context) { - payloadKey = RequestIdGenerator.super.getCustomPayloadKey(); - } - - public W3CContextRequestIdGenerator(String payloadKey) { - this.payloadKey = payloadKey; - } - - /** Random 16 bytes, e.g. "4bf92f3577b34da6a3ce929d0e0e4736" */ - @Override - public String getSessionRequestId() { - byte[] bytes = new byte[16]; - random.nextBytes(bytes); - return baseEncoding.encode(bytes); - } - - /** - * Following the format of W3C "traceparent" spec, - * https://www.w3.org/TR/trace-context/#traceparent-header-field-values e.g. 
- * "00-4bf92f3577b34da6a3ce929d0e0e4736-a3ce929d0e0e4736-01" All node requests in the same session - * request share the same "trace-id" field value - */ - @Override - public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { - byte[] bytes = new byte[8]; - random.nextBytes(bytes); - return String.format("00-%s-%s-00", parentId, baseEncoding.encode(bytes)); - } - - @Override - public String getCustomPayloadKey() { - return this.payloadKey; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java deleted file mode 100644 index 1e02a6b8e82..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.util.IntMap; -import java.util.List; -import java.util.Map; - -public class DataTypeHelper { - - public static DataType fromProtocolSpec(RawType rawType, AttachmentPoint attachmentPoint) { - DataType type = PRIMITIVE_TYPES_BY_CODE.get(rawType.id); - if (type != null) { - return type; - } else { - switch (rawType.id) { - case ProtocolConstants.DataType.CUSTOM: - RawType.RawCustom rawCustom = (RawType.RawCustom) rawType; - return DataTypes.custom(rawCustom.className); - case ProtocolConstants.DataType.LIST: - RawType.RawList rawList = (RawType.RawList) rawType; - return DataTypes.listOf(fromProtocolSpec(rawList.elementType, attachmentPoint)); - case ProtocolConstants.DataType.SET: - RawType.RawSet rawSet = (RawType.RawSet) rawType; - return DataTypes.setOf(fromProtocolSpec(rawSet.elementType, attachmentPoint)); - case ProtocolConstants.DataType.MAP: - RawType.RawMap rawMap = (RawType.RawMap) rawType; - return DataTypes.mapOf( - fromProtocolSpec(rawMap.keyType, attachmentPoint), - fromProtocolSpec(rawMap.valueType, attachmentPoint)); - case ProtocolConstants.DataType.TUPLE: - RawType.RawTuple rawTuple = (RawType.RawTuple) rawType; - List rawFieldsList = rawTuple.fieldTypes; - ImmutableList.Builder fields = ImmutableList.builder(); - for (RawType rawField : rawFieldsList) { - fields.add(fromProtocolSpec(rawField, attachmentPoint)); - } - return new DefaultTupleType(fields.build(), attachmentPoint); - case ProtocolConstants.DataType.UDT: 
- RawType.RawUdt rawUdt = (RawType.RawUdt) rawType; - ImmutableList.Builder fieldNames = ImmutableList.builder(); - ImmutableList.Builder fieldTypes = ImmutableList.builder(); - for (Map.Entry entry : rawUdt.fields.entrySet()) { - fieldNames.add(CqlIdentifier.fromInternal(entry.getKey())); - fieldTypes.add(fromProtocolSpec(entry.getValue(), attachmentPoint)); - } - return new DefaultUserDefinedType( - CqlIdentifier.fromInternal(rawUdt.keyspace), - CqlIdentifier.fromInternal(rawUdt.typeName), - false, - fieldNames.build(), - fieldTypes.build(), - attachmentPoint); - default: - throw new IllegalArgumentException("Unsupported type: " + rawType.id); - } - } - } - - private static IntMap PRIMITIVE_TYPES_BY_CODE = - sortByProtocolCode( - DataTypes.ASCII, - DataTypes.BIGINT, - DataTypes.BLOB, - DataTypes.BOOLEAN, - DataTypes.COUNTER, - DataTypes.DECIMAL, - DataTypes.DOUBLE, - DataTypes.FLOAT, - DataTypes.INT, - DataTypes.TIMESTAMP, - DataTypes.UUID, - DataTypes.VARINT, - DataTypes.TIMEUUID, - DataTypes.INET, - DataTypes.DATE, - DataTypes.TEXT, - DataTypes.TIME, - DataTypes.SMALLINT, - DataTypes.TINYINT, - DataTypes.DURATION); - - private static IntMap sortByProtocolCode(DataType... types) { - IntMap.Builder builder = IntMap.builder(); - for (DataType type : types) { - builder.put(type.getProtocolCode(), type); - } - return builder.build(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java deleted file mode 100644 index 7b9e03818ac..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCustomType implements CustomType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final String className; - - public DefaultCustomType(@NonNull String className) { - Preconditions.checkNotNull(className); - this.className = className; - } - - @NonNull - @Override - public String getClassName() { - return className; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - // nothing to do - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CustomType) { - CustomType that = (CustomType) other; - return this.className.equals(that.getClassName()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return className.hashCode(); - } - - @Override - public String toString() { - return "Custom(" + 
className + ")"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(className); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java deleted file mode 100644 index 6c21b44639e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultListType implements ListType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final DataType elementType; - /** @serial */ - private final boolean frozen; - - public DefaultListType(@NonNull DataType elementType, boolean frozen) { - Preconditions.checkNotNull(elementType); - this.elementType = elementType; - this.frozen = frozen; - } - - @NonNull - @Override - public DataType getElementType() { - return elementType; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public boolean isDetached() { - return elementType.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - elementType.attach(attachmentPoint); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ListType) { - ListType that = (ListType) other; - // frozen is not taken into account - return this.elementType.equals(that.getElementType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultListType.class, this.elementType); - } - - @Override - public String toString() { - return "List(" + elementType + ", " + (frozen ? 
"" : "not ") + "frozen)"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(elementType); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java deleted file mode 100644 index 8da9f196f26..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultMapType implements MapType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final DataType keyType; - /** @serial */ - private final DataType valueType; - /** @serial */ - private final boolean frozen; - - public DefaultMapType(@NonNull DataType keyType, @NonNull DataType valueType, boolean frozen) { - Preconditions.checkNotNull(keyType); - Preconditions.checkNotNull(valueType); - this.keyType = keyType; - this.valueType = valueType; - this.frozen = frozen; - } - - @NonNull - @Override - public DataType getKeyType() { - return keyType; - } - - @NonNull - @Override - public DataType getValueType() { - return valueType; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public boolean isDetached() { - return keyType.isDetached() || valueType.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - keyType.attach(attachmentPoint); - valueType.attach(attachmentPoint); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof MapType) { - MapType that = (MapType) other; - // frozen is not taken into account - return this.keyType.equals(that.getKeyType()) && this.valueType.equals(that.getValueType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultMapType.class, keyType, valueType); - 
} - - @Override - public String toString() { - return "Map(" + keyType + " => " + valueType + ", " + (frozen ? "" : "not ") + "frozen)"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(keyType); - Preconditions.checkNotNull(valueType); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java deleted file mode 100644 index 27641731c72..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultSetType implements SetType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final DataType elementType; - /** @serial */ - private final boolean frozen; - - public DefaultSetType(@NonNull DataType elementType, boolean frozen) { - Preconditions.checkNotNull(elementType); - this.elementType = elementType; - this.frozen = frozen; - } - - @NonNull - @Override - public DataType getElementType() { - return elementType; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @Override - public boolean isDetached() { - return elementType.isDetached(); - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - elementType.attach(attachmentPoint); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof SetType) { - SetType that = (SetType) other; - // frozen is not taken into account - return this.elementType.equals(that.getElementType()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultSetType.class, this.elementType); - } - - @Override - public String toString() { - return "Set(" + elementType + ", " + (frozen ? 
"" : "not ") + "frozen)"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(elementType); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java deleted file mode 100644 index 29b1b20436f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.List; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultTupleType implements TupleType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final ImmutableList componentTypes; - - private transient volatile AttachmentPoint attachmentPoint; - - public DefaultTupleType( - @NonNull List componentTypes, @NonNull AttachmentPoint attachmentPoint) { - Preconditions.checkNotNull(componentTypes); - this.componentTypes = ImmutableList.copyOf(componentTypes); - this.attachmentPoint = attachmentPoint; - } - - public DefaultTupleType(@NonNull List componentTypes) { - this(componentTypes, AttachmentPoint.NONE); - } - - @NonNull - @Override - public List getComponentTypes() { - return componentTypes; - } - - @NonNull - @Override - public TupleValue newValue() { - return new DefaultTupleValue(this); - } - - @NonNull - @Override - public TupleValue newValue(@NonNull Object... 
values) { - return new DefaultTupleValue(this, values); - } - - @Override - public boolean isDetached() { - return attachmentPoint == AttachmentPoint.NONE; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - for (DataType componentType : componentTypes) { - componentType.attach(attachmentPoint); - } - } - - @NonNull - @Override - public AttachmentPoint getAttachmentPoint() { - return attachmentPoint; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof TupleType) { - TupleType that = (TupleType) other; - return this.componentTypes.equals(that.getComponentTypes()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return componentTypes.hashCode(); - } - - @Override - public String toString() { - return "Tuple(" + WITH_COMMA.join(componentTypes) + ")"; - } - - private static final Joiner WITH_COMMA = Joiner.on(", "); - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(componentTypes); - this.attachmentPoint = AttachmentPoint.NONE; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java deleted file mode 100644 index 6b1431dc699..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.data.IdentifierIndex; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultUserDefinedType implements UserDefinedType, Serializable { - - private static final long serialVersionUID = 1; - - /** @serial */ - private final CqlIdentifier keyspace; - /** @serial */ - private final CqlIdentifier name; - - // Data types are only [de]serialized as part of a row, frozenness doesn't matter in that context - private final transient boolean frozen; - - /** @serial */ - private final List fieldNames; - /** @serial */ - private final List fieldTypes; - - private transient IdentifierIndex index; - private transient volatile AttachmentPoint attachmentPoint; - - public DefaultUserDefinedType( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier 
name, - boolean frozen, - List fieldNames, - @NonNull List fieldTypes, - @NonNull AttachmentPoint attachmentPoint) { - Preconditions.checkNotNull(keyspace); - Preconditions.checkNotNull(name); - Preconditions.checkNotNull(fieldNames); - Preconditions.checkNotNull(fieldTypes); - Preconditions.checkArgument(fieldNames.size() > 0, "Field names list can't be null or empty"); - Preconditions.checkArgument( - fieldTypes.size() == fieldNames.size(), - "There should be the same number of field names and types"); - this.keyspace = keyspace; - this.name = name; - this.frozen = frozen; - this.fieldNames = ImmutableList.copyOf(fieldNames); - this.fieldTypes = ImmutableList.copyOf(fieldTypes); - this.index = new IdentifierIndex(this.fieldNames); - this.attachmentPoint = attachmentPoint; - } - - public DefaultUserDefinedType( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - boolean frozen, - @NonNull List fieldNames, - @NonNull List fieldTypes) { - this(keyspace, name, frozen, fieldNames, fieldTypes, AttachmentPoint.NONE); - } - - @NonNull - @Override - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - @Override - public CqlIdentifier getName() { - return name; - } - - @Override - public boolean isFrozen() { - return frozen; - } - - @NonNull - @Override - public List getFieldNames() { - return fieldNames; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return index.allIndicesOf(id); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return index.firstIndexOf(id); - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return index.allIndicesOf(name); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return index.firstIndexOf(name); - } - - @NonNull - @Override - public List getFieldTypes() { - return fieldTypes; - } - - @NonNull - @Override - public UserDefinedType copy(boolean newFrozen) { - return (newFrozen == frozen) - ? 
this - : new DefaultUserDefinedType( - keyspace, name, newFrozen, fieldNames, fieldTypes, attachmentPoint); - } - - @NonNull - @Override - public UdtValue newValue() { - return new DefaultUdtValue(this); - } - - @NonNull - @Override - public UdtValue newValue(@NonNull Object... fields) { - return new DefaultUdtValue(this, fields); - } - - @Override - public boolean isDetached() { - return attachmentPoint == AttachmentPoint.NONE; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - for (DataType fieldType : fieldTypes) { - fieldType.attach(attachmentPoint); - } - } - - @NonNull - @Override - public AttachmentPoint getAttachmentPoint() { - return attachmentPoint; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof UserDefinedType) { - UserDefinedType that = (UserDefinedType) other; - // frozen is ignored in comparisons - return this.keyspace.equals(that.getKeyspace()) - && this.name.equals(that.getName()) - && this.fieldNames.equals(that.getFieldNames()) - && this.fieldTypes.equals(that.getFieldTypes()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(keyspace, name, fieldNames, fieldTypes); - } - - @Override - public String toString() { - return "UDT(" + keyspace.asCql(true) + "." 
+ name.asCql(true) + ")"; - } - - private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { - in.defaultReadObject(); - Preconditions.checkNotNull(keyspace); - Preconditions.checkNotNull(name); - Preconditions.checkArgument( - fieldNames != null && fieldNames.size() > 0, "Field names list can't be null or empty"); - Preconditions.checkArgument( - fieldTypes != null && fieldTypes.size() == fieldNames.size(), - "There should be the same number of field names and types"); - this.attachmentPoint = AttachmentPoint.NONE; - this.index = new IdentifierIndex(this.fieldNames); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java deleted file mode 100644 index 0b1ced94769..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.VectorType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultVectorType implements VectorType { - - public static final String VECTOR_CLASS_NAME = "org.apache.cassandra.db.marshal.VectorType"; - - private final DataType subtype; - private final int dimensions; - - public DefaultVectorType(DataType subtype, int dimensions) { - - this.dimensions = dimensions; - this.subtype = subtype; - } - - /* ============== ContainerType interface ============== */ - @Override - public DataType getElementType() { - return this.subtype; - } - - /* ============== VectorType interface ============== */ - @Override - public int getDimensions() { - return this.dimensions; - } - - /* ============== CustomType interface ============== */ - @NonNull - @Override - public String getClassName() { - return VECTOR_CLASS_NAME; - } - - @NonNull - @Override - public String asCql(boolean includeFrozen, boolean pretty) { - return String.format("vector<%s, %d>", getElementType().asCql(true, false), getDimensions()); - } - - /* ============== General class implementation ============== */ - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } else if (o instanceof DefaultVectorType) { - DefaultVectorType that = (DefaultVectorType) o; - return that.subtype.equals(this.subtype) && that.dimensions == this.dimensions; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(DefaultVectorType.class, subtype, dimensions); - } - - @Override - public String toString() { - return String.format("Vector(%s, %d)", getElementType(), getDimensions()); - } - - @Override - public boolean isDetached() { - return false; - } - 
- @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - // nothing to do - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java deleted file mode 100644 index c6f815a7487..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.Serializable; -import java.util.Locale; -import net.jcip.annotations.Immutable; - -@Immutable -public class PrimitiveType implements DataType, Serializable { - - /** @serial */ - private final int protocolCode; - - public PrimitiveType(int protocolCode) { - this.protocolCode = protocolCode; - } - - @Override - public int getProtocolCode() { - return protocolCode; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) { - // nothing to do - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof PrimitiveType) { - PrimitiveType that = (PrimitiveType) other; - return this.protocolCode == that.protocolCode; - } else { - return false; - } - } - - @Override - public int hashCode() { - return protocolCode; - } - - @NonNull - @Override - public String asCql(boolean includeFrozen, boolean pretty) { - return codeName(protocolCode).toLowerCase(Locale.ROOT); - } - - @Override - public String toString() { - return codeName(protocolCode); - } - - private static String codeName(int protocolCode) { - // Reminder: we don't use enums to leave the door open for custom extensions - switch (protocolCode) { - case ProtocolConstants.DataType.ASCII: - return "ASCII"; - case ProtocolConstants.DataType.BIGINT: - return "BIGINT"; - case ProtocolConstants.DataType.BLOB: - return "BLOB"; - case ProtocolConstants.DataType.BOOLEAN: - return "BOOLEAN"; - case ProtocolConstants.DataType.COUNTER: - return "COUNTER"; - case ProtocolConstants.DataType.DECIMAL: - return "DECIMAL"; - case ProtocolConstants.DataType.DOUBLE: - return "DOUBLE"; 
- case ProtocolConstants.DataType.FLOAT: - return "FLOAT"; - case ProtocolConstants.DataType.INT: - return "INT"; - case ProtocolConstants.DataType.TIMESTAMP: - return "TIMESTAMP"; - case ProtocolConstants.DataType.UUID: - return "UUID"; - case ProtocolConstants.DataType.VARINT: - return "VARINT"; - case ProtocolConstants.DataType.TIMEUUID: - return "TIMEUUID"; - case ProtocolConstants.DataType.INET: - return "INET"; - case ProtocolConstants.DataType.DATE: - return "DATE"; - case ProtocolConstants.DataType.VARCHAR: - return "TEXT"; - case ProtocolConstants.DataType.TIME: - return "TIME"; - case ProtocolConstants.DataType.SMALLINT: - return "SMALLINT"; - case ProtocolConstants.DataType.TINYINT: - return "TINYINT"; - case ProtocolConstants.DataType.DURATION: - return "DURATION"; - default: - return "0x" + Integer.toHexString(protocolCode); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java deleted file mode 100644 index 43e05f17690..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import net.jcip.annotations.NotThreadSafe; - -/** - * Helper class to build {@link UserDefinedType} instances. - * - *

This is not part of the public API, because building user defined types manually can be - * tricky: the fields must be defined in the exact same order as the database definition, otherwise - * you will insert corrupt data in your database. If you decide to use this class anyway, make sure - * that you define fields in the correct order, and that the database schema never changes. - */ -@NotThreadSafe -public class UserDefinedTypeBuilder { - - private final CqlIdentifier keyspaceName; - private final CqlIdentifier typeName; - private boolean frozen; - private final ImmutableList.Builder fieldNames; - private final ImmutableList.Builder fieldTypes; - private AttachmentPoint attachmentPoint = AttachmentPoint.NONE; - - public UserDefinedTypeBuilder(CqlIdentifier keyspaceName, CqlIdentifier typeName) { - this.keyspaceName = keyspaceName; - this.typeName = typeName; - this.fieldNames = ImmutableList.builder(); - this.fieldTypes = ImmutableList.builder(); - } - - public UserDefinedTypeBuilder(String keyspaceName, String typeName) { - this(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(typeName)); - } - - /** - * Adds a new field. The fields in the resulting type will be in the order of the calls to this - * method. - */ - public UserDefinedTypeBuilder withField(CqlIdentifier name, DataType type) { - fieldNames.add(name); - fieldTypes.add(type); - return this; - } - - public UserDefinedTypeBuilder withField(String name, DataType type) { - return withField(CqlIdentifier.fromCql(name), type); - } - - /** Makes the type frozen (by default, it is not). 
*/ - public UserDefinedTypeBuilder frozen() { - this.frozen = true; - return this; - } - - public UserDefinedTypeBuilder withAttachmentPoint(AttachmentPoint attachmentPoint) { - this.attachmentPoint = attachmentPoint; - return this; - } - - public UserDefinedType build() { - return new DefaultUserDefinedType( - keyspaceName, typeName, frozen, fieldNames.build(), fieldTypes.build(), attachmentPoint); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java deleted file mode 100644 index 8496da17fa6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class BigIntCodec implements PrimitiveLongCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LONG; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.BIGINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Long; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Long.class || javaClass == long.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(8); - bytes.putLong(0, value); - return bytes; - } - - @Override - public long decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 8) { - throw new IllegalArgumentException( - "Invalid 64-bits long value, expecting 8 bytes but got " + bytes.remaining()); - } else { - return bytes.getLong(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Long value) { - return (value == null) ? "NULL" : Long.toString(value); - } - - @Nullable - @Override - public Long parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Long.parseLong(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 64-bits long value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java deleted file mode 100644 index 1f5fcd5eeaa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -/** - * A codec that maps the CQL type {@code blob} to the Java type {@link ByteBuffer}. - * - *

If you are looking for a codec mapping the CQL type {@code blob} to the Java type {@code - * byte[]}, you should use {@link SimpleBlobCodec} instead. - */ -@ThreadSafe -public class BlobCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BYTE_BUFFER; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.BLOB; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof ByteBuffer; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return ByteBuffer.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : value.duplicate(); - } - - @Nullable - @Override - public ByteBuffer decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null) ? null : bytes.duplicate(); - } - - @NonNull - @Override - public String format(@Nullable ByteBuffer value) { - return (value == null) ? "NULL" : Bytes.toHexString(value); - } - - @Nullable - @Override - public ByteBuffer parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : Bytes.fromHexString(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java deleted file mode 100644 index af388982be9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveBooleanCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class BooleanCodec implements PrimitiveBooleanCodec { - - private static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[] {1}); - private static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[] {0}); - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BOOLEAN; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.BOOLEAN; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Boolean; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Boolean.class || javaClass == boolean.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(boolean value, @NonNull ProtocolVersion protocolVersion) { - return value ? 
TRUE.duplicate() : FALSE.duplicate(); - } - - @Override - public boolean decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return false; - } else if (bytes.remaining() != 1) { - throw new IllegalArgumentException( - "Invalid boolean value, expecting 1 byte but got " + bytes.remaining()); - } else { - return bytes.get(bytes.position()) != 0; - } - } - - @NonNull - @Override - public String format(@Nullable Boolean value) { - if (value == null) { - return "NULL"; - } else { - return value ? "true" : "false"; - } - } - - @Nullable - @Override - public Boolean parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } else if (value.equalsIgnoreCase(Boolean.FALSE.toString())) { - return false; - } else if (value.equalsIgnoreCase(Boolean.TRUE.toString())) { - return true; - } else { - throw new IllegalArgumentException( - String.format("Cannot parse boolean value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(1); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java deleted file mode 100644 index ab90ba09c20..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CounterCodec extends BigIntCodec { - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.COUNTER; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java deleted file mode 100644 index 90f6f56cf06..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.util.VIntCoding; -import com.datastax.oss.driver.shaded.guava.common.io.ByteArrayDataOutput; -import com.datastax.oss.driver.shaded.guava.common.io.ByteStreams; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.DataInput; -import java.io.IOException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CqlDurationCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.CQL_DURATION; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DURATION; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof CqlDuration; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == CqlDuration.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable CqlDuration value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - long months = value.getMonths(); - long days = value.getDays(); - long nanoseconds = value.getNanoseconds(); - int size = - VIntCoding.computeVIntSize(months) - + VIntCoding.computeVIntSize(days) - + VIntCoding.computeVIntSize(nanoseconds); - ByteArrayDataOutput out = ByteStreams.newDataOutput(size); - try { - 
VIntCoding.writeVInt(months, out); - VIntCoding.writeVInt(days, out); - VIntCoding.writeVInt(nanoseconds, out); - } catch (IOException e) { - // cannot happen - throw new AssertionError(); - } - return ByteBuffer.wrap(out.toByteArray()); - } - - @Nullable - @Override - public CqlDuration decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else { - DataInput in = ByteStreams.newDataInput(Bytes.getArray(bytes)); - try { - int months = (int) VIntCoding.readVInt(in); - int days = (int) VIntCoding.readVInt(in); - long nanoseconds = VIntCoding.readVInt(in); - return CqlDuration.newInstance(months, days, nanoseconds); - } catch (IOException e) { - // cannot happen - throw new AssertionError(); - } - } - } - - @NonNull - @Override - public String format(@Nullable CqlDuration value) { - return (value == null) ? "NULL" : value.toString(); - } - - @Nullable - @Override - public CqlDuration parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : CqlDuration.from(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java deleted file mode 100644 index 61a854e88d8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class CustomCodec implements TypeCodec { - - private final CustomType cqlType; - - public CustomCodec(CustomType cqlType) { - this.cqlType = cqlType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BYTE_BUFFER; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof ByteBuffer; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return ByteBuffer.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : value.duplicate(); - } - - @Nullable - @Override - public ByteBuffer decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null) ? 
null : bytes.duplicate(); - } - - @NonNull - @Override - public String format(@Nullable ByteBuffer value) { - return (value == null) ? "NULL" : Bytes.toHexString(value); - } - - @Nullable - @Override - public ByteBuffer parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : Bytes.fromHexString(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java deleted file mode 100644 index 2fc463ef7d2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static java.lang.Long.parseLong; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.LocalDate; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoUnit; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DateCodec implements TypeCodec { - - private static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LOCAL_DATE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DATE; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof LocalDate; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == LocalDate.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable LocalDate value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - long days = ChronoUnit.DAYS.between(EPOCH, value); - int unsigned = signedToUnsigned((int) days); - return TypeCodecs.INT.encodePrimitive(unsigned, protocolVersion); - } - - @Nullable - @Override - public LocalDate decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - int unsigned = TypeCodecs.INT.decodePrimitive(bytes, protocolVersion); - int signed = unsignedToSigned(unsigned); - 
return EPOCH.plusDays(signed); - } - - @NonNull - @Override - public String format(@Nullable LocalDate value) { - return (value == null) ? "NULL" : Strings.quote(DateTimeFormatter.ISO_LOCAL_DATE.format(value)); - } - - @Nullable - @Override - public LocalDate parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - // single quotes are optional for long literals, mandatory for date patterns - // strip enclosing single quotes, if any - if (Strings.isQuoted(value)) { - value = Strings.unquote(value); - } - - if (Strings.isLongLiteral(value)) { - long raw; - try { - raw = parseLong(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse date value from \"%s\"", value)); - } - int days; - try { - days = cqlDateToDaysSinceEpoch(raw); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format("Cannot parse date value from \"%s\"", value)); - } - return EPOCH.plusDays(days); - } - - try { - return LocalDate.parse(value, DateTimeFormatter.ISO_LOCAL_DATE); - } catch (RuntimeException e) { - throw new IllegalArgumentException( - String.format("Cannot parse date value from \"%s\"", value)); - } - } - - private static int signedToUnsigned(int signed) { - return signed - Integer.MIN_VALUE; - } - - private static int unsignedToSigned(int unsigned) { - return unsigned + Integer.MIN_VALUE; // this relies on overflow for "negative" values - } - - /** - * Converts a raw CQL long representing a numeric DATE literal to the number of days since the - * Epoch. In CQL, numeric DATE literals are longs (unsigned integers actually) between 0 and 2^32 - * - 1, with the epoch in the middle; this method re-centers the epoch at 0. 
- */ - private static int cqlDateToDaysSinceEpoch(long raw) { - if (raw < 0 || raw > MAX_CQL_LONG_VALUE) - throw new IllegalArgumentException( - String.format( - "Numeric literals for DATE must be between 0 and %d (got %d)", - MAX_CQL_LONG_VALUE, raw)); - return (int) (raw - EPOCH_AS_CQL_LONG); - } - - private static final long MAX_CQL_LONG_VALUE = ((1L << 32) - 1); - private static final long EPOCH_AS_CQL_LONG = (1L << 31); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java deleted file mode 100644 index 25650b733cd..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DecimalCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BIG_DECIMAL; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DECIMAL; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof BigDecimal; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return BigDecimal.class.isAssignableFrom(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable BigDecimal value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - BigInteger bi = value.unscaledValue(); - int scale = value.scale(); - byte[] bibytes = bi.toByteArray(); - - ByteBuffer bytes = ByteBuffer.allocate(4 + bibytes.length); - bytes.putInt(scale); - bytes.put(bibytes); - bytes.rewind(); - return bytes; - } - - @Nullable - @Override - public BigDecimal decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else if (bytes.remaining() < 4) { - throw new IllegalArgumentException( - "Invalid decimal value, expecting at least 4 bytes but got " + bytes.remaining()); - } - - bytes = bytes.duplicate(); - int scale = bytes.getInt(); - byte[] bibytes = new byte[bytes.remaining()]; - bytes.get(bibytes); - - 
BigInteger bi = new BigInteger(bibytes); - return new BigDecimal(bi, scale); - } - - @NonNull - @Override - public String format(@Nullable BigDecimal value) { - return (value == null) ? "NULL" : value.toString(); - } - - @Nullable - @Override - public BigDecimal parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : new BigDecimal(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse decimal value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java deleted file mode 100644 index b01847517d9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveDoubleCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class DoubleCodec implements PrimitiveDoubleCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.DOUBLE; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.DOUBLE; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Double; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Double.class || javaClass == double.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(double value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(8); - bytes.putDouble(0, value); - return bytes; - } - - @Override - public double decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 8) { - throw new IllegalArgumentException( - "Invalid 64-bits double value, expecting 8 bytes but got " + bytes.remaining()); - } else { - return bytes.getDouble(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Double value) { - return (value == null) ? "NULL" : Double.toString(value); - } - - @Nullable - @Override - public Double parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Double.parseDouble(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 64-bits double value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java deleted file mode 100644 index fd851edfad3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveFloatCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class FloatCodec implements PrimitiveFloatCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.FLOAT; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.FLOAT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Float; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Float.class || javaClass == float.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(float value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(4); - bytes.putFloat(0, value); - return bytes; - } - - @Override - public float decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 4) { - throw new IllegalArgumentException( - "Invalid 32-bits float value, expecting 4 bytes but got " + bytes.remaining()); - } else { - return bytes.getFloat(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Float value) { - return (value == null) ? "NULL" : Float.toString(value); - } - - @Nullable - @Override - public Float parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Float.parseFloat(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 32-bits float value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(4); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java deleted file mode 100644 index 167c7109bf9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class InetCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.INET_ADDRESS; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INET; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof InetAddress; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return InetAddress.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable InetAddress value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : ByteBuffer.wrap(value.getAddress()); - } - - @Nullable - @Override - public InetAddress decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - try { - return InetAddress.getByAddress(Bytes.getArray(bytes)); - } catch (UnknownHostException e) { - throw new IllegalArgumentException( - "Invalid bytes for inet value, got " + bytes.remaining() + " bytes"); - } - } - - @NonNull - @Override - public String format(@Nullable InetAddress value) { - return (value == null) ? 
"NULL" : ("'" + value.getHostAddress() + "'"); - } - - @Nullable - @Override - public InetAddress parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - value = value.trim(); - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException( - String.format("inet values must be enclosed in single quotes (\"%s\")", value)); - } - try { - return InetAddress.getByName(value.substring(1, value.length() - 1)); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format("Cannot parse inet value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java deleted file mode 100644 index b11b164a445..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class IntCodec implements PrimitiveIntCodec { - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.INTEGER; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Integer; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Integer.class || javaClass == int.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(int value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(4); - bytes.putInt(0, value); - return bytes; - } - - @Override - public int decodePrimitive(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 4) { - throw new IllegalArgumentException( - "Invalid 32-bits integer value, expecting 4 bytes but got " + bytes.remaining()); - } else { - return bytes.getInt(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Integer value) { - return (value == null) ? "NULL" : Integer.toString(value); - } - - @Nullable - @Override - public Integer parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Integer.parseInt(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 32-bits int value from \"%s\"", value)); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(4); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java deleted file mode 100644 index d587bbd5887..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class ListCodec implements TypeCodec> { - - private final DataType cqlType; - private final GenericType> javaType; - private final TypeCodec elementCodec; - - public ListCodec(DataType cqlType, TypeCodec elementCodec) { - this.cqlType = cqlType; - this.javaType = GenericType.listOf(elementCodec.getJavaType()); - this.elementCodec = elementCodec; - Preconditions.checkArgument(cqlType instanceof ListType); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - if (List.class.isAssignableFrom(value.getClass())) { - // runtime type ok, now check element type - List list = (List) value; - return list.isEmpty() || elementCodec.accepts(list.get(0)); - } else { - return false; - } - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable List value, @NonNull ProtocolVersion protocolVersion) { - // An int indicating the number of elements in the list, followed by the elements. Each element - // is a byte array representing the serialized value, preceded by an int indicating its size. 
- if (value == null) { - return null; - } else { - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.size()]; - int toAllocate = 4; // initialize with number of elements - for (ElementT element : value) { - if (element == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer encodedElement; - try { - encodedElement = elementCodec.encode(element, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for element: " + element.getClass()); - } - if (encodedElement == null) { - throw new NullPointerException("Collection elements cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedElement; - toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.size()); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - } - - @Nullable - @Override - public List decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return new ArrayList<>(0); - } else { - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - List result = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - ElementT element; - int elementSize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some computed values in the future -- e.g. 
SELECT ttl(some_collection) - if (elementSize < 0) { - element = null; - } else { - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - element = elementCodec.decode(encodedElement, protocolVersion); - input.position(input.position() + elementSize); - } - result.add(element); - } - return result; - } - } - - @NonNull - @Override - public String format(@Nullable List value) { - if (value == null) { - return "NULL"; - } - StringBuilder sb = new StringBuilder("["); - boolean first = true; - for (ElementT t : value) { - if (first) { - first = false; - } else { - sb.append(","); - } - sb.append(elementCodec.format(t)); - } - sb.append("]"); - return sb.toString(); - } - - @Nullable - @Override - public List parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '[') - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting '[' but got '%c'", - value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == ']') { - return new ArrayList<>(0); - } - - List list = new ArrayList<>(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - list.add(elementCodec.parse(value.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == ']') return list; - if (value.charAt(idx++) != ',') - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new 
IllegalArgumentException( - String.format("Malformed list value \"%s\", missing closing ']'", value)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java deleted file mode 100644 index 999f41bf207..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Map; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class MapCodec implements TypeCodec> { - - private final DataType cqlType; - private final GenericType> javaType; - private final TypeCodec keyCodec; - private final TypeCodec valueCodec; - - public MapCodec(DataType cqlType, TypeCodec keyCodec, TypeCodec valueCodec) { - this.cqlType = cqlType; - this.keyCodec = keyCodec; - this.valueCodec = valueCodec; - this.javaType = GenericType.mapOf(keyCodec.getJavaType(), valueCodec.getJavaType()); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - if (value instanceof Map) { - // runtime type ok, now check key and value types - Map map = (Map) value; - if (map.isEmpty()) { - return true; - } - Map.Entry entry = map.entrySet().iterator().next(); - return keyCodec.accepts(entry.getKey()) && valueCodec.accepts(entry.getValue()); - } - return false; - } - - @Override - @Nullable - public ByteBuffer encode( - @Nullable Map value, @NonNull ProtocolVersion protocolVersion) { - // An int indicating the number of key/value pairs in the map, followed by the pairs. Each pair - // is a byte array representing the serialized key, preceded by an int indicating its size, - // followed by the value in the same format. 
- if (value == null) { - return null; - } else { - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.size() * 2]; - int toAllocate = 4; // initialize with number of elements - for (Map.Entry entry : value.entrySet()) { - if (entry.getKey() == null) { - throw new NullPointerException("Map keys cannot be null"); - } - if (entry.getValue() == null) { - throw new NullPointerException("Map values cannot be null"); - } - ByteBuffer encodedKey; - try { - encodedKey = keyCodec.encode(entry.getKey(), protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for key: " + entry.getKey().getClass()); - } - if (encodedKey == null) { - throw new NullPointerException("Map keys cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedKey; - toAllocate += 4 + encodedKey.remaining(); // the key preceded by its size - ByteBuffer encodedValue; - try { - encodedValue = valueCodec.encode(entry.getValue(), protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException( - "Invalid type for value: " + entry.getValue().getClass()); - } - if (encodedValue == null) { - throw new NullPointerException("Map values cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedValue; - toAllocate += 4 + encodedValue.remaining(); // the value preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.size()); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - } - - @Nullable - @Override - public Map decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return new LinkedHashMap<>(0); - } else { - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - Map result = Maps.newLinkedHashMapWithExpectedSize(size); - for (int i = 0; i < size; i++) { - KeyT 
key; - int keySize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some computed values in the future -- e.g. SELECT ttl(some_collection) - if (keySize < 0) { - key = null; - } else { - ByteBuffer encodedKey = input.slice(); - encodedKey.limit(keySize); - key = keyCodec.decode(encodedKey, protocolVersion); - input.position(input.position() + keySize); - } - ValueT value; - int valueSize = input.getInt(); - if (valueSize < 0) { - value = null; - } else { - ByteBuffer encodedValue = input.slice(); - encodedValue.limit(valueSize); - value = valueCodec.decode(encodedValue, protocolVersion); - input.position(input.position() + valueSize); - } - result.put(key, value); - } - return result; - } - } - - @NonNull - @Override - public String format(@Nullable Map value) { - if (value == null) { - return "NULL"; - } - StringBuilder sb = new StringBuilder(); - sb.append("{"); - boolean first = true; - for (Map.Entry e : value.entrySet()) { - if (first) { - first = false; - } else { - sb.append(","); - } - sb.append(keyCodec.format(e.getKey())); - sb.append(":"); - sb.append(valueCodec.format(e.getValue())); - } - sb.append("}"); - return sb.toString(); - } - - @Nullable - @Override - public Map parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '{') { - throw new IllegalArgumentException( - String.format( - "cannot parse map value from \"%s\", at character %d expecting '{' but got '%c'", - value, idx, value.charAt(idx))); - } - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == '}') { - return new LinkedHashMap<>(0); - } - - Map map = new LinkedHashMap<>(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - 
String.format( - "Cannot parse map value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - KeyT k = keyCodec.parse(value.substring(idx, n)); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx++) != ':') { - throw new IllegalArgumentException( - String.format( - "Cannot parse map value from \"%s\", at character %d expecting ':' but got '%c'", - value, idx, value.charAt(idx))); - } - idx = ParseUtils.skipSpaces(value, idx); - - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse map value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - ValueT v = valueCodec.parse(value.substring(idx, n)); - idx = n; - - map.put(k, v); - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == '}') { - return map; - } - if (value.charAt(idx++) != ',') { - throw new IllegalArgumentException( - String.format( - "Cannot parse map value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - } - - idx = ParseUtils.skipSpaces(value, idx); - } - throw new IllegalArgumentException( - String.format("Malformed map value \"%s\", missing closing '}'", value)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java deleted file mode 100644 index a52130a093d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -public class ParseUtils { - - /** - * Returns the index of the first character in toParse from idx that is not a "space". - * - * @param toParse the string to skip space on. - * @param idx the index to start skipping space from. - * @return the index of the first character in toParse from idx that is not a "space. - */ - public static int skipSpaces(String toParse, int idx) { - while (idx < toParse.length() && isBlank(toParse.charAt(idx))) ++idx; - return idx; - } - - /** - * Assuming that idx points to the beginning of a CQL value in toParse, returns the index of the - * first character after this value. - * - * @param toParse the string to skip a value form. - * @param idx the index to start parsing a value from. - * @return the index ending the CQL value starting at {@code idx}. - * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL value. 
- */ - public static int skipCQLValue(String toParse, int idx) { - if (idx >= toParse.length()) throw new IllegalArgumentException(); - - if (isBlank(toParse.charAt(idx))) throw new IllegalArgumentException(); - - int cbrackets = 0; - int sbrackets = 0; - int parens = 0; - boolean inString = false; - - do { - char c = toParse.charAt(idx); - if (inString) { - if (c == '\'') { - if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\'') { - ++idx; // this is an escaped quote, skip it - } else { - inString = false; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } - } - // Skip any other character - } else if (c == '\'') { - inString = true; - } else if (c == '{') { - ++cbrackets; - } else if (c == '[') { - ++sbrackets; - } else if (c == '(') { - ++parens; - } else if (c == '}') { - if (cbrackets == 0) return idx; - - --cbrackets; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } else if (c == ']') { - if (sbrackets == 0) return idx; - - --sbrackets; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } else if (c == ')') { - if (parens == 0) return idx; - - --parens; - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx + 1; - } else if (isBlank(c) || !isCqlIdentifierChar(c)) { - if (cbrackets == 0 && sbrackets == 0 && parens == 0) return idx; - } - } while (++idx < toParse.length()); - - if (inString || cbrackets != 0 || sbrackets != 0 || parens != 0) - throw new IllegalArgumentException(); - return idx; - } - - /** - * Assuming that idx points to the beginning of a CQL identifier in toParse, returns the index of - * the first character after this identifier. - * - * @param toParse the string to skip an identifier from. - * @param idx the index to start parsing an identifier from. - * @return the index ending the CQL identifier starting at {@code idx}. - * @throws IllegalArgumentException if idx doesn't point to the start of a valid CQL identifier. 
- */ - public static int skipCQLId(String toParse, int idx) { - if (idx >= toParse.length()) throw new IllegalArgumentException(); - - char c = toParse.charAt(idx); - if (isCqlIdentifierChar(c)) { - while (idx < toParse.length() && isCqlIdentifierChar(toParse.charAt(idx))) idx++; - return idx; - } - - if (c != '"') throw new IllegalArgumentException(); - - while (++idx < toParse.length()) { - c = toParse.charAt(idx); - if (c != '"') continue; - - if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\"') - ++idx; // this is an escaped double quote, skip it - else return idx + 1; - } - throw new IllegalArgumentException(); - } - - public static boolean isBlank(int c) { - return c == ' ' || c == '\t' || c == '\n'; - } - - public static boolean isCqlIdentifierChar(int c) { - return (c >= '0' && c <= '9') - || (c >= 'a' && c <= 'z') - || (c >= 'A' && c <= 'Z') - || c == '-' - || c == '+' - || c == '.' - || c == '_' - || c == '&'; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java deleted file mode 100644 index fc4c0887516..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.LinkedHashSet; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SetCodec implements TypeCodec> { - - private final DataType cqlType; - private final GenericType> javaType; - private final TypeCodec elementCodec; - - public SetCodec(DataType cqlType, TypeCodec elementCodec) { - this.cqlType = cqlType; - this.javaType = GenericType.setOf(elementCodec.getJavaType()); - this.elementCodec = elementCodec; - Preconditions.checkArgument(cqlType instanceof SetType); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - if (Set.class.isAssignableFrom(value.getClass())) { - // runtime type ok, now check element type - Set set = (Set) value; - return set.isEmpty() || 
elementCodec.accepts(set.iterator().next()); - } else { - return false; - } - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable Set value, @NonNull ProtocolVersion protocolVersion) { - // An int indicating the number of elements in the set, followed by the elements. Each element - // is a byte array representing the serialized value, preceded by an int indicating its size. - if (value == null) { - return null; - } else { - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.size()]; - int toAllocate = 4; // initialize with number of elements - for (ElementT element : value) { - if (element == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer encodedElement; - try { - encodedElement = elementCodec.encode(element, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for element: " + element.getClass()); - } - if (encodedElement == null) { - throw new NullPointerException("Collection elements cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedElement; - toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.size()); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - } - - @Nullable - @Override - public Set decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return new LinkedHashSet<>(0); - } else { - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - Set result = Sets.newLinkedHashSetWithExpectedSize(size); - for (int i = 0; i < size; i++) { - ElementT element; - int elementSize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some 
computed values in the future -- e.g. SELECT ttl(some_collection) - if (elementSize < 0) { - element = null; - } else { - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - element = elementCodec.decode(encodedElement, protocolVersion); - input.position(input.position() + elementSize); - } - result.add(element); - } - return result; - } - } - - @NonNull - @Override - public String format(@Nullable Set value) { - if (value == null) { - return "NULL"; - } - StringBuilder sb = new StringBuilder("{"); - boolean first = true; - for (ElementT t : value) { - if (first) { - first = false; - } else { - sb.append(","); - } - sb.append(elementCodec.format(t)); - } - sb.append("}"); - return sb.toString(); - } - - @Nullable - @Override - public Set parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) return null; - - int idx = ParseUtils.skipSpaces(value, 0); - if (value.charAt(idx++) != '{') - throw new IllegalArgumentException( - String.format( - "Cannot parse set value from \"%s\", at character %d expecting '{' but got '%c'", - value, idx, value.charAt(idx))); - - idx = ParseUtils.skipSpaces(value, idx); - - if (value.charAt(idx) == '}') { - return new LinkedHashSet<>(0); - } - - Set set = new LinkedHashSet<>(); - while (idx < value.length()) { - int n; - try { - n = ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse set value from \"%s\", invalid CQL value at character %d", - value, idx), - e); - } - - set.add(elementCodec.parse(value.substring(idx, n))); - idx = n; - - idx = ParseUtils.skipSpaces(value, idx); - if (value.charAt(idx) == '}') return set; - if (value.charAt(idx++) != ',') - throw new IllegalArgumentException( - String.format( - "Cannot parse set value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - - idx = 
ParseUtils.skipSpaces(value, idx); - } - throw new IllegalArgumentException( - String.format("Malformed set value \"%s\", missing closing '}'", value)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java deleted file mode 100644 index 9f90feb8e7c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.data.ByteUtils; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.extras.array.ByteListToArrayCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code blob} to the Java type {@code byte[]}. - * - *

If you are looking for a codec mapping the CQL type {@code blob} to the Java type {@link - * ByteBuffer}, you should use {@link BlobCodec} instead. - * - *

If you are looking for a codec mapping the CQL type {@code list { - - public SimpleBlobCodec() { - super(TypeCodecs.BLOB, GenericType.of(byte[].class)); - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof byte[]; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return byte[].class.equals(javaClass); - } - - @Nullable - @Override - protected byte[] innerToOuter(@Nullable ByteBuffer value) { - return value == null ? null : ByteUtils.getArray(value); - } - - @Nullable - @Override - protected ByteBuffer outerToInner(@Nullable byte[] value) { - return value == null ? null : ByteBuffer.wrap(value); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java deleted file mode 100644 index 08beb0b34c5..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveShortCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class SmallIntCodec implements PrimitiveShortCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.SHORT; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.SMALLINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof Short; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Short.class || javaClass == short.class; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(short value, @NonNull ProtocolVersion protocolVersion) { - ByteBuffer bytes = ByteBuffer.allocate(2); - bytes.putShort(0, value); - return bytes; - } - - @Override - public short decodePrimitive( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return 0; - } else if (bytes.remaining() != 2) { - throw new IllegalArgumentException( - "Invalid 16-bits integer value, expecting 2 bytes but got " + bytes.remaining()); - } else { - return bytes.getShort(bytes.position()); - } - } - - @NonNull - @Override - public String format(@Nullable Short value) { - return (value == null) ? "NULL" : Short.toString(value); - } - - @Nullable - @Override - public Short parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : Short.parseShort(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse 16-bits int value from \"%s\"", value)); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java deleted file mode 100644 index 2a9acdd8c47..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.netty.util.concurrent.FastThreadLocal; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.CharacterCodingException; -import java.nio.charset.Charset; -import java.nio.charset.CharsetDecoder; -import java.nio.charset.CharsetEncoder; -import java.nio.charset.CodingErrorAction; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class StringCodec implements TypeCodec { - - private final DataType cqlType; - private final FastThreadLocal charsetEncoder; - private final FastThreadLocal charsetDecoder; - - public StringCodec(@NonNull DataType cqlType, @NonNull Charset charset) { - this.cqlType = cqlType; - charsetEncoder = - new FastThreadLocal() { - @Override - protected CharsetEncoder initialValue() throws Exception { - return charset - .newEncoder() - .onMalformedInput(CodingErrorAction.REPORT) - .onUnmappableCharacter(CodingErrorAction.REPORT); - } - }; - charsetDecoder = - new FastThreadLocal() { - @Override - protected CharsetDecoder initialValue() throws Exception { - return charset - .newDecoder() - .onMalformedInput(CodingErrorAction.REPORT) - .onUnmappableCharacter(CodingErrorAction.REPORT); - } - }; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.STRING; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof String; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - 
return javaClass == String.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable String value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try { - return charsetEncoder.get().encode(CharBuffer.wrap(value)); - } catch (CharacterCodingException e) { - throw new IllegalArgumentException(e); - } - } - - @Nullable - @Override - public String decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } else if (bytes.remaining() == 0) { - return ""; - } else { - try { - return charsetDecoder.get().decode(bytes.duplicate()).toString(); - } catch (CharacterCodingException e) { - throw new IllegalArgumentException(e); - } - } - } - - @NonNull - @Override - public String format(@Nullable String value) { - return (value == null) ? "NULL" : Strings.quote(value); - } - - @Nullable - @Override - public String parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } else if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException( - "text or varchar values must be enclosed by single quotes"); - } else { - return Strings.unquote(value); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java deleted file mode 100644 index 4977687342d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
package com.datastax.oss.driver.internal.core.type.codec;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import com.datastax.oss.driver.internal.core.util.Strings;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.nio.ByteBuffer;
import java.time.LocalTime;
import java.time.format.DateTimeFormatter;
import net.jcip.annotations.ThreadSafe;

/**
 * A codec that maps the CQL time type to Java's {@link LocalTime}.
 *
 * <p>On the wire, a time value is a 64-bit integer counting nanoseconds since midnight; encoding
 * and decoding delegate to {@link TypeCodecs#BIGINT}.
 */
@ThreadSafe
public class TimeCodec implements TypeCodec<LocalTime> {

  // DateTimeFormatter is immutable and thread-safe, so a single shared instance is fine.
  private static final DateTimeFormatter FORMATTER =
      DateTimeFormatter.ofPattern("HH:mm:ss.SSSSSSSSS");

  @NonNull
  @Override
  public GenericType<LocalTime> getJavaType() {
    return GenericType.LOCAL_TIME;
  }

  @NonNull
  @Override
  public DataType getCqlType() {
    return DataTypes.TIME;
  }

  @Override
  public boolean accepts(@NonNull Object value) {
    return value instanceof LocalTime;
  }

  @Override
  public boolean accepts(@NonNull Class<?> javaClass) {
    return javaClass == LocalTime.class;
  }

  @Nullable
  @Override
  public ByteBuffer encode(@Nullable LocalTime value, @NonNull ProtocolVersion protocolVersion) {
    return (value == null)
        ? null
        : TypeCodecs.BIGINT.encodePrimitive(value.toNanoOfDay(), protocolVersion);
  }

  @Nullable
  @Override
  public LocalTime decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) {
    if (bytes == null || bytes.remaining() == 0) {
      return null;
    } else {
      long nanosOfDay = TypeCodecs.BIGINT.decodePrimitive(bytes, protocolVersion);
      return LocalTime.ofNanoOfDay(nanosOfDay);
    }
  }

  @NonNull
  @Override
  public String format(@Nullable LocalTime value) {
    return (value == null) ? "NULL" : Strings.quote(FORMATTER.format(value));
  }

  /**
   * Parses a CQL time literal: either a quoted {@code HH:mm:ss[.nnnnnnnnn]} string or a quoted
   * long literal counting nanoseconds since midnight.
   *
   * @throws IllegalArgumentException if the literal is not quoted or cannot be parsed.
   */
  @Nullable
  @Override
  public LocalTime parse(@Nullable String value) {
    if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) {
      return null;
    }

    // enclosing single quotes required, even for long literals
    if (!Strings.isQuoted(value)) {
      throw new IllegalArgumentException("time values must be enclosed by single quotes");
    }
    value = value.substring(1, value.length() - 1);

    if (Strings.isLongLiteral(value)) {
      try {
        return LocalTime.ofNanoOfDay(Long.parseLong(value));
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
            String.format("Cannot parse time value from \"%s\"", value), e);
      }
    }

    try {
      return LocalTime.parse(value);
    } catch (RuntimeException e) {
      throw new IllegalArgumentException(
          String.format("Cannot parse time value from \"%s\"", value), e);
    }
  }
}
package com.datastax.oss.driver.internal.core.type.codec;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.DataTypes;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.nio.ByteBuffer;
import java.util.UUID;
import net.jcip.annotations.ThreadSafe;

/**
 * A codec that handles the CQL timeuuid type.
 *
 * <p>The wire format is identical to the regular uuid type, so serialization is inherited from
 * {@link UuidCodec}; this subclass only adds validation that the value is a Type 1 (time-based)
 * UUID where the contract allows it ({@code encode} and {@code format}).
 */
@ThreadSafe
public class TimeUuidCodec extends UuidCodec {
  @NonNull
  @Override
  public DataType getCqlType() {
    return DataTypes.TIMEUUID;
  }

  @Override
  public boolean accepts(@NonNull Object value) {
    // only time-based UUIDs are valid timeuuid values
    return value instanceof UUID && ((UUID) value).version() == 1;
  }

  @Override
  public boolean accepts(@NonNull Class<?> javaClass) {
    return javaClass == UUID.class;
  }

  @Nullable
  @Override
  public ByteBuffer encode(@Nullable UUID value, @NonNull ProtocolVersion protocolVersion) {
    if (value == null) {
      return null;
    } else if (value.version() != 1) {
      throw new IllegalArgumentException(
          String.format("%s is not a Type 1 (time-based) UUID", value));
    } else {
      return super.encode(value, protocolVersion);
    }
  }

  @NonNull
  @Override
  public String format(@Nullable UUID value) {
    if (value == null) {
      return "NULL";
    } else if (value.version() != 1) {
      throw new IllegalArgumentException(
          String.format("%s is not a Type 1 (time-based) UUID", value));
    } else {
      return super.format(value);
    }
  }
}
package com.datastax.oss.driver.internal.core.type.codec;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import com.datastax.oss.driver.internal.core.util.Strings;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import io.netty.util.concurrent.FastThreadLocal;
import java.nio.ByteBuffer;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.util.Date;
import java.util.Optional;
import java.util.TimeZone;
import net.jcip.annotations.ThreadSafe;

/**
 * A codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link Instant}.
 *
 * <p>Implementation notes:
 *
 * <ol>
 *   <li>Because {@code Instant} uses a precision of nanoseconds, whereas the timestamp type uses a
 *       precision of milliseconds, truncation will happen for any excess precision information as
 *       though the amount in nanoseconds was subject to integer division by one million.
 *   <li>For compatibility reasons, this codec uses the legacy {@link SimpleDateFormat} API
 *       internally when parsing and formatting, and converts from {@link Instant} to {@link Date}
 *       and vice versa. Specially when parsing, this may yield different results as compared to
 *       what the newer Java Time API parsers would have produced for the same input.
 *   <li>Also, {@code Instant} can store points on the time-line further in the future and further
 *       in the past than {@code Date}. This codec will throw an exception when attempting to parse
 *       or format an {@code Instant} falling in this category.
 * </ol>
 *
 * <h2>Accepted date-time formats</h2>
 *
 * <p>All CQL timestamp literal formats accepted by Apache Cassandra(R) 3.0 and higher are
 * recognized when parsing; see {@link #DATE_STRING_PATTERNS} for the exhaustive list. They fall
 * into three groups: date-time patterns separated by {@code 'T'}, date-time patterns separated by
 * whitespace, and date-only patterns — each with or without an ISO-8601 ({@code X}, {@code XX},
 * {@code XXX}) or generic ({@code z}) time zone.
 *
 * <p>By default, when parsing, timestamp literals that do not include any time zone information
 * will be interpreted using the system's {@linkplain ZoneId#systemDefault() default time zone}.
 * This is intended to mimic Apache Cassandra(R)'s own parsing behavior (see {@code
 * org.apache.cassandra.serializers.TimestampSerializer}). The default time zone can be modified
 * using the {@linkplain TimestampCodec#TimestampCodec(ZoneId) one-arg constructor} that takes a
 * custom {@link ZoneId} as an argument.
 *
 * <p>When formatting, the pattern used is always {@code yyyy-MM-dd'T'HH:mm:ss.SSSXXX} and the time
 * zone is either the system's default one, or the one that was provided when instantiating the
 * codec.
 */
@ThreadSafe
public class TimestampCodec implements TypeCodec<Instant> {

  /**
   * Patterns accepted by Apache Cassandra(R) 3.0 and higher when parsing CQL literals.
   *
   * <p>Note that Cassandra's TimestampSerializer declares many more patterns but some of them are
   * equivalent when parsing.
   */
  private static final String[] DATE_STRING_PATTERNS =
      new String[] {
        // 1) date-time patterns separated by 'T'
        // (declared first because none of the others are ISO compliant, but some of these are)
        // 1.a) without time zone
        "yyyy-MM-dd'T'HH:mm",
        "yyyy-MM-dd'T'HH:mm:ss",
        "yyyy-MM-dd'T'HH:mm:ss.SSS",
        // 1.b) with ISO-8601 time zone
        "yyyy-MM-dd'T'HH:mmX",
        "yyyy-MM-dd'T'HH:mmXX",
        "yyyy-MM-dd'T'HH:mmXXX",
        "yyyy-MM-dd'T'HH:mm:ssX",
        "yyyy-MM-dd'T'HH:mm:ssXX",
        "yyyy-MM-dd'T'HH:mm:ssXXX",
        "yyyy-MM-dd'T'HH:mm:ss.SSSX",
        "yyyy-MM-dd'T'HH:mm:ss.SSSXX",
        "yyyy-MM-dd'T'HH:mm:ss.SSSXXX",
        // 1.c) with generic time zone
        "yyyy-MM-dd'T'HH:mm z",
        "yyyy-MM-dd'T'HH:mm:ss z",
        "yyyy-MM-dd'T'HH:mm:ss.SSS z",
        // 2) date-time patterns separated by whitespace
        // 2.a) without time zone
        "yyyy-MM-dd HH:mm",
        "yyyy-MM-dd HH:mm:ss",
        "yyyy-MM-dd HH:mm:ss.SSS",
        // 2.b) with ISO-8601 time zone
        "yyyy-MM-dd HH:mmX",
        "yyyy-MM-dd HH:mmXX",
        "yyyy-MM-dd HH:mmXXX",
        "yyyy-MM-dd HH:mm:ssX",
        "yyyy-MM-dd HH:mm:ssXX",
        "yyyy-MM-dd HH:mm:ssXXX",
        "yyyy-MM-dd HH:mm:ss.SSSX",
        "yyyy-MM-dd HH:mm:ss.SSSXX",
        "yyyy-MM-dd HH:mm:ss.SSSXXX",
        // 2.c) with generic time zone
        "yyyy-MM-dd HH:mm z",
        "yyyy-MM-dd HH:mm:ss z",
        "yyyy-MM-dd HH:mm:ss.SSS z",
        // 3) date patterns without time
        // 3.a) without time zone
        "yyyy-MM-dd",
        // 3.b) with ISO-8601 time zone
        "yyyy-MM-ddX",
        "yyyy-MM-ddXX",
        "yyyy-MM-ddXXX",
        // 3.c) with generic time zone
        "yyyy-MM-dd z"
      };

  // SimpleDateFormat is mutable and not thread-safe: keep one instance per thread.
  private final FastThreadLocal<SimpleDateFormat> parser;

  private final FastThreadLocal<SimpleDateFormat> formatter;

  /**
   * Creates a new {@code TimestampCodec} that uses the system's {@linkplain ZoneId#systemDefault()
   * default time zone} to parse timestamp literals that do not include any time zone information.
   */
  public TimestampCodec() {
    this(ZoneId.systemDefault());
  }

  /**
   * Creates a new {@code TimestampCodec} that uses the given {@link ZoneId} to parse timestamp
   * literals that do not include any time zone information.
   */
  public TimestampCodec(ZoneId defaultZoneId) {
    parser =
        new FastThreadLocal<SimpleDateFormat>() {
          @Override
          protected SimpleDateFormat initialValue() {
            SimpleDateFormat parser = new SimpleDateFormat();
            parser.setLenient(false);
            parser.setTimeZone(TimeZone.getTimeZone(defaultZoneId));
            return parser;
          }
        };
    formatter =
        new FastThreadLocal<SimpleDateFormat>() {
          @Override
          protected SimpleDateFormat initialValue() {
            SimpleDateFormat parser = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
            parser.setTimeZone(TimeZone.getTimeZone(defaultZoneId));
            return parser;
          }
        };
  }

  @NonNull
  @Override
  public GenericType<Instant> getJavaType() {
    return GenericType.INSTANT;
  }

  @NonNull
  @Override
  public DataType getCqlType() {
    return DataTypes.TIMESTAMP;
  }

  @Override
  public boolean accepts(@NonNull Object value) {
    return value instanceof Instant;
  }

  @Override
  public boolean accepts(@NonNull Class<?> javaClass) {
    return javaClass == Instant.class;
  }

  @Nullable
  @Override
  public ByteBuffer encode(@Nullable Instant value, @NonNull ProtocolVersion protocolVersion) {
    // wire format is a 64-bit count of milliseconds since the epoch
    return (value == null)
        ? null
        : TypeCodecs.BIGINT.encodePrimitive(value.toEpochMilli(), protocolVersion);
  }

  @Nullable
  @Override
  public Instant decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) {
    return (bytes == null || bytes.remaining() == 0)
        ? null
        : Instant.ofEpochMilli(TypeCodecs.BIGINT.decodePrimitive(bytes, protocolVersion));
  }

  @NonNull
  @Override
  public String format(@Nullable Instant value) {
    return (value == null) ? "NULL" : Strings.quote(formatter.get().format(Date.from(value)));
  }

  /**
   * Parses a CQL timestamp literal: a (quoted or unquoted) epoch-milliseconds long, or a quoted
   * date-time string matching one of {@link #DATE_STRING_PATTERNS}.
   *
   * @throws IllegalArgumentException if the literal cannot be parsed by any accepted pattern.
   */
  @Nullable
  @Override
  public Instant parse(@Nullable String value) {
    if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) {
      return null;
    }
    String unquoted = Strings.unquote(value);
    if (Strings.isLongLiteral(unquoted)) {
      // Numeric literals may be quoted or not
      try {
        return Instant.ofEpochMilli(Long.parseLong(unquoted));
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
            String.format("Cannot parse timestamp value from \"%s\"", value), e);
      }
    } else {
      // Alphanumeric literals must be quoted
      if (!Strings.isQuoted(value)) {
        throw new IllegalArgumentException(
            String.format("Alphanumeric timestamp literal must be quoted: \"%s\"", value));
      }
      SimpleDateFormat parser = this.parser.get();
      TimeZone timeZone = parser.getTimeZone();
      ParsePosition pos = new ParsePosition(0);
      for (String pattern : DATE_STRING_PATTERNS) {
        parser.applyPattern(pattern);
        pos.setIndex(0);
        try {
          Date date = parser.parse(unquoted, pos);
          // only accept a match that consumed the whole literal
          if (date != null && pos.getIndex() == unquoted.length()) {
            return date.toInstant();
          }
        } finally {
          // restore the parser's default time zone, it might have been modified by the call to
          // parse()
          parser.setTimeZone(timeZone);
        }
      }
      throw new IllegalArgumentException(
          String.format("Cannot parse timestamp value from \"%s\"", value));
    }
  }

  @NonNull
  @Override
  public Optional<Integer> serializedSize() {
    // fixed 8-byte (bigint) wire representation
    return Optional.of(8);
  }
}
package com.datastax.oss.driver.internal.core.type.codec;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.codec.PrimitiveByteCodec;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.nio.ByteBuffer;
import net.jcip.annotations.ThreadSafe;

/**
 * A codec that maps the CQL tinyint type (a single signed byte) to Java's {@code byte} /
 * {@link Byte}.
 */
@ThreadSafe
public class TinyIntCodec implements PrimitiveByteCodec {
  @NonNull
  @Override
  public GenericType<Byte> getJavaType() {
    return GenericType.BYTE;
  }

  @NonNull
  @Override
  public DataType getCqlType() {
    return DataTypes.TINYINT;
  }

  @Override
  public boolean accepts(@NonNull Object value) {
    return value instanceof Byte;
  }

  @Override
  public boolean accepts(@NonNull Class<?> javaClass) {
    return javaClass == Byte.class || javaClass == byte.class;
  }

  @Nullable
  @Override
  public ByteBuffer encodePrimitive(byte value, @NonNull ProtocolVersion protocolVersion) {
    ByteBuffer bytes = ByteBuffer.allocate(1);
    bytes.put(0, value);
    return bytes;
  }

  @Override
  public byte decodePrimitive(
      @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) {
    if (bytes == null || bytes.remaining() == 0) {
      // absent/empty value decodes to the primitive default
      return 0;
    } else if (bytes.remaining() != 1) {
      throw new IllegalArgumentException(
          "Invalid 8-bits integer value, expecting 1 byte but got " + bytes.remaining());
    } else {
      // absolute read: leave the buffer's position untouched
      return bytes.get(bytes.position());
    }
  }

  @NonNull
  @Override
  public String format(@Nullable Byte value) {
    return (value == null) ? "NULL" : Byte.toString(value);
  }

  @Nullable
  @Override
  public Byte parse(@Nullable String value) {
    try {
      return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL"))
          ? null
          : Byte.parseByte(value);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(
          String.format("Cannot parse 8-bits int value from \"%s\"", value), e);
    }
  }
}
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class TupleCodec implements TypeCodec { - - private final TupleType cqlType; - - public TupleCodec(@NonNull TupleType cqlType) { - this.cqlType = cqlType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.TUPLE_VALUE; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return (value instanceof TupleValue) && ((TupleValue) value).getType().equals(cqlType); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return TupleValue.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable TupleValue value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - if (!value.getType().equals(cqlType)) { - throw new IllegalArgumentException( - String.format("Invalid tuple type, expected %s but got %s", cqlType, value.getType())); - } - // Encoding: each field as a [bytes] value ([bytes] = int length + contents, null is - // represented by -1) - int toAllocate = 0; - for (int i = 0; i < value.size(); i++) { - ByteBuffer field = 
value.getBytesUnsafe(i); - toAllocate += 4 + (field == null ? 0 : field.remaining()); - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - for (int i = 0; i < value.size(); i++) { - ByteBuffer field = value.getBytesUnsafe(i); - if (field == null) { - result.putInt(-1); - } else { - result.putInt(field.remaining()); - result.put(field.duplicate()); - } - } - return (ByteBuffer) result.flip(); - } - - @Nullable - @Override - public TupleValue decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - // empty byte buffers will result in empty values - try { - ByteBuffer input = bytes.duplicate(); - TupleValue value = cqlType.newValue(); - int i = 0; - while (input.hasRemaining()) { - if (i > cqlType.getComponentTypes().size()) { - throw new IllegalArgumentException( - String.format( - "Too many fields in encoded tuple, expected %d", - cqlType.getComponentTypes().size())); - } - int elementSize = input.getInt(); - ByteBuffer element; - if (elementSize < 0) { - element = null; - } else { - element = input.slice(); - element.limit(elementSize); - input.position(input.position() + elementSize); - } - value = value.setBytesUnsafe(i, element); - i += 1; - } - return value; - } catch (BufferUnderflowException e) { - throw new IllegalArgumentException("Not enough bytes to deserialize a tuple", e); - } - } - - @NonNull - @Override - public String format(@Nullable TupleValue value) { - if (value == null) { - return "NULL"; - } - if (!value.getType().equals(cqlType)) { - throw new IllegalArgumentException( - String.format("Invalid tuple type, expected %s but got %s", cqlType, value.getType())); - } - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - StringBuilder sb = new StringBuilder("("); - boolean first = true; - for (int i = 0; i < value.size(); i++) { - if (first) { - first = false; - } else { - sb.append(","); - } - DataType elementType = 
cqlType.getComponentTypes().get(i); - TypeCodec codec = registry.codecFor(elementType); - sb.append(codec.format(value.get(i, codec))); - } - sb.append(")"); - return sb.toString(); - } - - @Nullable - @Override - public TupleValue parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - TupleValue tuple = cqlType.newValue(); - int length = value.length(); - - int position = ParseUtils.skipSpaces(value, 0); - if (value.charAt(position) != '(') { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at character %d expecting '(' but got '%c'", - value, position, value.charAt(position))); - } - - position++; - position = ParseUtils.skipSpaces(value, position); - - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - int field = 0; - while (position < length) { - if (value.charAt(position) == ')') { - position = ParseUtils.skipSpaces(value, position + 1); - if (position == length) { - return tuple; - } - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at character %d expecting EOF or blank, but got \"%s\"", - value, position, value.substring(position))); - } - int n; - try { - n = ParseUtils.skipCQLValue(value, position); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", invalid CQL value at field %d (character %d)", - value, field, position), - e); - } - - String fieldValue = value.substring(position, n); - DataType elementType = cqlType.getComponentTypes().get(field); - TypeCodec codec = registry.codecFor(elementType); - Object parsed; - try { - parsed = codec.parse(fieldValue); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", invalid CQL value at field %d (character %d): %s", - value, field, position, e.getMessage()), - e); 
- } - tuple = tuple.set(field, parsed, codec); - - position = n; - - position = ParseUtils.skipSpaces(value, position); - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting ',' or ')', but got EOF", - value, field, position)); - } - if (value.charAt(position) == ')') { - continue; - } - if (value.charAt(position) != ',') { - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting ',' but got '%c'", - value, field, position, value.charAt(position))); - } - ++position; // skip ',' - - position = ParseUtils.skipSpaces(value, position); - field += 1; - } - throw new IllegalArgumentException( - String.format( - "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting CQL value or ')', got EOF", - value, field, position)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java deleted file mode 100644 index 5d0a379f761..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class UdtCodec implements TypeCodec { - - private static final Logger LOG = LoggerFactory.getLogger(UdtCodec.class); - - private final UserDefinedType cqlType; - - public UdtCodec(@NonNull UserDefinedType cqlType) { - this.cqlType = cqlType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.UDT_VALUE; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof UdtValue && ((UdtValue) value).getType().equals(cqlType); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return UdtValue.class.equals(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable UdtValue value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - if (!value.getType().equals(cqlType)) { - throw new IllegalArgumentException( - String.format( - "Invalid user defined type, expected %s but got %s", cqlType, value.getType())); - 
} - // Encoding: each field as a [bytes] value ([bytes] = int length + contents, null is - // represented by -1) - int toAllocate = 0; - int size = cqlType.getFieldTypes().size(); - for (int i = 0; i < size; i++) { - ByteBuffer field = value.getBytesUnsafe(i); - toAllocate += 4 + (field == null ? 0 : field.remaining()); - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - for (int i = 0; i < value.size(); i++) { - ByteBuffer field = value.getBytesUnsafe(i); - if (field == null) { - result.putInt(-1); - } else { - result.putInt(field.remaining()); - result.put(field.duplicate()); - } - } - return (ByteBuffer) result.flip(); - } - - @Nullable - @Override - public UdtValue decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - // empty byte buffers will result in empty values - try { - ByteBuffer input = bytes.duplicate(); - UdtValue value = cqlType.newValue(); - int i = 0; - while (input.hasRemaining()) { - if (i == cqlType.getFieldTypes().size()) { - LOG.debug("Encountered unexpected fields when parsing codec {}", cqlType); - break; - } - int elementSize = input.getInt(); - ByteBuffer element; - if (elementSize < 0) { - element = null; - } else { - element = input.slice(); - element.limit(elementSize); - input.position(input.position() + elementSize); - } - value = value.setBytesUnsafe(i, element); - i += 1; - } - return value; - } catch (BufferUnderflowException e) { - throw new IllegalArgumentException("Not enough bytes to deserialize a UDT value", e); - } - } - - @NonNull - @Override - public String format(@Nullable UdtValue value) { - if (value == null) { - return "NULL"; - } - - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - StringBuilder sb = new StringBuilder("{"); - int size = cqlType.getFieldTypes().size(); - boolean first = true; - for (int i = 0; i < size; i++) { - if (first) { - first = false; - } else { - sb.append(","); - } - CqlIdentifier 
elementName = cqlType.getFieldNames().get(i); - sb.append(elementName.asCql(true)); - sb.append(":"); - DataType elementType = cqlType.getFieldTypes().get(i); - TypeCodec codec = registry.codecFor(elementType); - sb.append(codec.format(value.get(i, codec))); - } - sb.append("}"); - return sb.toString(); - } - - @Nullable - @Override - public UdtValue parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - - UdtValue udt = cqlType.newValue(); - int length = value.length(); - - int position = ParseUtils.skipSpaces(value, 0); - if (value.charAt(position) != '{') { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\" at character %d: expecting '{' but got '%c'", - value, position, value.charAt(position))); - } - - position++; - position = ParseUtils.skipSpaces(value, position); - - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\" at character %d: expecting CQL identifier or '}', got EOF", - value, position)); - } - - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - - CqlIdentifier id = null; - while (position < length) { - if (value.charAt(position) == '}') { - position = ParseUtils.skipSpaces(value, position + 1); - if (position == length) { - return udt; - } - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at character %d expecting EOF or blank, but got \"%s\"", - value, position, value.substring(position))); - } - int n; - try { - n = ParseUtils.skipCQLId(value, position); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", cannot parse a CQL identifier at character %d", - value, position), - e); - } - id = CqlIdentifier.fromInternal(value.substring(position, n)); - position = n; - - if (!cqlType.contains(id)) { - throw new 
IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", unknown CQL identifier at character %d: \"%s\"", - value, position, id)); - } - - position = ParseUtils.skipSpaces(value, position); - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ':', but got EOF", - value, id, position)); - } - if (value.charAt(position) != ':') { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ':', but got '%c'", - value, id, position, value.charAt(position))); - } - position++; - position = ParseUtils.skipSpaces(value, position); - - try { - n = ParseUtils.skipCQLValue(value, position); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", invalid CQL value at field %s (character %d)", - value, id, position), - e); - } - - String fieldValue = value.substring(position, n); - // This works because ids occur at most once in UDTs - DataType fieldType = cqlType.getFieldTypes().get(cqlType.firstIndexOf(id)); - TypeCodec codec = registry.codecFor(fieldType); - Object parsed; - try { - parsed = codec.parse(fieldValue); - } catch (Exception e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", invalid CQL value at field %s (character %d): %s", - value, id, position, e.getMessage()), - e); - } - udt = udt.set(id, parsed, codec); - position = n; - - position = ParseUtils.skipSpaces(value, position); - if (position == length) { - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ',' or '}', but got EOF", - value, id, position)); - } - if (value.charAt(position) == '}') { - continue; - } - if (value.charAt(position) != ',') { - throw new IllegalArgumentException( - String.format( - 
"Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ',' but got '%c'", - value, id, position, value.charAt(position))); - } - ++position; // skip ',' - - position = ParseUtils.skipSpaces(value, position); - } - throw new IllegalArgumentException( - String.format( - "Cannot parse UDT value from \"%s\" at field %s (character %d): expecting CQL identifier or '}', got EOF", - value, id, position)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java deleted file mode 100644 index cc5f48dbe52..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Optional; -import java.util.UUID; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class UuidCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.UUID; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.UUID; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof UUID; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == UUID.class; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable UUID value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - ByteBuffer bytes = ByteBuffer.allocate(16); - bytes.putLong(0, value.getMostSignificantBits()); - bytes.putLong(8, value.getLeastSignificantBits()); - return bytes; - } - - @Nullable - @Override - public UUID decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } else if (bytes.remaining() != 16) { - throw new IllegalArgumentException( - "Unexpected number of bytes for a UUID, expected 16, got " + bytes.remaining()); - } else { - return new UUID(bytes.getLong(bytes.position()), bytes.getLong(bytes.position() + 8)); - } - } - - @NonNull - @Override - public String format(@Nullable UUID value) { - return (value == null) ? 
"NULL" : value.toString(); - } - - @Nullable - @Override - public UUID parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : UUID.fromString(value); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format("Cannot parse UUID value from \"%s\"", value), e); - } - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(16); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java deleted file mode 100644 index b04c959c704..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class VarIntCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.BIG_INTEGER; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.VARINT; - } - - @Override - public boolean accepts(@NonNull Object value) { - return value instanceof BigInteger; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return BigInteger.class.isAssignableFrom(javaClass); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable BigInteger value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : ByteBuffer.wrap(value.toByteArray()); - } - - @Nullable - @Override - public BigInteger decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return (bytes == null) || bytes.remaining() == 0 ? null : new BigInteger(Bytes.getArray(bytes)); - } - - @NonNull - @Override - public String format(@Nullable BigInteger value) { - return (value == null) ? "NULL" : value.toString(); - } - - @Nullable - @Override - public BigInteger parse(@Nullable String value) { - try { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? 
null - : new BigInteger(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException( - String.format("Cannot parse varint value from \"%s\"", value), e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java deleted file mode 100644 index 1f8ce1a7166..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.util.VIntCoding; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Optional; -import java.util.stream.Collectors; - -public class VectorCodec implements TypeCodec> { - - private final VectorType cqlType; - private final GenericType> javaType; - private final TypeCodec subtypeCodec; - - public VectorCodec(@NonNull VectorType cqlType, @NonNull TypeCodec subtypeCodec) { - this.cqlType = cqlType; - this.subtypeCodec = subtypeCodec; - this.javaType = GenericType.vectorOf(subtypeCodec.getJavaType()); - } - - public VectorCodec(int dimensions, @NonNull TypeCodec subtypeCodec) { - this(new DefaultVectorType(subtypeCodec.getCqlType(), dimensions), subtypeCodec); - } - - @NonNull - @Override - public GenericType> getJavaType() { - return this.javaType; - } - - @NonNull - @Override - public Optional serializedSize() { - return subtypeCodec.serializedSize().isPresent() - ? 
Optional.of(subtypeCodec.serializedSize().get() * cqlType.getDimensions()) - : Optional.empty(); - } - - @NonNull - @Override - public DataType getCqlType() { - return this.cqlType; - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable CqlVector value, @NonNull ProtocolVersion protocolVersion) { - boolean isVarSized = !subtypeCodec.serializedSize().isPresent(); - if (value == null || cqlType.getDimensions() <= 0) { - return null; - } - ByteBuffer[] valueBuffs = new ByteBuffer[cqlType.getDimensions()]; - Iterator values = value.iterator(); - int allValueBuffsSize = 0; - for (int i = 0; i < cqlType.getDimensions(); ++i) { - ByteBuffer valueBuff; - SubtypeT valueObj; - - try { - valueObj = values.next(); - } catch (NoSuchElementException nsee) { - throw new IllegalArgumentException( - String.format( - "Not enough elements; must provide elements for %d dimensions", - cqlType.getDimensions())); - } - - try { - valueBuff = this.subtypeCodec.encode(valueObj, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException("Invalid type for element: " + valueObj.getClass()); - } - if (valueBuff == null) { - throw new NullPointerException("Vector elements cannot encode to CQL NULL"); - } - int elementSize = valueBuff.limit(); - if (isVarSized) { - allValueBuffsSize += VIntCoding.computeVIntSize(elementSize); - } - allValueBuffsSize += elementSize; - valueBuff.rewind(); - valueBuffs[i] = valueBuff; - } - // if too many elements, throw - if (values.hasNext()) { - throw new IllegalArgumentException( - String.format( - "Too many elements; must provide elements for %d dimensions", - cqlType.getDimensions())); - } - /* Since we already did an early return for <= 0 dimensions above */ - assert valueBuffs.length > 0; - ByteBuffer rv = ByteBuffer.allocate(allValueBuffsSize); - for (int i = 0; i < cqlType.getDimensions(); ++i) { - if (isVarSized) { - VIntCoding.writeUnsignedVInt32(valueBuffs[i].remaining(), rv); - } - rv.put(valueBuffs[i]); - 
} - rv.flip(); - return rv; - } - - @Nullable - @Override - public CqlVector decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return null; - } - - // Upfront check for fixed-size types only - subtypeCodec - .serializedSize() - .ifPresent( - (fixed_size) -> { - if (bytes.remaining() != cqlType.getDimensions() * fixed_size) { - throw new IllegalArgumentException( - String.format( - "Expected elements of uniform size, observed %d elements with total bytes %d", - cqlType.getDimensions(), bytes.remaining())); - } - }); - ; - ByteBuffer slice = bytes.slice(); - List rv = new ArrayList(cqlType.getDimensions()); - for (int i = 0; i < cqlType.getDimensions(); ++i) { - - int size = - subtypeCodec - .serializedSize() - .orElseGet(() -> VIntCoding.getUnsignedVInt32(slice, slice.position())); - // If we aren't dealing with a fixed-size type we need to move the current slice position - // beyond the vint-encoded size of the current element. Ideally this would be - // serializedSize().ifNotPresent(Consumer) but the Optional API isn't doing us any favors - // there. 
- if (!subtypeCodec.serializedSize().isPresent()) - slice.position(slice.position() + VIntCoding.computeUnsignedVIntSize(size)); - int originalPosition = slice.position(); - slice.limit(originalPosition + size); - rv.add(this.subtypeCodec.decode(slice, protocolVersion)); - // Move to the start of the next element - slice.position(originalPosition + size); - // Reset the limit to the end of the buffer - slice.limit(slice.capacity()); - } - - // if too many elements, throw - if (slice.hasRemaining()) { - throw new IllegalArgumentException( - String.format( - "Too many elements; must provide elements for %d dimensions", - cqlType.getDimensions())); - } - - return CqlVector.newInstance(rv); - } - - @NonNull - @Override - public String format(CqlVector value) { - if (value == null) return "NULL"; - return value.stream().map(subtypeCodec::format).collect(Collectors.joining(", ", "[", "]")); - } - - @Nullable - @Override - public CqlVector parse(@Nullable String value) { - return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) - ? null - : CqlVector.from(value, this.subtypeCodec); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java deleted file mode 100644 index e62e244bf5e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collection; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -/** - * A codec that wraps other codecs around {@link Optional} instances. - * - * @param The wrapped Java type. - */ -@Immutable -public class OptionalCodec extends MappingCodec> { - - public OptionalCodec(@NonNull TypeCodec innerCodec) { - super( - Objects.requireNonNull(innerCodec, "innerCodec must not be null"), - GenericType.optionalOf(innerCodec.getJavaType())); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - if (value instanceof Optional) { - Optional optional = (Optional) value; - return optional.map(innerCodec::accepts).orElse(true); - } - return false; - } - - @Nullable - @Override - protected Optional innerToOuter(@Nullable T value) { - return Optional.ofNullable(isAbsent(value) ? null : value); - } - - @Nullable - @Override - protected T outerToInner(@Nullable Optional value) { - return value != null && value.isPresent() ? 
value.get() : null; - } - - protected boolean isAbsent(@Nullable T value) { - return value == null - || (value instanceof Collection && ((Collection) value).isEmpty()) - || (value instanceof Map && ((Map) value).isEmpty()); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java deleted file mode 100644 index fcf61a4e7b3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.util.Objects; - -/** - * Base class for all codecs dealing with Java arrays. This class aims to reduce the amount of code - * required to create such codecs. - * - * @param The Java array type this codec handles - */ -public abstract class AbstractListToArrayCodec implements TypeCodec { - - @NonNull protected final ListType cqlType; - @NonNull protected final GenericType javaType; - - /** - * @param cqlType The CQL type. Must be a list type. - * @param arrayType The Java type. Must be an array class. 
- */ - protected AbstractListToArrayCodec( - @NonNull ListType cqlType, @NonNull GenericType arrayType) { - this.cqlType = Objects.requireNonNull(cqlType, "cqlType cannot be null"); - this.javaType = Objects.requireNonNull(arrayType, "arrayType cannot be null"); - if (!arrayType.isArray()) { - throw new IllegalArgumentException("Expecting Java array class, got " + arrayType); - } - } - - @NonNull - @Override - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return cqlType; - } - - @NonNull - @Override - public String format(@Nullable ArrayT array) { - if (array == null) { - return "NULL"; - } - int length = Array.getLength(array); - StringBuilder sb = new StringBuilder(); - sb.append('['); - for (int i = 0; i < length; i++) { - if (i != 0) { - sb.append(","); - } - formatElement(sb, array, i); - } - sb.append(']'); - return sb.toString(); - } - - @Nullable - @Override - public ArrayT parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - int idx = skipSpaces(value, 0); - idx = skipOpeningBracket(value, idx); - idx = skipSpaces(value, idx); - if (value.charAt(idx) == ']') { - return newInstance(0); - } - // first pass: determine array length - int length = getArrayLength(value, idx); - // second pass: parse elements - ArrayT array = newInstance(length); - int i = 0; - for (; idx < value.length(); i++) { - int n = skipLiteral(value, idx); - parseElement(value.substring(idx, n), array, i); - idx = skipSpaces(value, n); - if (value.charAt(idx) == ']') { - return array; - } - idx = skipComma(value, idx); - idx = skipSpaces(value, idx); - } - throw new IllegalArgumentException( - String.format("Malformed list value \"%s\", missing closing ']'", value)); - } - - /** - * Creates a new array instance with the given size. - * - * @param size The size of the array to instantiate. - * @return a new array instance with the given size. 
- */ - @NonNull - protected abstract ArrayT newInstance(int size); - - /** - * Formats the {@code index}th element of {@code array} to {@code output}. - * - * @param output The StringBuilder to write to. - * @param array The array to read from. - * @param index The element index. - */ - protected abstract void formatElement( - @NonNull StringBuilder output, @NonNull ArrayT array, int index); - - /** - * Parses the {@code index}th element of {@code array} from {@code input}. - * - * @param input The String to read from. - * @param array The array to write to. - * @param index The element index. - */ - protected abstract void parseElement(@NonNull String input, @NonNull ArrayT array, int index); - - private int getArrayLength(String value, int idx) { - int length = 1; - for (; idx < value.length(); length++) { - idx = skipLiteral(value, idx); - idx = skipSpaces(value, idx); - if (value.charAt(idx) == ']') { - break; - } - idx = skipComma(value, idx); - idx = skipSpaces(value, idx); - } - return length; - } - - private int skipComma(String value, int idx) { - if (value.charAt(idx) != ',') { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting ',' but got '%c'", - value, idx, value.charAt(idx))); - } - return idx + 1; - } - - private int skipOpeningBracket(String value, int idx) { - if (value.charAt(idx) != '[') { - throw new IllegalArgumentException( - String.format( - "cannot parse list value from \"%s\", at character %d expecting '[' but got '%c'", - value, idx, value.charAt(idx))); - } - return idx + 1; - } - - private int skipSpaces(String value, int idx) { - try { - return ParseUtils.skipSpaces(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", at character %d expecting space but got '%c'", - value, idx, value.charAt(idx)), - e); - } - } - - private int skipLiteral(String value, int idx) { - try { - 
return ParseUtils.skipCQLValue(value, idx); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - String.format( - "Cannot parse list value from \"%s\", invalid CQL value at character %d", value, idx), - e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java deleted file mode 100644 index 3e5ece7c159..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** - * Base class for all codecs dealing with Java primitive arrays. This class provides a more - * efficient implementation of {@link #encode(Object, ProtocolVersion)} and {@link - * #decode(ByteBuffer, ProtocolVersion)} for primitive arrays. - * - * @param The Java primitive array type this codec handles - */ -public abstract class AbstractPrimitiveListToArrayCodec - extends AbstractListToArrayCodec { - - /** - * @param cqlType The CQL type. Must be a list type. - * @param javaClass The Java type. Must be an array class. - */ - protected AbstractPrimitiveListToArrayCodec( - @NonNull ListType cqlType, @NonNull GenericType javaClass) { - super(cqlType, javaClass); - GenericType componentType = Objects.requireNonNull(javaClass.getComponentType()); - if (!componentType.isPrimitive()) { - throw new IllegalArgumentException( - "Expecting primitive array component type, got " + componentType); - } - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable PrimitiveArrayT array, @NonNull ProtocolVersion protocolVersion) { - if (array == null) { - return null; - } - int length = Array.getLength(array); - int sizeOfElement = 4 + sizeOfComponentType(); - int totalSize = 4 + length * sizeOfElement; - ByteBuffer output = ByteBuffer.allocate(totalSize); - output.putInt(length); - for (int i = 0; i < length; i++) { - output.putInt(sizeOfComponentType()); - serializeElement(output, array, i, protocolVersion); - } - output.flip(); - return output; - } - - @Nullable - @Override - public PrimitiveArrayT decode( - @Nullable 
ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return newInstance(0); - } - ByteBuffer input = bytes.duplicate(); - int length = input.getInt(); - PrimitiveArrayT array = newInstance(length); - for (int i = 0; i < length; i++) { - int elementSize = input.getInt(); - // Null elements can happen on the decode path, but we cannot tolerate them - if (elementSize < 0) { - throw new NullPointerException("Primitive arrays cannot store null elements"); - } else { - deserializeElement(input, array, i, protocolVersion); - } - } - return array; - } - - /** - * Return the size in bytes of the array component type. - * - * @return the size in bytes of the array component type. - */ - protected abstract int sizeOfComponentType(); - - /** - * Write the {@code index}th element of {@code array} to {@code output}. - * - * @param output The ByteBuffer to write to. - * @param array The array to read from. - * @param index The element index. - * @param protocolVersion The protocol version to use. - */ - protected abstract void serializeElement( - @NonNull ByteBuffer output, - @NonNull PrimitiveArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); - - /** - * Read the {@code index}th element of {@code array} from {@code input}. - * - * @param input The ByteBuffer to read from. - * @param array The array to write to. - * @param index The element index. - * @param protocolVersion The protocol version to use. 
- */ - protected abstract void deserializeElement( - @NonNull ByteBuffer input, - @NonNull PrimitiveArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java deleted file mode 100644 index c9cc0baa41f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code boolean[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code boolean[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * boolean} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ -@Immutable -public class BooleanListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - private static final byte TRUE = (byte) 1; - private static final byte FALSE = (byte) 0; - - public BooleanListToArrayCodec() { - super(DataTypes.listOf(DataTypes.BOOLEAN), GenericType.of(boolean[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return boolean[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof boolean[]; - } - - @Override - protected int sizeOfComponentType() { - return 1; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull boolean[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - byte element = array[index] ? 
TRUE : FALSE; - output.put(element); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull boolean[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.get() == TRUE; - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull boolean[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull boolean[] array, int index) { - array[index] = Boolean.parseBoolean(input); - } - - @NonNull - @Override - protected boolean[] newInstance(int size) { - return new boolean[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java deleted file mode 100644 index b811908e341..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.SimpleBlobCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code byte[]}. - * - *

Note that this codec is not suitable for reading CQL blobs as byte arrays; you should use - * {@link SimpleBlobCodec} for that. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code byte[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code byte} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class ByteListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public ByteListToArrayCodec() { - super(DataTypes.listOf(DataTypes.SMALLINT), GenericType.of(byte[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return byte[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof byte[]; - } - - @Override - protected int sizeOfComponentType() { - return 1; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull byte[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.put(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull byte[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.get(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull byte[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull byte[] array, int index) { - array[index] = Byte.parseByte(input); - } - - @NonNull - @Override - protected byte[] newInstance(int size) { - return new byte[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java deleted file mode 100644 index fdf5befa635..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code double[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code double[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * double} values; it also instantiates arrays without the need for an intermediary Java {@code - * List} object. - */ -@Immutable -public class DoubleListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public DoubleListToArrayCodec() { - super(DataTypes.listOf(DataTypes.DOUBLE), GenericType.of(double[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return double[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof double[]; - } - - @Override - protected int sizeOfComponentType() { - return 8; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull double[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putDouble(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull double[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getDouble(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull double[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull double[] array, int index) { - array[index] = Double.parseDouble(input); - } - - @NonNull - @Override - protected double[] newInstance(int size) { - return new double[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java deleted file mode 100644 index b77e5d1243d..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code float[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code float[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * float} values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class FloatListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public FloatListToArrayCodec() { - super(DataTypes.listOf(DataTypes.FLOAT), GenericType.of(float[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return float[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof float[]; - } - - @Override - protected int sizeOfComponentType() { - return 4; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putFloat(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getFloat(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull float[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull float[] array, int index) { - array[index] = Float.parseFloat(input); - } - - @NonNull - @Override - protected float[] newInstance(int size) { - return new float[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java deleted file mode 100644 index cf464282b1e..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code int[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code int[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code int} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class IntListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public IntListToArrayCodec() { - super(DataTypes.listOf(DataTypes.INT), GenericType.of(int[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return int[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof int[]; - } - - @Override - protected int sizeOfComponentType() { - return 4; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull int[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putInt(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull int[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getInt(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull int[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull int[] array, int index) { - array[index] = Integer.parseInt(input); - } - - @NonNull - @Override - protected int[] newInstance(int size) { - return new int[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java deleted file mode 100644 index bde21d40272..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code long[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code long[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code long} - * values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class LongListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public LongListToArrayCodec() { - super(DataTypes.listOf(DataTypes.BIGINT), GenericType.of(long[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return long[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof long[]; - } - - @Override - protected int sizeOfComponentType() { - return 8; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull long[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putLong(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull long[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getLong(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull long[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull long[] array, int index) { - array[index] = Long.parseLong(input); - } - - @NonNull - @Override - protected long[] newInstance(int size) { - return new long[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java deleted file mode 100644 index 8600ba3e9a5..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * Codec dealing with Java object arrays. Serialization and deserialization of elements in the array - * is delegated to the provided element codec. - * - *

For example, to create a codec that maps {@code list} to {@code String[]}, declare the - * following: - * - *

{@code
- * ObjectListToArrayCodec stringArrayCodec = new ObjectListToArrayCodec<>(TypeCodecs.TEXT);
- * }
- * - * @param The Java array component type this codec handles - */ -@Immutable -public class ObjectListToArrayCodec extends AbstractListToArrayCodec { - - private final TypeCodec elementCodec; - - public ObjectListToArrayCodec(@NonNull TypeCodec elementCodec) { - super( - DataTypes.listOf( - Objects.requireNonNull(elementCodec, "elementCodec must not be null").getCqlType()), - GenericType.arrayOf(elementCodec.getJavaType())); - this.elementCodec = elementCodec; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - Class clazz = value.getClass(); - return clazz.isArray() - && clazz.getComponentType().equals(elementCodec.getJavaType().getRawType()); - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ElementT[] value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - int i = 0; - ByteBuffer[] encodedElements = new ByteBuffer[value.length]; - int toAllocate = 4; // initialize with number of elements - for (ElementT elt : value) { - if (elt == null) { - throw new NullPointerException("Collection elements cannot be null"); - } - ByteBuffer encodedElement; - try { - encodedElement = elementCodec.encode(elt, protocolVersion); - } catch (ClassCastException e) { - throw new IllegalArgumentException( - String.format( - "Invalid type for %s element, expecting %s but got %s", - cqlType, elementCodec.getJavaType(), elt.getClass()), - e); - } - if (encodedElement == null) { - throw new NullPointerException("Collection elements cannot encode to CQL NULL"); - } - encodedElements[i++] = encodedElement; - toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size - } - ByteBuffer result = ByteBuffer.allocate(toAllocate); - result.putInt(value.length); - for (ByteBuffer encodedElement : encodedElements) { - result.putInt(encodedElement.remaining()); - result.put(encodedElement); - } - result.flip(); - return result; - } - - @Nullable - @Override - public 
ElementT[] decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - return newInstance(0); - } - ByteBuffer input = bytes.duplicate(); - int size = input.getInt(); - ElementT[] result = newInstance(size); - for (int i = 0; i < size; i++) { - ElementT element; - int elementSize = input.getInt(); - // Allow null elements on the decode path, because Cassandra might return such collections - // for some computed values in the future -- e.g. SELECT ttl(some_collection) - if (elementSize < 0) { - element = null; - } else { - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - element = elementCodec.decode(encodedElement, protocolVersion); - input.position(input.position() + elementSize); - } - result[i] = element; - } - return result; - } - - @Override - protected void formatElement( - @NonNull StringBuilder output, @NonNull ElementT[] array, int index) { - output.append(elementCodec.format(array[index])); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull ElementT[] array, int index) { - array[index] = elementCodec.parse(input); - } - - @NonNull - @Override - @SuppressWarnings("unchecked") - protected ElementT[] newInstance(int size) { - return (ElementT[]) Array.newInstance(getJavaType().getRawType().getComponentType(), size); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java deleted file mode 100644 index 13bb5733bf9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that maps the CQL type {@code list} to the Java type {@code short[]}. - * - *

Note that this codec is designed for performance and converts CQL lists directly to - * {@code short[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code - * short} values; it also instantiates arrays without the need for an intermediary Java {@code List} - * object. - */ -@Immutable -public class ShortListToArrayCodec extends AbstractPrimitiveListToArrayCodec { - - public ShortListToArrayCodec() { - super(DataTypes.listOf(DataTypes.SMALLINT), GenericType.of(short[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return short[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof short[]; - } - - @Override - protected int sizeOfComponentType() { - return 2; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull short[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putShort(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull short[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getShort(); - } - - @Override - protected void formatElement(@NonNull StringBuilder output, @NonNull short[] array, int index) { - output.append(array[index]); - } - - @Override - protected void parseElement(@NonNull String input, @NonNull short[] array, int index) { - array[index] = Short.parseShort(input); - } - - @NonNull - @Override - protected short[] newInstance(int size) { - return new short[size]; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java deleted file mode 100644 index 56363ef819e..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that serializes {@link Enum} instances as CQL {@code varchar}s representing their - * programmatic names as returned by {@link Enum#name()}. - * - *

Note that this codec relies on the enum constant names; it is therefore vital that - * enum names never change. - * - * @param The Enum class this codec serializes from and deserializes to. - */ -@Immutable -public class EnumNameCodec> extends MappingCodec { - - private final Class enumClass; - - public EnumNameCodec(@NonNull Class enumClass) { - super( - TypeCodecs.TEXT, - GenericType.of(Objects.requireNonNull(enumClass, "enumClass must not be null"))); - this.enumClass = enumClass; - } - - @Nullable - @Override - protected EnumT innerToOuter(@Nullable String value) { - return value == null || value.isEmpty() ? null : Enum.valueOf(enumClass, value); - } - - @Nullable - @Override - protected String outerToInner(@Nullable EnumT value) { - return value == null ? null : value.name(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java deleted file mode 100644 index 4d6ca26484e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * A codec that serializes {@link Enum} instances as CQL {@code int}s representing their ordinal - * values as returned by {@link Enum#ordinal()}. - * - *

Note that this codec relies on the enum constants declaration order; it is therefore - * vital that this order remains immutable. - * - * @param The Enum class this codec serializes from and deserializes to. - */ -@Immutable -public class EnumOrdinalCodec> extends MappingCodec { - - private final EnumT[] enumConstants; - - public EnumOrdinalCodec(@NonNull Class enumClass) { - super( - TypeCodecs.INT, - GenericType.of(Objects.requireNonNull(enumClass, "enumClass must not be null"))); - this.enumConstants = enumClass.getEnumConstants(); - } - - @Nullable - @Override - protected EnumT innerToOuter(@Nullable Integer value) { - return value == null ? null : enumConstants[value]; - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable EnumT value) { - return value == null ? null : value.ordinal(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java deleted file mode 100644 index a971d27b3f3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.json; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.type.TypeFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** - * A JSON codec that maps arbitrary Java objects to JSON strings stored as CQL type {@code text}, - * using the Jackson library to perform serialization and deserialization of JSON objects. - * - *

Note that this codec requires the presence of Jackson library at runtime. If you use Maven, - * this can be done by declaring the following dependency in your project: - * - *

{@code
- * 
- *   com.fasterxml.jackson.core
- *   jackson-databind
- *   LATEST
- * 
- * }
- * - * @see Jackson JSON Library - * @param The Java type that this codec serializes from and deserializes to, from JSON strings. - */ -public class JsonCodec implements TypeCodec { - - private final ObjectMapper objectMapper; - private final GenericType javaType; - private final JavaType jacksonJavaType; - - /** - * Creates a new instance for the provided {@code javaClass}, using a default, newly-allocated - * {@link ObjectMapper}. - * - *

The codec created with this constructor can handle all primitive CQL types as well as - * collections thereof, however it cannot handle tuples and user-defined types; if you need - * support for such CQL types, you need to create your own {@link ObjectMapper} and use the - * {@linkplain #JsonCodec(Class, ObjectMapper) two-arg constructor} instead. - * - * @param javaClass the Java class this codec maps to. - */ - public JsonCodec(@NonNull Class javaClass) { - this(GenericType.of(Objects.requireNonNull(javaClass, "javaClass cannot be null"))); - } - - /** - * Creates a new instance for the provided {@code javaType}, using a default, newly-allocated - * {@link ObjectMapper}. - * - *

The codec created with this constructor can handle all primitive CQL types as well as - * collections thereof, however it cannot handle tuples and user-defined types; if you need - * support for such CQL types, you need to create your own {@link ObjectMapper} and use the - * {@linkplain #JsonCodec(GenericType, ObjectMapper) two-arg constructor} instead. - * - * @param javaType the Java type this codec maps to. - */ - public JsonCodec(@NonNull GenericType javaType) { - this(javaType, new ObjectMapper()); - } - - /** - * Creates a new instance for the provided {@code javaClass}, and using the provided {@link - * ObjectMapper}. - * - * @param javaClass the Java class this codec maps to. - * @param objectMapper the {@link ObjectMapper} instance to use. - */ - public JsonCodec(@NonNull Class javaClass, @NonNull ObjectMapper objectMapper) { - this( - GenericType.of(Objects.requireNonNull(javaClass, "javaClass cannot be null")), - objectMapper); - } - - /** - * Creates a new instance for the provided {@code javaType}, and using the provided {@link - * ObjectMapper}. - * - * @param javaType the Java type this codec maps to. - * @param objectMapper the {@link ObjectMapper} instance to use. 
- */ - public JsonCodec(@NonNull GenericType javaType, @NonNull ObjectMapper objectMapper) { - this.javaType = Objects.requireNonNull(javaType, "javaType cannot be null"); - this.objectMapper = Objects.requireNonNull(objectMapper, "objectMapper cannot be null"); - this.jacksonJavaType = TypeFactory.defaultInstance().constructType(javaType.getType()); - } - - @NonNull - @Override - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable T value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try { - return ByteBuffer.wrap(objectMapper.writeValueAsBytes(value)); - } catch (JsonProcessingException e) { - throw new IllegalArgumentException("Failed to encode value as JSON", e); - } - } - - @Nullable - @Override - public T decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - try { - return objectMapper.readValue(Bytes.getArray(bytes), jacksonJavaType); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to decode JSON value", e); - } - } - - @NonNull - @Override - public String format(@Nullable T value) { - if (value == null) { - return "NULL"; - } - String json; - try { - json = objectMapper.writeValueAsString(value); - } catch (JsonProcessingException e) { - throw new IllegalArgumentException("Failed to format value as JSON", e); - } - return Strings.quote(json); - } - - @Nullable - @Override - public T parse(@Nullable String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException("JSON strings must be enclosed by single quotes"); - } - String json = Strings.unquote(value); - try { - return objectMapper.readValue(json, jacksonJavaType); - } catch (IOException e) { - throw 
new IllegalArgumentException("Failed to parse value as JSON", e); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java deleted file mode 100644 index 6b66b5d2049..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * {@link TypeCodec} that maps {@link LocalDateTime} to CQL {@code timestamp}, allowing the setting - * and retrieval of {@code timestamp} columns as {@link LocalDateTime} instances. - * - *

This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for - * important remarks about implementation notes and accepted timestamp formats. - */ -@Immutable -public class LocalTimestampCodec extends MappingCodec { - - private final ZoneId timeZone; - - /** - * Creates a new {@code LocalTimestampCodec} that converts CQL timestamps into {@link - * LocalDateTime} instances using the system's {@linkplain ZoneId#systemDefault() default time - * zone} as their time zone. The supplied {@code timeZone} will also be used to parse CQL - * timestamp literals that do not include any time zone information. - */ - public LocalTimestampCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code LocalTimestampCodec} that converts CQL timestamps into {@link - * LocalDateTime} instances using the given {@link ZoneId} as their time zone. The supplied {@code - * timeZone} will also be used to parse CQL timestamp literals that do not include any time zone - * information. - */ - public LocalTimestampCodec(@NonNull ZoneId timeZone) { - super( - new TimestampCodec(Objects.requireNonNull(timeZone, "timeZone cannot be null")), - GenericType.LOCAL_DATE_TIME); - this.timeZone = timeZone; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof LocalDateTime; - } - - @Nullable - @Override - protected LocalDateTime innerToOuter(@Nullable Instant value) { - return value == null ? null : LocalDateTime.ofInstant(value, timeZone); - } - - @Nullable - @Override - protected Instant outerToInner(@Nullable LocalDateTime value) { - return value == null ? 
null : value.atZone(timeZone).toInstant(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java deleted file mode 100644 index c16a64b9ad9..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.Objects; -import net.jcip.annotations.Immutable; - -/** - * {@link TypeCodec} that maps {@link ZonedDateTime} to CQL {@code tuple}, - * providing a pattern for maintaining timezone information in Cassandra. - * - *

Since Cassandra's timestamp type does not store any time zone, by using a - * tuple<timestamp,varchar> a timezone can be persisted in the varchar - * field of such tuples, and so when the value is deserialized the original timezone is - * preserved. - * - *

Note: if you want to retrieve CQL timestamps as {@link ZonedDateTime} instances but don't need - * to persist the time zone to the database, you should rather use {@link ZonedTimestampCodec}. - */ -@Immutable -public class PersistentZonedTimestampCodec extends MappingCodec { - - private static final TupleType CQL_TYPE = DataTypes.tupleOf(DataTypes.TIMESTAMP, DataTypes.TEXT); - - public PersistentZonedTimestampCodec() { - super(TypeCodecs.tupleOf(CQL_TYPE), GenericType.ZONED_DATE_TIME); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof ZonedDateTime; - } - - @NonNull - @Override - public TupleType getCqlType() { - return CQL_TYPE; - } - - @NonNull - @Override - public String format(@Nullable ZonedDateTime value) { - if (value == null) { - return "NULL"; - } - // Use TIMESTAMP_UTC for a better-looking format - return "(" - + ExtraTypeCodecs.TIMESTAMP_UTC.format(value.toInstant()) - + "," - + TypeCodecs.TEXT.format(value.getZone().toString()) - + ")"; - } - - @Nullable - @Override - protected ZonedDateTime innerToOuter(@Nullable TupleValue value) { - if (value == null) { - return null; - } else { - Instant instant = Objects.requireNonNull(value.getInstant(0)); - ZoneId zoneId = ZoneId.of(Objects.requireNonNull(value.getString(1))); - return ZonedDateTime.ofInstant(instant, zoneId); - } - } - - @Nullable - @Override - protected TupleValue outerToInner(@Nullable ZonedDateTime value) { - if (value == null) { - return null; - } else { - Instant instant = value.toInstant(); - String zoneId = value.getZone().toString(); - return this.getCqlType().newValue(instant, zoneId); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java deleted file mode 100644 index 12e3e839d2a..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.ZoneId; -import java.util.Objects; -import java.util.Optional; -import net.jcip.annotations.Immutable; - -/** - * A {@link TypeCodec} that maps CQL timestamps to Java primitive longs, representing the number of - * milliseconds since the Epoch. - * - *

This codec can serve as a replacement for the driver's built-in {@link TypeCodecs#TIMESTAMP - * timestamp} codec, when application code prefers to deal with raw milliseconds than with {@link - * Instant} instances. - * - *

This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for - * important remarks about implementation notes and accepted timestamp formats. - */ -@Immutable -public class TimestampMillisCodec implements PrimitiveLongCodec { - - private final TimestampCodec timestampCodec; - - /** - * Creates a new {@code TimestampMillisCodec} that uses the system's {@linkplain - * ZoneId#systemDefault() default time zone} to parse timestamp literals that do not include any - * time zone information. - */ - public TimestampMillisCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code TimestampMillisCodec} that uses the given {@link ZoneId} to parse - * timestamp literals that do not include any time zone information. - */ - public TimestampMillisCodec(ZoneId defaultZoneId) { - timestampCodec = new TimestampCodec(defaultZoneId); - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.LONG; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TIMESTAMP; - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == Long.class || javaClass == long.class; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof Long; - } - - @Nullable - @Override - public ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion) { - return TypeCodecs.BIGINT.encodePrimitive(value, protocolVersion); - } - - @Override - public long decodePrimitive( - @Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { - return TypeCodecs.BIGINT.decodePrimitive(value, protocolVersion); - } - - @Nullable - @Override - public Long parse(@Nullable String value) { - Instant instant = timestampCodec.parse(value); - return instant == null ? 
null : instant.toEpochMilli(); - } - - @NonNull - @Override - public String format(@Nullable Long value) { - Instant instant = value == null ? null : Instant.ofEpochMilli(value); - return timestampCodec.format(instant); - } - - @NonNull - @Override - public Optional serializedSize() { - return Optional.of(8); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java deleted file mode 100644 index a0947ff3493..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; - -/** - * A codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link - * ZonedDateTime}, using the {@link ZoneId} supplied at instantiation. - * - *

Note that Apache Cassandra(R)'s timestamp type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. If you need to persist the time zone in the database, consider using {@link - * PersistentZonedTimestampCodec} instead. - * - *

This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for - * important remarks about implementation notes and accepted timestamp formats. - * - * @see TimestampCodec - */ -@ThreadSafe -public class ZonedTimestampCodec extends MappingCodec { - - private final ZoneId timeZone; - - /** - * Creates a new {@code ZonedTimestampCodec} that converts CQL timestamps into {@link - * ZonedDateTime} instances using the system's {@linkplain ZoneId#systemDefault() default time - * zone} as their time zone. The supplied {@code timeZone} will also be used to parse CQL - * timestamp literals that do not include any time zone information. - */ - public ZonedTimestampCodec() { - this(ZoneId.systemDefault()); - } - - /** - * Creates a new {@code ZonedTimestampCodec} that converts CQL timestamps into {@link - * ZonedDateTime} instances using the given {@link ZoneId} as their time zone. The supplied {@code - * timeZone} will also be used to parse CQL timestamp literals that do not include any time zone - * information. - */ - public ZonedTimestampCodec(ZoneId timeZone) { - super( - new TimestampCodec(Objects.requireNonNull(timeZone, "timeZone cannot be null")), - GenericType.ZONED_DATE_TIME); - this.timeZone = timeZone; - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof ZonedDateTime; - } - - @Nullable - @Override - protected ZonedDateTime innerToOuter(@Nullable Instant value) { - return value == null ? null : value.atZone(timeZone); - } - - @Nullable - @Override - protected Instant outerToInner(@Nullable ZonedDateTime value) { - return value == null ? 
null : value.toInstant(); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java deleted file mode 100644 index 3e4e844783c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.vector; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** Common super-class for all codecs which map a CQL vector type onto a primitive array */ -public abstract class AbstractVectorToArrayCodec implements TypeCodec { - - @NonNull protected final VectorType cqlType; - @NonNull protected final GenericType javaType; - - /** - * @param cqlType The CQL type. Must be a list type. - * @param arrayType The Java type. Must be an array class. - */ - protected AbstractVectorToArrayCodec( - @NonNull VectorType cqlType, @NonNull GenericType arrayType) { - this.cqlType = Objects.requireNonNull(cqlType, "cqlType cannot be null"); - this.javaType = Objects.requireNonNull(arrayType, "arrayType cannot be null"); - if (!arrayType.isArray()) { - throw new IllegalArgumentException("Expecting Java array class, got " + arrayType); - } - } - - @NonNull - @Override - public GenericType getJavaType() { - return this.javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return this.cqlType; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable ArrayT array, @NonNull ProtocolVersion protocolVersion) { - if (array == null) { - return null; - } - int length = Array.getLength(array); - int totalSize = length * sizeOfComponentType(); - ByteBuffer output = ByteBuffer.allocate(totalSize); - for (int i = 0; i < length; i++) { - serializeElement(output, array, i, protocolVersion); - } - output.flip(); - return output; - } - - @Nullable - @Override - public 
ArrayT decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null || bytes.remaining() == 0) { - throw new IllegalArgumentException( - "Input ByteBuffer must not be null and must have non-zero remaining bytes"); - } - ByteBuffer input = bytes.duplicate(); - int length = this.cqlType.getDimensions(); - int elementSize = sizeOfComponentType(); - ArrayT array = newInstance(); - for (int i = 0; i < length; i++) { - // Null elements can happen on the decode path, but we cannot tolerate them - if (elementSize < 0) { - throw new NullPointerException("Primitive arrays cannot store null elements"); - } else { - deserializeElement(input, array, i, protocolVersion); - } - } - return array; - } - - /** - * Creates a new array instance with a size matching the specified vector. - * - * @return a new array instance with a size matching the specified vector. - */ - @NonNull - protected abstract ArrayT newInstance(); - - /** - * Return the size in bytes of the array component type. - * - * @return the size in bytes of the array component type. - */ - protected abstract int sizeOfComponentType(); - - /** - * Write the {@code index}th element of {@code array} to {@code output}. - * - * @param output The ByteBuffer to write to. - * @param array The array to read from. - * @param index The element index. - * @param protocolVersion The protocol version to use. - */ - protected abstract void serializeElement( - @NonNull ByteBuffer output, - @NonNull ArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); - - /** - * Read the {@code index}th element of {@code array} from {@code input}. - * - * @param input The ByteBuffer to read from. - * @param array The array to write to. - * @param index The element index. - * @param protocolVersion The protocol version to use. 
- */ - protected abstract void deserializeElement( - @NonNull ByteBuffer input, - @NonNull ArrayT array, - int index, - @NonNull ProtocolVersion protocolVersion); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java deleted file mode 100644 index 86f31dc4980..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.vector; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.FloatCodec; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Objects; - -/** A codec that maps CQL vectors to the Java type {@code float[]}. */ -public class FloatVectorToArrayCodec extends AbstractVectorToArrayCodec { - - public FloatVectorToArrayCodec(VectorType type) { - super(type, GenericType.of(float[].class)); - } - - @Override - public boolean accepts(@NonNull Class javaClass) { - Objects.requireNonNull(javaClass); - return float[].class.equals(javaClass); - } - - @Override - public boolean accepts(@NonNull Object value) { - Objects.requireNonNull(value); - return value instanceof float[]; - } - - @NonNull - @Override - protected float[] newInstance() { - return new float[cqlType.getDimensions()]; - } - - @Override - protected int sizeOfComponentType() { - return 4; - } - - @Override - protected void serializeElement( - @NonNull ByteBuffer output, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - output.putFloat(array[index]); - } - - @Override - protected void deserializeElement( - @NonNull ByteBuffer input, - @NonNull float[] array, - int index, - @NonNull ProtocolVersion protocolVersion) { - array[index] = input.getFloat(); - } - - @NonNull - @Override - public String format(@Nullable float[] value) { - return value == null ? 
"NULL" : Arrays.toString(value); - } - - @Nullable - @Override - public float[] parse(@Nullable String str) { - Preconditions.checkArgument(str != null, "Cannot create float array from null string"); - Preconditions.checkArgument(!str.isEmpty(), "Cannot create float array from empty string"); - - FloatCodec codec = new FloatCodec(); - float[] rv = this.newInstance(); - Iterator strIter = - Splitter.on(", ").trimResults().split(str.substring(1, str.length() - 1)).iterator(); - for (int i = 0; i < rv.length; ++i) { - String strVal = strIter.next(); - if (strVal == null) { - throw new IllegalArgumentException("Null element observed in float array string"); - } - Float f = codec.parse(strVal); - rv[i] = f.floatValue(); - } - return rv; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java deleted file mode 100644 index 3af5a30ba27..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java +++ /dev/null @@ -1,764 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.ContainerType; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.VectorType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.util.IntMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; 
-import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A codec registry that handles built-in type mappings, can be extended with a list of - * user-provided codecs, generates more complex codecs from those basic codecs, and caches generated - * codecs for reuse. - * - *

The primitive mappings always take precedence over any user codec. The list of user codecs can - * not be modified after construction. - * - *

This class is abstract in order to be agnostic from the cache implementation. Subclasses must - * implement {@link #getCachedCodec(DataType, GenericType, boolean)}. - */ -@ThreadSafe -public abstract class CachingCodecRegistry implements MutableCodecRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(CachingCodecRegistry.class); - - // Implementation notes: - // - built-in primitive codecs are served directly, without hitting the cache - // - same for user codecs (we assume the cardinality will always be low, so a sequential array - // traversal is cheap). - - protected final String logPrefix; - private final TypeCodec[] primitiveCodecs; - private final CopyOnWriteArrayList> userCodecs = new CopyOnWriteArrayList<>(); - private final IntMap> primitiveCodecsByCode; - private final Lock registerLock = new ReentrantLock(); - - protected CachingCodecRegistry( - @NonNull String logPrefix, @NonNull TypeCodec[] primitiveCodecs) { - this.logPrefix = logPrefix; - this.primitiveCodecs = primitiveCodecs; - this.primitiveCodecsByCode = sortByProtocolCode(primitiveCodecs); - } - - /** - * @deprecated this constructor calls an overridable method ({@link #register(TypeCodec[])}), - * which is a bad practice. The recommended alternative is to use {@link - * #CachingCodecRegistry(String, TypeCodec[])}, then add the codecs with one of the {@link - * #register} methods. - */ - @Deprecated - protected CachingCodecRegistry( - @NonNull String logPrefix, - @NonNull TypeCodec[] primitiveCodecs, - @NonNull TypeCodec[] userCodecs) { - this(logPrefix, primitiveCodecs); - register(userCodecs); - } - - @Override - public void register(TypeCodec newCodec) { - // This method could work without synchronization, but there is a tiny race condition that would - // allow two threads to register colliding codecs (the last added codec would later be ignored, - // but without any warning). 
Serialize calls to avoid that: - registerLock.lock(); - try { - for (TypeCodec primitiveCodec : primitiveCodecs) { - if (collides(newCodec, primitiveCodec)) { - LOG.warn( - "[{}] Ignoring codec {} because it collides with built-in primitive codec {}", - logPrefix, - newCodec, - primitiveCodec); - return; - } - } - for (TypeCodec userCodec : userCodecs) { - if (collides(newCodec, userCodec)) { - LOG.warn( - "[{}] Ignoring codec {} because it collides with previously registered codec {}", - logPrefix, - newCodec, - userCodec); - return; - } - } - // Technically this would cover the two previous cases as well, but we want precise messages. - try { - TypeCodec cachedCodec = - getCachedCodec(newCodec.getCqlType(), newCodec.getJavaType(), false); - LOG.warn( - "[{}] Ignoring codec {} because it collides with previously generated codec {}", - logPrefix, - newCodec, - cachedCodec); - return; - } catch (CodecNotFoundException ignored) { - // Catching the exception is ugly, but it avoids breaking the internal API (e.g. by adding a - // getCachedCodecIfExists) - } - userCodecs.add(newCodec); - } finally { - registerLock.unlock(); - } - } - - private boolean collides(TypeCodec newCodec, TypeCodec oldCodec) { - return oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType()); - } - - /** - * Gets a complex codec from the cache. - * - *

If the codec does not exist in the cache, this method must generate it with {@link - * #createCodec(DataType, GenericType, boolean)} (and most likely put it in the cache too for - * future calls). - */ - protected abstract TypeCodec getCachedCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant); - - @NonNull - @Override - public TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull GenericType javaType) { - return codecFor(cqlType, javaType, false); - } - - // Not exposed publicly, (isJavaCovariant=true) is only used for internal recursion - @NonNull - protected TypeCodec codecFor( - @NonNull DataType cqlType, - @NonNull GenericType javaType, - boolean isJavaCovariant) { - LOG.trace("[{}] Looking up codec for {} <-> {}", logPrefix, cqlType, javaType); - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null && matches(primitiveCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType) && matches(userCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(cqlType, javaType, isJavaCovariant)); - } - - @NonNull - @Override - public TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull Class javaType) { - LOG.trace("[{}] Looking up codec for {} <-> {}", logPrefix, cqlType, javaType); - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null && primitiveCodec.accepts(javaType)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType) && 
userCodec.accepts(javaType)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(cqlType, GenericType.of(javaType), false)); - } - - @NonNull - @Override - public TypeCodec codecFor(@NonNull DataType cqlType) { - LOG.trace("[{}] Looking up codec for CQL type {}", logPrefix, cqlType); - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(cqlType, null, false)); - } - - @NonNull - @Override - public TypeCodec codecFor( - @NonNull DataType cqlType, @NonNull JavaTypeT value) { - Preconditions.checkNotNull(cqlType); - Preconditions.checkNotNull(value); - LOG.trace("[{}] Looking up codec for CQL type {} and object {}", logPrefix, cqlType, value); - - TypeCodec primitiveCodec = primitiveCodecsByCode.get(cqlType.getProtocolCode()); - if (primitiveCodec != null && primitiveCodec.accepts(value)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(cqlType) && userCodec.accepts(value)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - - GenericType javaType = inspectType(value, cqlType); - LOG.trace("[{}] Continuing based on inferred type {}", logPrefix, javaType); - return uncheckedCast(getCachedCodec(cqlType, javaType, true)); - } - - @NonNull - @Override - public TypeCodec codecFor(@NonNull JavaTypeT value) { - Preconditions.checkNotNull(value); - 
LOG.trace("[{}] Looking up codec for object {}", logPrefix, value); - - for (TypeCodec primitiveCodec : primitiveCodecs) { - if (primitiveCodec.accepts(value)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - } - for (TypeCodec userCodec : userCodecs) { - if (userCodec.accepts(value)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - - DataType cqlType = inferCqlTypeFromValue(value); - GenericType javaType = inspectType(value, cqlType); - LOG.trace( - "[{}] Continuing based on inferred CQL type {} and Java type {}", - logPrefix, - cqlType, - javaType); - return uncheckedCast(getCachedCodec(cqlType, javaType, true)); - } - - @NonNull - @Override - public TypeCodec codecFor(@NonNull GenericType javaType) { - return codecFor(javaType, false); - } - - // Not exposed publicly, (isJavaCovariant=true) is only used for internal recursion - @NonNull - protected TypeCodec codecFor( - @NonNull GenericType javaType, boolean isJavaCovariant) { - LOG.trace( - "[{}] Looking up codec for Java type {} (covariant = {})", - logPrefix, - javaType, - isJavaCovariant); - for (TypeCodec primitiveCodec : primitiveCodecs) { - if (matches(primitiveCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching primitive codec {}", logPrefix, primitiveCodec); - return uncheckedCast(primitiveCodec); - } - } - for (TypeCodec userCodec : userCodecs) { - if (matches(userCodec, javaType, isJavaCovariant)) { - LOG.trace("[{}] Found matching user codec {}", logPrefix, userCodec); - return uncheckedCast(userCodec); - } - } - return uncheckedCast(getCachedCodec(null, javaType, isJavaCovariant)); - } - - protected boolean matches( - @NonNull TypeCodec codec, @NonNull GenericType javaType, boolean isJavaCovariant) { - return isJavaCovariant ? 
codec.getJavaType().isSupertypeOf(javaType) : codec.accepts(javaType); - } - - @NonNull - protected GenericType inspectType(@NonNull Object value, @Nullable DataType cqlType) { - if (value instanceof List) { - List list = (List) value; - if (list.isEmpty()) { - // Empty collections are always encoded the same way, so any element type will do - // in the absence of a CQL type. When the CQL type is known, we try to infer the best Java - // type. - return cqlType == null ? JAVA_TYPE_FOR_EMPTY_LISTS : inferJavaTypeFromCqlType(cqlType); - } else { - Object firstElement = list.get(0); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer list codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType elementType = - inspectType( - firstElement, cqlType == null ? null : ((ContainerType) cqlType).getElementType()); - return GenericType.listOf(elementType); - } - } else if (value instanceof Set) { - Set set = (Set) value; - if (set.isEmpty()) { - return cqlType == null ? JAVA_TYPE_FOR_EMPTY_SETS : inferJavaTypeFromCqlType(cqlType); - } else { - Object firstElement = set.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer set codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType elementType = - inspectType( - firstElement, cqlType == null ? null : ((SetType) cqlType).getElementType()); - return GenericType.setOf(elementType); - } - } else if (value instanceof Map) { - Map map = (Map) value; - if (map.isEmpty()) { - return cqlType == null ? 
JAVA_TYPE_FOR_EMPTY_MAPS : inferJavaTypeFromCqlType(cqlType); - } else { - Map.Entry firstEntry = map.entrySet().iterator().next(); - Object firstKey = firstEntry.getKey(); - Object firstValue = firstEntry.getValue(); - if (firstKey == null || firstValue == null) { - throw new IllegalArgumentException( - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType keyType = - inspectType(firstKey, cqlType == null ? null : ((MapType) cqlType).getKeyType()); - GenericType valueType = - inspectType(firstValue, cqlType == null ? null : ((MapType) cqlType).getValueType()); - return GenericType.mapOf(keyType, valueType); - } - } else if (value instanceof CqlVector) { - CqlVector vector = (CqlVector) value; - if (vector.isEmpty()) { - return cqlType == null ? JAVA_TYPE_FOR_EMPTY_CQLVECTORS : inferJavaTypeFromCqlType(cqlType); - } else { - Object firstElement = vector.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer vector codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - GenericType elementType = - inspectType( - firstElement, cqlType == null ? 
null : ((VectorType) cqlType).getElementType()); - return GenericType.vectorOf(elementType); - } - } else { - // There's not much more we can do - return GenericType.of(value.getClass()); - } - } - - @NonNull - protected GenericType inferJavaTypeFromCqlType(@NonNull DataType cqlType) { - if (cqlType instanceof ListType) { - DataType elementType = ((ListType) cqlType).getElementType(); - return GenericType.listOf(inferJavaTypeFromCqlType(elementType)); - } else if (cqlType instanceof SetType) { - DataType elementType = ((SetType) cqlType).getElementType(); - return GenericType.setOf(inferJavaTypeFromCqlType(elementType)); - } else if (cqlType instanceof MapType) { - DataType keyType = ((MapType) cqlType).getKeyType(); - DataType valueType = ((MapType) cqlType).getValueType(); - return GenericType.mapOf( - inferJavaTypeFromCqlType(keyType), inferJavaTypeFromCqlType(valueType)); - } else if (cqlType instanceof VectorType) { - DataType elementType = ((VectorType) cqlType).getElementType(); - GenericType numberType = inferJavaTypeFromCqlType(elementType); - return GenericType.vectorOf(numberType); - } - switch (cqlType.getProtocolCode()) { - case ProtocolConstants.DataType.CUSTOM: - case ProtocolConstants.DataType.BLOB: - return GenericType.BYTE_BUFFER; - case ProtocolConstants.DataType.ASCII: - case ProtocolConstants.DataType.VARCHAR: - return GenericType.STRING; - case ProtocolConstants.DataType.BIGINT: - case ProtocolConstants.DataType.COUNTER: - return GenericType.LONG; - case ProtocolConstants.DataType.BOOLEAN: - return GenericType.BOOLEAN; - case ProtocolConstants.DataType.DECIMAL: - return GenericType.BIG_DECIMAL; - case ProtocolConstants.DataType.DOUBLE: - return GenericType.DOUBLE; - case ProtocolConstants.DataType.FLOAT: - return GenericType.FLOAT; - case ProtocolConstants.DataType.INT: - return GenericType.INTEGER; - case ProtocolConstants.DataType.TIMESTAMP: - return GenericType.INSTANT; - case ProtocolConstants.DataType.UUID: - case 
ProtocolConstants.DataType.TIMEUUID: - return GenericType.UUID; - case ProtocolConstants.DataType.VARINT: - return GenericType.BIG_INTEGER; - case ProtocolConstants.DataType.INET: - return GenericType.INET_ADDRESS; - case ProtocolConstants.DataType.DATE: - return GenericType.LOCAL_DATE; - case ProtocolConstants.DataType.TIME: - return GenericType.LOCAL_TIME; - case ProtocolConstants.DataType.SMALLINT: - return GenericType.SHORT; - case ProtocolConstants.DataType.TINYINT: - return GenericType.BYTE; - case ProtocolConstants.DataType.DURATION: - return GenericType.CQL_DURATION; - case ProtocolConstants.DataType.UDT: - return GenericType.UDT_VALUE; - case ProtocolConstants.DataType.TUPLE: - return GenericType.TUPLE_VALUE; - default: - throw new CodecNotFoundException(cqlType, null); - } - } - - @Nullable - protected DataType inferCqlTypeFromValue(@NonNull Object value) { - if (value instanceof List) { - List list = (List) value; - if (list.isEmpty()) { - return CQL_TYPE_FOR_EMPTY_LISTS; - } - Object firstElement = list.get(0); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer list codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType elementType = inferCqlTypeFromValue(firstElement); - if (elementType == null) { - return null; - } - return DataTypes.listOf(elementType); - } else if (value instanceof Set) { - Set set = (Set) value; - if (set.isEmpty()) { - return CQL_TYPE_FOR_EMPTY_SETS; - } - Object firstElement = set.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer set codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType elementType = inferCqlTypeFromValue(firstElement); - if (elementType == null) { - return null; - } - return DataTypes.setOf(elementType); - } else if (value instanceof Map) { - Map map = (Map) value; - if (map.isEmpty()) { - 
return CQL_TYPE_FOR_EMPTY_MAPS; - } - Entry firstEntry = map.entrySet().iterator().next(); - Object firstKey = firstEntry.getKey(); - Object firstValue = firstEntry.getValue(); - if (firstKey == null || firstValue == null) { - throw new IllegalArgumentException( - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType keyType = inferCqlTypeFromValue(firstKey); - DataType valueType = inferCqlTypeFromValue(firstValue); - if (keyType == null || valueType == null) { - return null; - } - return DataTypes.mapOf(keyType, valueType); - } else if (value instanceof CqlVector) { - CqlVector vector = (CqlVector) value; - if (vector.isEmpty()) { - return CQL_TYPE_FOR_EMPTY_VECTORS; - } - Object firstElement = vector.iterator().next(); - if (firstElement == null) { - throw new IllegalArgumentException( - "Can't infer vector codec because the first element is null " - + "(note that CQL does not allow null values in collections)"); - } - DataType elementType = inferCqlTypeFromValue(firstElement); - if (elementType == null) { - return null; - } - return DataTypes.vectorOf(elementType, vector.size()); - } - Class javaClass = value.getClass(); - if (ByteBuffer.class.isAssignableFrom(javaClass)) { - return DataTypes.BLOB; - } else if (String.class.equals(javaClass)) { - return DataTypes.TEXT; - } else if (Long.class.equals(javaClass)) { - return DataTypes.BIGINT; - } else if (Boolean.class.equals(javaClass)) { - return DataTypes.BOOLEAN; - } else if (BigDecimal.class.equals(javaClass)) { - return DataTypes.DECIMAL; - } else if (Double.class.equals(javaClass)) { - return DataTypes.DOUBLE; - } else if (Float.class.equals(javaClass)) { - return DataTypes.FLOAT; - } else if (Integer.class.equals(javaClass)) { - return DataTypes.INT; - } else if (Instant.class.equals(javaClass)) { - return DataTypes.TIMESTAMP; - } else if (UUID.class.equals(javaClass)) { - return DataTypes.UUID; - } else if 
(BigInteger.class.equals(javaClass)) { - return DataTypes.VARINT; - } else if (InetAddress.class.isAssignableFrom(javaClass)) { - return DataTypes.INET; - } else if (LocalDate.class.equals(javaClass)) { - return DataTypes.DATE; - } else if (LocalTime.class.equals(javaClass)) { - return DataTypes.TIME; - } else if (Short.class.equals(javaClass)) { - return DataTypes.SMALLINT; - } else if (Byte.class.equals(javaClass)) { - return DataTypes.TINYINT; - } else if (CqlDuration.class.equals(javaClass)) { - return DataTypes.DURATION; - } else if (UdtValue.class.isAssignableFrom(javaClass)) { - return ((UdtValue) value).getType(); - } else if (TupleValue.class.isAssignableFrom(javaClass)) { - return ((TupleValue) value).getType(); - } - // This might mean that the java type is a custom type with a custom codec, - // so don't throw CodecNotFoundException just yet. - return null; - } - - private TypeCodec getElementCodecForCqlAndJavaType( - ContainerType cqlType, TypeToken token, boolean isJavaCovariant) { - - DataType elementCqlType = cqlType.getElementType(); - if (token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType elementJavaType = GenericType.of(typeArguments[0]); - return uncheckedCast(codecFor(elementCqlType, elementJavaType, isJavaCovariant)); - } - return codecFor(elementCqlType); - } - - private TypeCodec getElementCodecForJavaType( - ParameterizedType parameterizedType, boolean isJavaCovariant) { - - Type[] typeArguments = parameterizedType.getActualTypeArguments(); - GenericType elementType = GenericType.of(typeArguments[0]); - return codecFor(elementType, isJavaCovariant); - } - - // Try to create a codec when we haven't found it in the cache - @NonNull - protected TypeCodec createCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - LOG.trace("[{}] Cache miss, creating codec", logPrefix); - // Either type can be null, but 
not both. - if (javaType == null) { - assert cqlType != null; - return createCodec(cqlType); - } else if (cqlType == null) { - return createCodec(javaType, isJavaCovariant); - } else { // Both non-null - TypeToken token = javaType.__getToken(); - if (cqlType instanceof ListType && List.class.isAssignableFrom(token.getRawType())) { - TypeCodec elementCodec = - getElementCodecForCqlAndJavaType((ContainerType) cqlType, token, isJavaCovariant); - return TypeCodecs.listOf(elementCodec); - } else if (cqlType instanceof SetType && Set.class.isAssignableFrom(token.getRawType())) { - TypeCodec elementCodec = - getElementCodecForCqlAndJavaType((ContainerType) cqlType, token, isJavaCovariant); - return TypeCodecs.setOf(elementCodec); - } else if (cqlType instanceof MapType && Map.class.isAssignableFrom(token.getRawType())) { - DataType keyCqlType = ((MapType) cqlType).getKeyType(); - DataType valueCqlType = ((MapType) cqlType).getValueType(); - TypeCodec keyCodec; - TypeCodec valueCodec; - if (token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType keyJavaType = GenericType.of(typeArguments[0]); - GenericType valueJavaType = GenericType.of(typeArguments[1]); - keyCodec = uncheckedCast(codecFor(keyCqlType, keyJavaType, isJavaCovariant)); - valueCodec = uncheckedCast(codecFor(valueCqlType, valueJavaType, isJavaCovariant)); - } else { - keyCodec = codecFor(keyCqlType); - valueCodec = codecFor(valueCqlType); - } - return TypeCodecs.mapOf(keyCodec, valueCodec); - } else if (cqlType instanceof TupleType - && TupleValue.class.isAssignableFrom(token.getRawType())) { - return TypeCodecs.tupleOf((TupleType) cqlType); - } else if (cqlType instanceof UserDefinedType - && UdtValue.class.isAssignableFrom(token.getRawType())) { - return TypeCodecs.udtOf((UserDefinedType) cqlType); - } else if (cqlType instanceof VectorType - && CqlVector.class.isAssignableFrom(token.getRawType())) { - VectorType 
vectorType = (VectorType) cqlType; - /* For a vector type we'll always get back an instance of TypeCodec due to the - * type of CqlVector... but getElementCodecForCqlAndJavaType() is a generalized function that can't - * return this more precise type. Thus the cast here. */ - TypeCodec elementCodec = - uncheckedCast(getElementCodecForCqlAndJavaType(vectorType, token, isJavaCovariant)); - return TypeCodecs.vectorOf(vectorType, elementCodec); - } else if (cqlType instanceof CustomType - && ByteBuffer.class.isAssignableFrom(token.getRawType())) { - return TypeCodecs.custom(cqlType); - } - throw new CodecNotFoundException(cqlType, javaType); - } - } - - // Try to create a codec when we haven't found it in the cache. - // Variant where the CQL type is unknown. Can be covariant if we come from a lookup by Java value. - @NonNull - protected TypeCodec createCodec(@NonNull GenericType javaType, boolean isJavaCovariant) { - TypeToken token = javaType.__getToken(); - if (List.class.isAssignableFrom(token.getRawType()) - && token.getType() instanceof ParameterizedType) { - TypeCodec elementCodec = - getElementCodecForJavaType((ParameterizedType) token.getType(), isJavaCovariant); - return TypeCodecs.listOf(elementCodec); - } else if (Set.class.isAssignableFrom(token.getRawType()) - && token.getType() instanceof ParameterizedType) { - TypeCodec elementCodec = - getElementCodecForJavaType((ParameterizedType) token.getType(), isJavaCovariant); - return TypeCodecs.setOf(elementCodec); - } else if (Map.class.isAssignableFrom(token.getRawType()) - && token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType keyType = GenericType.of(typeArguments[0]); - GenericType valueType = GenericType.of(typeArguments[1]); - TypeCodec keyCodec = codecFor(keyType, isJavaCovariant); - TypeCodec valueCodec = codecFor(valueType, isJavaCovariant); - return TypeCodecs.mapOf(keyCodec, valueCodec); - } - /* Note 
that this method cannot generate TypeCodec instances for any CqlVector type. VectorCodec needs - * to know the dimensions of the vector it will be operating on and there's no way to determine that from - * the Java type alone. */ - throw new CodecNotFoundException(null, javaType); - } - - // Try to create a codec when we haven't found it in the cache. - // Variant where the Java type is unknown. - @NonNull - protected TypeCodec createCodec(@NonNull DataType cqlType) { - if (cqlType instanceof ListType) { - DataType elementType = ((ListType) cqlType).getElementType(); - TypeCodec elementCodec = codecFor(elementType); - return TypeCodecs.listOf(elementCodec); - } else if (cqlType instanceof SetType) { - DataType elementType = ((SetType) cqlType).getElementType(); - TypeCodec elementCodec = codecFor(elementType); - return TypeCodecs.setOf(elementCodec); - } else if (cqlType instanceof MapType) { - DataType keyType = ((MapType) cqlType).getKeyType(); - DataType valueType = ((MapType) cqlType).getValueType(); - TypeCodec keyCodec = codecFor(keyType); - TypeCodec valueCodec = codecFor(valueType); - return TypeCodecs.mapOf(keyCodec, valueCodec); - } else if (cqlType instanceof VectorType) { - VectorType vectorType = (VectorType) cqlType; - TypeCodec elementCodec = - uncheckedCast(codecFor(vectorType.getElementType())); - return TypeCodecs.vectorOf(vectorType, elementCodec); - } else if (cqlType instanceof TupleType) { - return TypeCodecs.tupleOf((TupleType) cqlType); - } else if (cqlType instanceof UserDefinedType) { - return TypeCodecs.udtOf((UserDefinedType) cqlType); - } else if (cqlType instanceof CustomType) { - return TypeCodecs.custom(cqlType); - } - throw new CodecNotFoundException(cqlType, null); - } - - private static IntMap> sortByProtocolCode(TypeCodec[] codecs) { - IntMap.Builder> builder = IntMap.builder(); - for (TypeCodec codec : codecs) { - builder.put(codec.getCqlType().getProtocolCode(), codec); - } - return builder.build(); - } - - // We call this 
after validating the types, so we know the cast will never fail. - private static TypeCodec uncheckedCast( - TypeCodec codec) { - @SuppressWarnings("unchecked") - TypeCodec result = (TypeCodec) codec; - return result; - } - - // These are mock types that are used as placeholders when we try to find a codec for an empty - // Java collection instance. All empty collections are serialized in the same way, so any element - // type will do: - private static final GenericType> JAVA_TYPE_FOR_EMPTY_LISTS = - GenericType.listOf(Boolean.class); - private static final GenericType> JAVA_TYPE_FOR_EMPTY_SETS = - GenericType.setOf(Boolean.class); - private static final GenericType> JAVA_TYPE_FOR_EMPTY_MAPS = - GenericType.mapOf(Boolean.class, Boolean.class); - private static final GenericType> JAVA_TYPE_FOR_EMPTY_CQLVECTORS = - GenericType.vectorOf(Number.class); - private static final DataType CQL_TYPE_FOR_EMPTY_LISTS = DataTypes.listOf(DataTypes.BOOLEAN); - private static final DataType CQL_TYPE_FOR_EMPTY_SETS = DataTypes.setOf(DataTypes.BOOLEAN); - private static final DataType CQL_TYPE_FOR_EMPTY_MAPS = - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN); - private static final DataType CQL_TYPE_FOR_EMPTY_VECTORS = DataTypes.vectorOf(DataTypes.INT, 0); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java deleted file mode 100644 index bbf77bdf5dc..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; - -public class CodecRegistryConstants { - - /** - * The driver's default primitive codecs (map all primitive CQL types to their "natural" Java - * equivalent). - * - *

This is exposed in case you want to call {@link - * DefaultCodecRegistry#DefaultCodecRegistry(String, int, BiFunction, int, BiConsumer, - * TypeCodec[])} but only customize the caching options. - */ - public static final TypeCodec[] PRIMITIVE_CODECS = - new TypeCodec[] { - // Must be declared before AsciiCodec so it gets chosen when CQL type not available - TypeCodecs.TEXT, - // Must be declared before TimeUUIDCodec so it gets chosen when CQL type not available - TypeCodecs.UUID, - TypeCodecs.TIMEUUID, - TypeCodecs.TIMESTAMP, - TypeCodecs.INT, - TypeCodecs.BIGINT, - TypeCodecs.BLOB, - TypeCodecs.DOUBLE, - TypeCodecs.FLOAT, - TypeCodecs.DECIMAL, - TypeCodecs.VARINT, - TypeCodecs.INET, - TypeCodecs.BOOLEAN, - TypeCodecs.SMALLINT, - TypeCodecs.TINYINT, - TypeCodecs.DATE, - TypeCodecs.TIME, - TypeCodecs.DURATION, - TypeCodecs.COUNTER, - TypeCodecs.ASCII - }; -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java deleted file mode 100644 index cc14740e180..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; -import com.datastax.oss.driver.shaded.guava.common.cache.CacheLoader; -import com.datastax.oss.driver.shaded.guava.common.cache.LoadingCache; -import com.datastax.oss.driver.shaded.guava.common.cache.RemovalListener; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ExecutionError; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.UncheckedExecutionException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The default codec registry implementation. - * - *

It is a caching registry based on Guava cache (note that the driver shades Guava). - */ -@ThreadSafe -public class DefaultCodecRegistry extends CachingCodecRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultCodecRegistry.class); - - private final LoadingCache> cache; - - /** - * Creates a new instance that accepts user codecs, with the default built-in codecs and the - * default cache behavior. - */ - public DefaultCodecRegistry(@NonNull String logPrefix) { - this(logPrefix, CodecRegistryConstants.PRIMITIVE_CODECS); - } - - /** - * Creates a new instance that accepts user codecs, with the given built-in codecs and the default - * cache behavior. - */ - public DefaultCodecRegistry(@NonNull String logPrefix, @NonNull TypeCodec... primitiveCodecs) { - this(logPrefix, 0, null, 0, null, primitiveCodecs); - } - - /** - * Same as {@link #DefaultCodecRegistry(String, TypeCodec[])}, but with some amount of control - * over cache behavior. - * - *

Giving full access to the Guava cache API would be too much work, since it is shaded and we - * have to wrap everything. If you need something that's not available here, it's easy enough to - * write your own CachingCodecRegistry implementation. It's doubtful that stuff like cache - * eviction is that useful anyway. - */ - public DefaultCodecRegistry( - @NonNull String logPrefix, - int initialCacheCapacity, - @Nullable BiFunction, Integer> cacheWeigher, - int maximumCacheWeight, - @Nullable BiConsumer> cacheRemovalListener, - @NonNull TypeCodec... primitiveCodecs) { - - super(logPrefix, primitiveCodecs); - CacheBuilder cacheBuilder = CacheBuilder.newBuilder(); - if (initialCacheCapacity > 0) { - cacheBuilder.initialCapacity(initialCacheCapacity); - } - if (cacheWeigher != null) { - cacheBuilder.weigher(cacheWeigher::apply).maximumWeight(maximumCacheWeight); - } - CacheLoader> cacheLoader = - new CacheLoader>() { - @Override - public TypeCodec load(@NonNull CacheKey key) throws Exception { - return createCodec(key.cqlType, key.javaType, key.isJavaCovariant); - } - }; - if (cacheRemovalListener != null) { - this.cache = - cacheBuilder - .removalListener( - (RemovalListener>) - notification -> - cacheRemovalListener.accept( - notification.getKey(), notification.getValue())) - .build(cacheLoader); - } else { - this.cache = cacheBuilder.build(cacheLoader); - } - } - - @Override - protected TypeCodec getCachedCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - LOG.trace("[{}] Checking cache", logPrefix); - try { - return cache.getUnchecked(new CacheKey(cqlType, javaType, isJavaCovariant)); - } catch (UncheckedExecutionException | ExecutionError e) { - // unwrap exception cause and throw it directly. 
- Throwable cause = e.getCause(); - if (cause != null) { - Throwables.throwIfUnchecked(cause); - throw new DriverExecutionException(cause); - } else { - // Should never happen, throw just in case - throw new RuntimeException(e.getMessage()); - } - } - } - - public static final class CacheKey { - - public final DataType cqlType; - public final GenericType javaType; - public final boolean isJavaCovariant; - - public CacheKey( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - this.javaType = javaType; - this.cqlType = cqlType; - this.isJavaCovariant = isJavaCovariant; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof CacheKey) { - CacheKey that = (CacheKey) other; - return Objects.equals(this.cqlType, that.cqlType) - && Objects.equals(this.javaType, that.javaType) - && this.isJavaCovariant == that.isJavaCovariant; - } else { - return false; - } - } - - @Override - public int hashCode() { - // NOTE: inlined Objects.hash for performance reasons (avoid Object[] allocation - // seen in profiler allocation traces) - return ((31 + Objects.hashCode(cqlType)) * 31 + Objects.hashCode(javaType)) * 31 - + Boolean.hashCode(isJavaCovariant); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java deleted file mode 100644 index 552f84f2ae1..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -package com.datastax.oss.driver.internal.core.type.util; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.nio.ByteBuffer; - -/** - * Variable length encoding inspired from Google varints. - * - *

Cassandra vints are encoded with the most significant group first. The most significant byte - * will contains the information about how many extra bytes need to be read as well as the most - * significant bits of the integer. The number of extra bytes to read is encoded as 1 bit on the - * left side. For example, if we need to read 3 more bytes the first byte will start with 1110. If - * the encoded integer is 8 bytes long the vint will be encoded on 9 bytes and the first byte will - * be: 11111111 - * - *

Signed integers are (like protocol buffer varints) encoded using the ZigZag encoding so that - * numbers with a small absolute value have a small vint encoded value too. - * - *

Note that there is also a type called {@code varint} in the CQL protocol specification. This - * is completely unrelated. - */ -public class VIntCoding { - - private static long readUnsignedVInt(DataInput input) throws IOException { - int firstByte = input.readByte(); - - // Bail out early if this is one byte, necessary or it fails later - if (firstByte >= 0) { - return firstByte; - } - - int size = numberOfExtraBytesToRead(firstByte); - long retval = firstByte & firstByteValueMask(size); - for (int ii = 0; ii < size; ii++) { - byte b = input.readByte(); - retval <<= 8; - retval |= b & 0xff; - } - - return retval; - } - - public static long readVInt(DataInput input) throws IOException { - return decodeZigZag64(readUnsignedVInt(input)); - } - - // & this with the first byte to give the value part for a given extraBytesToRead encoded in the - // byte - private static int firstByteValueMask(int extraBytesToRead) { - // by including the known 0bit in the mask, we can use this for encodeExtraBytesToRead - return 0xff >> extraBytesToRead; - } - - private static byte encodeExtraBytesToRead(int extraBytesToRead) { - // because we have an extra bit in the value mask, we just need to invert it - return (byte) ~firstByteValueMask(extraBytesToRead); - } - - private static int numberOfExtraBytesToRead(int firstByte) { - // we count number of set upper bits; so if we simply invert all of the bits, we're golden - // this is aided by the fact that we only work with negative numbers, so when upcast to an int - // all - // of the new upper bits are also set, so by inverting we set all of them to zero - return Integer.numberOfLeadingZeros(~firstByte) - 24; - } - - private static final ThreadLocal encodingBuffer = - ThreadLocal.withInitial(() -> new byte[9]); - - private static void writeUnsignedVInt(long value, DataOutput output) throws IOException { - int size = VIntCoding.computeUnsignedVIntSize(value); - if (size == 1) { - output.write((int) value); - return; - } - - 
output.write(VIntCoding.encodeVInt(value, size), 0, size); - } - - private static byte[] encodeVInt(long value, int size) { - byte encodingSpace[] = encodingBuffer.get(); - int extraBytes = size - 1; - - for (int i = extraBytes; i >= 0; --i) { - encodingSpace[i] = (byte) value; - value >>= 8; - } - encodingSpace[0] |= encodeExtraBytesToRead(extraBytes); - return encodingSpace; - } - - public static void writeVInt(long value, DataOutput output) throws IOException { - writeUnsignedVInt(encodeZigZag64(value), output); - } - - /** - * Decode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers into values that can be - * efficiently encoded with varint. (Otherwise, negative values must be sign-extended to 64 bits - * to be varint encoded, thus always taking 10 bytes on the wire.) - * - * @param n an unsigned 64-bit integer, stored in a signed int because Java has no explicit - * unsigned support. - * @return a signed 64-bit integer. - */ - private static long decodeZigZag64(final long n) { - return (n >>> 1) ^ -(n & 1); - } - - /** - * Encode a ZigZag-encoded 64-bit value. ZigZag encodes signed integers into values that can be - * efficiently encoded with varint. (Otherwise, negative values must be sign-extended to 64 bits - * to be varint encoded, thus always taking 10 bytes on the wire.) - * - * @param n a signed 64-bit integer. - * @return an unsigned 64-bit integer, stored in a signed int because Java has no explicit - * unsigned support. - */ - private static long encodeZigZag64(final long n) { - // Note: the right-shift must be arithmetic - return (n << 1) ^ (n >> 63); - } - - /** Compute the number of bytes that would be needed to encode a varint. */ - public static int computeVIntSize(final long param) { - return computeUnsignedVIntSize(encodeZigZag64(param)); - } - - /** Compute the number of bytes that would be needed to encode an unsigned varint. 
*/ - public static int computeUnsignedVIntSize(final long value) { - int magnitude = - Long.numberOfLeadingZeros( - value | 1); // | with 1 to ensure magnitude <= 63, so (63 - 1) / 7 <= 8 - return (639 - magnitude * 9) >> 6; - } - - public static void writeUnsignedVInt32(int value, ByteBuffer output) { - writeUnsignedVInt((long) value, output); - } - - public static void writeUnsignedVInt(long value, ByteBuffer output) { - int size = VIntCoding.computeUnsignedVIntSize(value); - if (size == 1) { - output.put((byte) value); - return; - } - - output.put(VIntCoding.encodeVInt(value, size), 0, size); - } - - /** - * Read up to a 32-bit integer back, using the unsigned (no zigzag) encoding. - * - *

Note this method is the same as {@link #readUnsignedVInt(DataInput)}, except that we do - * *not* block if there are not enough bytes in the buffer to reconstruct the value. - * - * @throws VIntOutOfRangeException If the vint doesn't fit into a 32-bit integer - */ - public static int getUnsignedVInt32(ByteBuffer input, int readerIndex) { - return checkedCast(getUnsignedVInt(input, readerIndex)); - } - - public static long getUnsignedVInt(ByteBuffer input, int readerIndex) { - return getUnsignedVInt(input, readerIndex, input.limit()); - } - - public static long getUnsignedVInt(ByteBuffer input, int readerIndex, int readerLimit) { - if (readerIndex < 0) - throw new IllegalArgumentException( - "Reader index should be non-negative, but was " + readerIndex); - - if (readerIndex >= readerLimit) return -1; - - int firstByte = input.get(readerIndex++); - - // Bail out early if this is one byte, necessary or it fails later - if (firstByte >= 0) return firstByte; - - int size = numberOfExtraBytesToRead(firstByte); - if (readerIndex + size > readerLimit) return -1; - - long retval = firstByte & firstByteValueMask(size); - for (int ii = 0; ii < size; ii++) { - byte b = input.get(readerIndex++); - retval <<= 8; - retval |= b & 0xff; - } - - return retval; - } - - public static int checkedCast(long value) { - int result = (int) value; - if ((long) result != value) throw new VIntOutOfRangeException(value); - return result; - } - - /** - * Throw when attempting to decode a vint and the output type doesn't have enough space to fit the - * value that was decoded - */ - public static class VIntOutOfRangeException extends RuntimeException { - public final long value; - - private VIntOutOfRangeException(long value) { - super(value + " is out of range for a 32-bit integer"); - this.value = value; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java deleted 
/** Helpers to parse {@code host:port} strings into socket addresses. */
public class AddressUtils {

  /**
   * Extracts socket addresses from a {@code host:port} string.
   *
   * <p>The host and port are split on the <em>last</em> colon in the string. Note that a bare IPv6
   * literal without a port (e.g. {@code ::1}) would therefore be misparsed; callers are expected to
   * always provide an explicit port.
   *
   * @param address the address in {@code host:port} format.
   * @param resolve whether to resolve the host through DNS: if false, a single unresolved address
   *     is returned; if true, one resolved address per DNS record is returned.
   * @return the extracted addresses, never empty.
   * @throws IllegalArgumentException if there is no colon separator, or the port is not a number.
   * @throws RuntimeException wrapping {@link UnknownHostException} if resolution fails.
   */
  public static Set<InetSocketAddress> extract(String address, boolean resolve) {
    int separator = address.lastIndexOf(':');
    if (separator < 0) {
      throw new IllegalArgumentException("expecting format host:port");
    }

    String host = address.substring(0, separator);
    String portString = address.substring(separator + 1);
    int port;
    try {
      port = Integer.parseInt(portString);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("expecting port to be a number, got " + portString, e);
    }

    if (!resolve) {
      // An immutable one-element set; java.util.Collections is sufficient here, no need for the
      // shaded Guava ImmutableSet.
      return Collections.singleton(InetSocketAddress.createUnresolved(host, port));
    }

    InetAddress[] inetAddresses;
    try {
      inetAddresses = InetAddress.getAllByName(host);
    } catch (UnknownHostException e) {
      throw new RuntimeException(e);
    }
    Set<InetSocketAddress> result = new HashSet<>();
    for (InetAddress inetAddress : inetAddresses) {
      result.add(new InetSocketAddress(inetAddress, port));
    }
    return result;
  }
}
- */ - public static void bubbleUp( - @NonNull ElementT[] elements, int sourceIndex, int targetIndex) { - for (int i = sourceIndex; i > targetIndex; i--) { - swap(elements, i, i - 1); - } - } - - /** - * Moves an element towards the end of the array, shifting all the intermediary elements to the - * left (no-op if {@code targetIndex <= sourceIndex}). - */ - public static void bubbleDown( - @NonNull ElementT[] elements, int sourceIndex, int targetIndex) { - for (int i = sourceIndex; i < targetIndex; i++) { - swap(elements, i, i + 1); - } - } - - /** - * Shuffles the first n elements of the array in-place. - * - * @param elements the array to shuffle. - * @param n the number of elements to shuffle; must be {@code <= elements.length}. - * @see Modern - * Fisher-Yates shuffle - */ - public static void shuffleHead(@NonNull ElementT[] elements, int n) { - shuffleHead(elements, n, ThreadLocalRandom.current()); - } - - /** - * Shuffles the first n elements of the array in-place. - * - * @param elements the array to shuffle. - * @param n the number of elements to shuffle; must be {@code <= elements.length}. - * @param random the {@link ThreadLocalRandom} instance to use. This is mainly intended to - * facilitate tests. - * @see Modern - * Fisher-Yates shuffle - */ - public static void shuffleHead( - @NonNull ElementT[] elements, int n, @NonNull Random random) { - if (n > elements.length) { - throw new ArrayIndexOutOfBoundsException( - String.format( - "Can't shuffle the first %d elements, there are only %d", n, elements.length)); - } - if (n > 1) { - for (int i = n - 1; i > 0; i--) { - int j = random.nextInt(i + 1); - swap(elements, i, j); - } - } - } - - /** Rotates the elements in the specified range by the specified amount (round-robin). */ - public static void rotate( - @NonNull ElementT[] elements, int startIndex, int length, int amount) { - if (length >= 2) { - amount = amount % length; - // Repeatedly shift by 1. 
This is not the most time-efficient but the array will typically be - // small so we don't care, and this avoids allocating a temporary buffer. - for (int i = 0; i < amount; i++) { - bubbleDown(elements, startIndex, startIndex + length - 1); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java deleted file mode 100644 index 0dd9a85fcc6..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
/** Small collection helpers. */
public class CollectionsUtils {

  /**
   * Zips two parallel lists into a map that preserves the iteration order of {@code keys}.
   *
   * @param keys the map keys, in the desired iteration order.
   * @param values the values, positionally matched with {@code keys}.
   * @return a {@link LinkedHashMap} mapping each key to the value at the same index.
   * @throws IllegalArgumentException if the two lists have different sizes.
   */
  public static <K, V> Map<K, V> combineListsIntoOrderedMap(List<K> keys, List<V> values) {
    int size = keys.size();
    if (size != values.size()) {
      throw new IllegalArgumentException("Cannot combine lists with not matching sizes");
    }
    Map<K, V> combined = new LinkedHashMap<>();
    for (int i = 0; i < size; i++) {
      combined.put(keys.get(i), values.get(i));
    }
    return combined;
  }
}
/**
 * An iterator that knows in advance how many elements it will return, and maintains a counter as
 * elements get returned.
 *
 * <p>This class is not thread-safe.
 */
public abstract class CountingIterator<ElementT> implements Iterator<ElementT> {

  // Number of elements not yet returned; decremented by next().
  protected int remaining;

  public CountingIterator(int remaining) {
    this.remaining = remaining;
  }

  public int remaining() {
    return remaining;
  }

  /*
   * The rest of this class was adapted from Guava's `AbstractIterator` (which we can't extend
   * because its `next` method is final). Guava copyright notice follows:
   *
   * Copyright (C) 2007 The Guava Authors
   *
   * Licensed under the Apache License, Version 2.0 (the "License");
   * you may not use this file except in compliance with the License.
   * You may obtain a copy of the License at
   *
   * http://www.apache.org/licenses/LICENSE-2.0
   *
   * Unless required by applicable law or agreed to in writing, software
   * distributed under the License is distributed on an "AS IS" BASIS,
   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   * See the License for the specific language governing permissions and
   * limitations under the License.
   */

  private enum State {
    READY, // the next element is computed and buffered
    NOT_READY, // computeNext() must be called to know whether more elements exist
    DONE, // computeNext() signaled the end of the data
    FAILED, // computeNext() threw; the iterator is unusable
  }

  private State state = State.NOT_READY;
  private ElementT next;

  /**
   * Computes the next element; implementations must call and return {@link #endOfData()} when
   * exhausted.
   */
  protected abstract ElementT computeNext();

  /** Signals the end of iteration; must be invoked (and returned) from {@link #computeNext()}. */
  protected final ElementT endOfData() {
    state = State.DONE;
    return null;
  }

  @Override
  public final boolean hasNext() {
    // Plain check instead of the shaded Guava Preconditions.checkState: it throws the same
    // (message-less) IllegalStateException without pulling in the extra dependency.
    if (state == State.FAILED) {
      throw new IllegalStateException();
    }
    switch (state) {
      case DONE:
        return false;
      case READY:
        return true;
      default:
    }
    return tryToComputeNext();
  }

  private boolean tryToComputeNext() {
    state = State.FAILED; // temporary pessimism; reset below if computeNext() returns normally
    next = computeNext();
    if (state != State.DONE) {
      state = State.READY;
      return true;
    }
    return false;
  }

  @Override
  public final ElementT next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    state = State.NOT_READY;
    ElementT result = next;
    next = null;
    // Added to original Guava code: decrement counter when we return an element
    remaining -= 1;
    return result;
  }

  /** Returns the next element without consuming it. */
  public final ElementT peek() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return next;
  }
}
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.concurrent.ConcurrentHashMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A checker for the presence of various {@link Dependency} instances at runtime. Predicate tests - * for Graal substitutions should NOT use this class; see {@link GraalDependencyChecker} for more - * information. - */ -public class DefaultDependencyChecker { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultDependencyChecker.class); - - private static final ConcurrentHashMap CACHE = new ConcurrentHashMap<>(); - - /** - * Return true iff we can find all classes for the dependency on the classpath, false otherwise - * - * @param dependency the dependency to search for - * @return true if the dependency is available, false otherwise - */ - public static boolean isPresent(Dependency dependency) { - try { - return CACHE.computeIfAbsent( - dependency, - (dep) -> { - for (String classNameToTest : dependency.classes()) { - // Always use the driver class loader, assuming that the driver classes and - // the dependency classes are either being loaded by the same class loader, - // or – as in OSGi deployments – by two distinct, but compatible class loaders. 
- if (Reflection.loadClass(null, classNameToTest) == null) { - return false; - } - } - return true; - }); - } catch (Exception e) { - LOG.warn("Unexpected exception when checking for dependency " + dependency, e); - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java deleted file mode 100644 index 97cfa25d9af..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -/** - * A set of driver optional dependencies and a common mechanism to test the presence of such - * dependencies on the application's classpath. - * - *

/**
 * A set of driver optional dependencies and a common mechanism to test the presence of such
 * dependencies on the application's classpath.
 *
 * <p>We use the given fully-qualified names of classes to test the presence of the whole dependency
 * on the classpath, including its transitive dependencies if applicable. This assumes that if these
 * classes are present, then the entire library is present and functional, and vice versa.
 *
 * <p>Note: some of the libraries declared here may be shaded; in these cases the shade plugin will
 * rewrite the package names listed here to names starting with {@code
 * com.datastax.oss.driver.shaded.*}, but the presence check would still work as expected.
 */
public enum Dependency {
  SNAPPY("org.xerial.snappy.Snappy"),
  LZ4("net.jpountz.lz4.LZ4Compressor"),
  ESRI("com.esri.core.geometry.ogc.OGCGeometry"),
  TINKERPOP(
      // gremlin-core
      "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal",
      // tinkergraph-gremlin
      "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0"),
  REACTIVE_STREAMS("org.reactivestreams.Publisher"),
  JACKSON(
      // jackson-core
      "com.fasterxml.jackson.core.JsonParser",
      // jackson-databind
      "com.fasterxml.jackson.databind.ObjectMapper"),
  DROPWIZARD("com.codahale.metrics.MetricRegistry"),
  ;

  // The marker classes probed to detect this dependency. The list is wrapped unmodifiable and
  // never mutated, hence the Error Prone suppression.
  @SuppressWarnings("ImmutableEnumChecker")
  private final List<String> markerClasses;

  Dependency(String... classNames) {
    this.markerClasses = Collections.unmodifiableList(Arrays.asList(classNames));
  }

  /** Returns the fully-qualified names of the classes probed to detect this dependency. */
  public Iterable<String> classes() {
    return this.markerClasses;
  }
}
/**
 * A basic directed graph implementation to perform topological sorts.
 *
 * <p>This class is not thread-safe, and each instance supports a single sort.
 */
public class DirectedGraph<VertexT> {

  // Predecessor (incoming edge) count per vertex. For simplicity, we store it alongside the
  // vertices themselves, in insertion order.
  private final Map<VertexT, Integer> vertices;
  // Successors of each vertex, in insertion order (JDK replacement for Guava's
  // LinkedHashMultimap, with the same set semantics for duplicate edges).
  private final Map<VertexT, Set<VertexT>> adjacencyList;
  private boolean wasSorted;

  public DirectedGraph(Collection<VertexT> vertices) {
    this.vertices = new LinkedHashMap<>(vertices.size());
    this.adjacencyList = new LinkedHashMap<>();
    for (VertexT vertex : vertices) {
      this.vertices.put(vertex, 0);
    }
  }

  // Convenience constructor, mainly intended for tests.
  @SafeVarargs
  DirectedGraph(VertexT... vertices) {
    this(Arrays.asList(vertices));
  }

  /**
   * Adds an edge; this assumes that {@code from} and {@code to} were part of the vertices passed to
   * the constructor.
   *
   * <p>Adding the same edge twice is a no-op: the predecessor count is only incremented for a new
   * edge. (The previous implementation incremented it unconditionally while the multimap
   * deduplicated the edge, which made {@link #topologicalSort()} wrongly report a cycle.)
   */
  public void addEdge(VertexT from, VertexT to) {
    if (!vertices.containsKey(from) || !vertices.containsKey(to)) {
      throw new IllegalArgumentException();
    }
    if (adjacencyList.computeIfAbsent(from, k -> new LinkedHashSet<>()).add(to)) {
      vertices.put(to, vertices.get(to) + 1);
    }
  }

  /**
   * One-time use only, calling this multiple times on the same graph won't work.
   *
   * @return the vertices in topological order.
   * @throws IllegalStateException if this graph was already sorted.
   * @throws IllegalArgumentException if the graph has a cycle.
   */
  public List<VertexT> topologicalSort() {
    if (wasSorted) {
      throw new IllegalStateException();
    }
    wasSorted = true;

    // Kahn's algorithm: repeatedly emit vertices that have no remaining predecessors.
    Queue<VertexT> queue = new ArrayDeque<>();
    for (Map.Entry<VertexT, Integer> entry : vertices.entrySet()) {
      if (entry.getValue() == 0) {
        queue.add(entry.getKey());
      }
    }

    List<VertexT> result = new ArrayList<>();
    while (!queue.isEmpty()) {
      VertexT vertex = queue.remove();
      result.add(vertex);
      for (VertexT successor : adjacencyList.getOrDefault(vertex, Collections.emptySet())) {
        if (decrementAndGetCount(successor) == 0) {
          queue.add(successor);
        }
      }
    }

    // If some vertices were never emitted, their counts never reached 0: there is a cycle.
    if (result.size() != vertices.size()) {
      throw new IllegalArgumentException("failed to perform topological sort, graph has a cycle");
    }
    return result;
  }

  private int decrementAndGetCount(VertexT vertex) {
    int count = vertices.get(vertex) - 1;
    vertices.put(vertex, count);
    return count;
  }
}
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import java.util.concurrent.ConcurrentHashMap; - -/** - * A dependency checker implementation which should be safe to use for build-time checks when - * building Graal native images. This class is similar to {@link DefaultDependencyChecker} but - * doesn't introduce any external dependencies which might complicate the native image build - * process. Expectation is that this will be most prominently used in the various predicate classes - * which determine whether or not Graal substitutions should be used. - */ -public class GraalDependencyChecker { - - private static final ConcurrentHashMap CACHE = new ConcurrentHashMap<>(); - - /** - * Return true iff we can find all classes for the dependency on the classpath, false otherwise - * - * @param dependency the dependency to search for - * @return true if the dependency is available, false otherwise - */ - public static boolean isPresent(Dependency dependency) { - try { - return CACHE.computeIfAbsent( - dependency, - (dep) -> { - for (String classNameToTest : dependency.classes()) { - // Note that this lands in a pretty similar spot to - // Reflection.loadClass() with a null class loader - // arg. Major difference here is that we avoid the - // more complex exception handling/logging ops in - // that code. 
- try { - Class.forName(classNameToTest); - } catch (LinkageError | Exception e) { - return false; - } - } - return true; - }); - } catch (Exception e) { - return false; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java deleted file mode 100644 index 99dca2c60c0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.data.AccessibleById; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class Loggers { - - /** - * Emits a warning log that includes an exception. If the current level is debug, the full stack - * trace is included, otherwise only the exception's message. - */ - public static void warnWithException(Logger logger, String format, Object... 
arguments) { - if (logger.isDebugEnabled()) { - logger.warn(format, arguments); - } else { - Object last = arguments[arguments.length - 1]; - if (last instanceof Throwable) { - Throwable t = (Throwable) last; - arguments[arguments.length - 1] = t.getClass().getSimpleName() + ": " + t.getMessage(); - logger.warn(format + " ({})", arguments); - } else { - // Should only be called with an exception as last argument, but handle gracefully anyway - logger.warn(format, arguments); - } - } - } - - // Loggers for API interfaces, declared here in order to keep them internal. - public static Logger COLUMN_DEFINITIONS = LoggerFactory.getLogger(ColumnDefinitions.class); - public static Logger ACCESSIBLE_BY_ID = LoggerFactory.getLogger(AccessibleById.class); - public static Logger ACCESSIBLE_BY_NAME = LoggerFactory.getLogger(AccessibleByName.class); - public static Logger USER_DEFINED_TYPE = LoggerFactory.getLogger(UserDefinedType.class); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java deleted file mode 100644 index 0001bc9925c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/** Helpers to measure and pretty-print durations based on {@link System#nanoTime()}. */
public class NanoTime {

  private static final long ONE_HOUR = 3600L * 1000 * 1000 * 1000;
  private static final long ONE_MINUTE = 60L * 1000 * 1000 * 1000;
  private static final long ONE_SECOND = 1000L * 1000 * 1000;
  private static final long ONE_MILLISECOND = 1000L * 1000;
  private static final long ONE_MICROSECOND = 1000L;

  /** Formats the time elapsed since {@code startTimeNs} (a {@link System#nanoTime()} reading). */
  public static String formatTimeSince(long startTimeNs) {
    return format(System.nanoTime() - startTimeNs);
  }

  /** Formats a duration in the best unit (truncating the fractional part). */
  public static String format(long elapsedNs) {
    if (elapsedNs >= ONE_HOUR) {
      long hours = elapsedNs / ONE_HOUR;
      long minutes = (elapsedNs % ONE_HOUR) / ONE_MINUTE;
      return hours + " h " + minutes + " mn";
    } else if (elapsedNs >= ONE_MINUTE) {
      long minutes = elapsedNs / ONE_MINUTE;
      long seconds = (elapsedNs % ONE_MINUTE) / ONE_SECOND;
      return minutes + " mn " + seconds + " s";
    } else if (elapsedNs >= ONE_SECOND) {
      long seconds = elapsedNs / ONE_SECOND;
      long milliseconds = (elapsedNs % ONE_SECOND) / ONE_MILLISECOND;
      // Zero-pad the fractional part: 1 s + 5 ms must render as "1.005 s", not "1.5 s" (which the
      // previous unpadded concatenation produced).
      String fraction = Long.toString(milliseconds);
      while (fraction.length() < 3) {
        fraction = "0" + fraction;
      }
      return seconds + "." + fraction + " s";
    } else if (elapsedNs >= ONE_MILLISECOND) {
      return (elapsedNs / ONE_MILLISECOND) + " ms";
    } else if (elapsedNs >= ONE_MICROSECOND) {
      return (elapsedNs / ONE_MICROSECOND) + " us";
    } else {
      return elapsedNs + " ns";
    }
  }
}

Note that the reason why we don't use enums is because the driver can be extended with - * custom opcodes. - */ - public static String opcodeString(int opcode) { - switch (opcode) { - case ProtocolConstants.Opcode.ERROR: - return "ERROR"; - case ProtocolConstants.Opcode.STARTUP: - return "STARTUP"; - case ProtocolConstants.Opcode.READY: - return "READY"; - case ProtocolConstants.Opcode.AUTHENTICATE: - return "AUTHENTICATE"; - case ProtocolConstants.Opcode.OPTIONS: - return "OPTIONS"; - case ProtocolConstants.Opcode.SUPPORTED: - return "SUPPORTED"; - case ProtocolConstants.Opcode.QUERY: - return "QUERY"; - case ProtocolConstants.Opcode.RESULT: - return "RESULT"; - case ProtocolConstants.Opcode.PREPARE: - return "PREPARE"; - case ProtocolConstants.Opcode.EXECUTE: - return "EXECUTE"; - case ProtocolConstants.Opcode.REGISTER: - return "REGISTER"; - case ProtocolConstants.Opcode.EVENT: - return "EVENT"; - case ProtocolConstants.Opcode.BATCH: - return "BATCH"; - case ProtocolConstants.Opcode.AUTH_CHALLENGE: - return "AUTH_CHALLENGE"; - case ProtocolConstants.Opcode.AUTH_RESPONSE: - return "AUTH_RESPONSE"; - case ProtocolConstants.Opcode.AUTH_SUCCESS: - return "AUTH_SUCCESS"; - default: - return "0x" + Integer.toHexString(opcode); - } - } - - /** - * Formats an error code for logs and error messages. - * - *

Note that the reason why we don't use enums is because the driver can be extended with - * custom codes. - */ - public static String errorCodeString(int errorCode) { - switch (errorCode) { - case ProtocolConstants.ErrorCode.SERVER_ERROR: - return "SERVER_ERROR"; - case ProtocolConstants.ErrorCode.PROTOCOL_ERROR: - return "PROTOCOL_ERROR"; - case ProtocolConstants.ErrorCode.AUTH_ERROR: - return "AUTH_ERROR"; - case ProtocolConstants.ErrorCode.UNAVAILABLE: - return "UNAVAILABLE"; - case ProtocolConstants.ErrorCode.OVERLOADED: - return "OVERLOADED"; - case ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING: - return "IS_BOOTSTRAPPING"; - case ProtocolConstants.ErrorCode.TRUNCATE_ERROR: - return "TRUNCATE_ERROR"; - case ProtocolConstants.ErrorCode.WRITE_TIMEOUT: - return "WRITE_TIMEOUT"; - case ProtocolConstants.ErrorCode.READ_TIMEOUT: - return "READ_TIMEOUT"; - case ProtocolConstants.ErrorCode.READ_FAILURE: - return "READ_FAILURE"; - case ProtocolConstants.ErrorCode.FUNCTION_FAILURE: - return "FUNCTION_FAILURE"; - case ProtocolConstants.ErrorCode.WRITE_FAILURE: - return "WRITE_FAILURE"; - case ProtocolConstants.ErrorCode.CDC_WRITE_FAILURE: - return "CDC_WRITE_FAILURE"; - case ProtocolConstants.ErrorCode.CAS_WRITE_UNKNOWN: - return "CAS_WRITE_UNKNOWN"; - case ProtocolConstants.ErrorCode.SYNTAX_ERROR: - return "SYNTAX_ERROR"; - case ProtocolConstants.ErrorCode.UNAUTHORIZED: - return "UNAUTHORIZED"; - case ProtocolConstants.ErrorCode.INVALID: - return "INVALID"; - case ProtocolConstants.ErrorCode.CONFIG_ERROR: - return "CONFIG_ERROR"; - case ProtocolConstants.ErrorCode.ALREADY_EXISTS: - return "ALREADY_EXISTS"; - case ProtocolConstants.ErrorCode.UNPREPARED: - return "UNPREPARED"; - default: - return "0x" + Integer.toHexString(errorCode); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java deleted file mode 100644 index 75a8f5b7380..00000000000 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class Reflection { - - private static final Logger LOG = LoggerFactory.getLogger(Reflection.class); - - /** - * Loads a class by name using the given {@link ClassLoader}. - * - *

If the class loader is null, the class will be loaded using the class loader that loaded the - * driver. - * - * @return null if the class does not exist or could not be loaded. - */ - @Nullable - public static Class loadClass(@Nullable ClassLoader classLoader, @NonNull String className) { - try { - Class clazz; - if (classLoader == null) { - LOG.trace("Attempting to load {} with driver's class loader", className); - clazz = Class.forName(className); - } else { - LOG.trace("Attempting to load {} with {}", className, classLoader); - clazz = Class.forName(className, true, classLoader); - } - LOG.trace("Successfully loaded {}", className); - return clazz; - } catch (LinkageError | Exception e) { - // Note: only ClassNotFoundException, LinkageError and SecurityException - // are declared to be thrown; however some class loaders (Apache Felix) - // may throw other checked exceptions, which cannot be caught directly - // because that would cause a compilation failure. - LOG.debug( - String.format("Could not load %s with loader %s: %s", className, classLoader, e), e); - if (classLoader == null) { - return null; - } else { - // If the user-supplied class loader is unable to locate the class, try with the driver's - // default class loader. This is useful in OSGi deployments where the user-supplied loader - // may be able to load some classes but not all of them. Besides, the driver bundle, in - // OSGi, has a "Dynamic-Import:*" directive that makes its class loader capable of locating - // a great number of classes. - return loadClass(null, className); - } - } - } - - /** - * Tries to create an instance of a class, given an option defined in the driver configuration. - * - *

For example: - * - *

-   * my-policy.class = my.package.MyPolicyImpl
-   * 
- * - * The class will be instantiated via reflection, it must have a constructor that takes a {@link - * DriverContext} argument. - * - * @param context the driver context. - * @param classNameOption the option that indicates the class. It will be looked up in the default - * profile of the configuration stored in the context. - * @param expectedSuperType a super-type that the class is expected to implement/extend. - * @param defaultPackages the default packages to prepend to the class name if it's not qualified. - * They will be tried in order, the first one that matches an existing class will be used. - * @return the new instance, or empty if {@code classNameOption} is not defined in the - * configuration. - */ - public static Optional buildFromConfig( - InternalDriverContext context, - DriverOption classNameOption, - Class expectedSuperType, - String... defaultPackages) { - return buildFromConfig(context, null, classNameOption, expectedSuperType, defaultPackages); - } - - /** - * Tries to create a list of instances, given an option defined in the driver configuration. - * - *

For example: - * - *

-   * my-policy.classes = [my.package.MyPolicyImpl1,my.package.MyPolicyImpl2]
-   * 
- * - * Each class will be instantiated via reflection, and must have a constructor that takes a {@link - * DriverContext} argument. - * - * @param context the driver context. - * @param classNamesOption the option that indicates the class list. It will be looked up in the - * default profile of the configuration stored in the context. - * @param expectedSuperType a super-type that the classes are expected to implement/extend. - * @param defaultPackages the default packages to prepend to the class names if they are not - * qualified. They will be tried in order, the first one that matches an existing class will - * be used. - * @return the list of new instances, or an empty list if {@code classNamesOption} is not defined - * in the configuration. - */ - public static ImmutableList buildFromConfigList( - InternalDriverContext context, - DriverOption classNamesOption, - Class expectedSuperType, - String... defaultPackages) { - return buildFromConfigList(context, null, classNamesOption, expectedSuperType, defaultPackages); - } - - /** - * Tries to create multiple instances of a class, given options defined in the driver - * configuration and possibly overridden in profiles. - * - *

For example: - * - *

-   * my-policy.class = package1.PolicyImpl1
-   * profiles {
-   *   my-profile { my-policy.class = package2.PolicyImpl2 }
-   * }
-   * 
- * - * The class will be instantiated via reflection, it must have a constructor that takes two - * arguments: the {@link DriverContext}, and a string representing the profile name. - * - *

This method assumes the policy is mandatory, the class option must be present at least for - * the default profile. - * - * @param context the driver context. - * @param classNameOption the option that indicates the class (my-policy.class in the example - * above). - * @param rootOption the root of the section containing the policy's configuration (my-policy in - * the example above). Profiles that have the same contents under that section will share the - * same policy instance. - * @param expectedSuperType a super-type that the class is expected to implement/extend. - * @param defaultPackages the default packages to prepend to the class name if it's not qualified. - * They will be tried in order, the first one that matches an existing class will be used. - * @return the policy instances by profile name. If multiple profiles share the same - * configuration, a single instance will be shared by all their entries. - */ - public static Map buildFromConfigProfiles( - InternalDriverContext context, - DriverOption classNameOption, - DriverOption rootOption, - Class expectedSuperType, - String... 
defaultPackages) { - - // Find out how many distinct configurations we have - ListMultimap profilesByConfig = - MultimapBuilder.hashKeys().arrayListValues().build(); - for (DriverExecutionProfile profile : context.getConfig().getProfiles().values()) { - profilesByConfig.put(profile.getComparisonKey(rootOption), profile.getName()); - } - - // Instantiate each distinct configuration, and associate it with the corresponding profiles - ImmutableMap.Builder result = ImmutableMap.builder(); - for (Collection profiles : profilesByConfig.asMap().values()) { - // Since all profiles use the same config, we can use any of them - String profileName = profiles.iterator().next(); - ComponentT policy = - buildFromConfig(context, profileName, classNameOption, expectedSuperType, defaultPackages) - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing configuration for %s in profile %s", - rootOption.getPath(), profileName))); - for (String profile : profiles) { - result.put(profile, policy); - } - } - return result.build(); - } - - /** - * @param profileName if null, this is a global policy, use the default profile and look for a - * one-arg constructor. If not null, this is a per-profile policy, look for a two-arg - * constructor. - */ - public static Optional buildFromConfig( - InternalDriverContext context, - String profileName, - DriverOption classNameOption, - Class expectedSuperType, - String... defaultPackages) { - - DriverExecutionProfile config = - (profileName == null) - ? 
context.getConfig().getDefaultProfile() - : context.getConfig().getProfile(profileName); - - String configPath = classNameOption.getPath(); - LOG.debug("Creating a {} from config option {}", expectedSuperType.getSimpleName(), configPath); - - if (!config.isDefined(classNameOption)) { - LOG.debug("Option is not defined, skipping"); - return Optional.empty(); - } - - String className = config.getString(classNameOption); - return Optional.of( - resolveClass( - context, profileName, expectedSuperType, configPath, className, defaultPackages)); - } - - /** - * @param profileName if null, this is a global policy, use the default profile and look for a - * one-arg constructor. If not null, this is a per-profile policy, look for a two-arg - * constructor. - */ - public static ImmutableList buildFromConfigList( - InternalDriverContext context, - String profileName, - DriverOption classNamesOption, - Class expectedSuperType, - String... defaultPackages) { - - DriverExecutionProfile config = - (profileName == null) - ? 
context.getConfig().getDefaultProfile() - : context.getConfig().getProfile(profileName); - - String configPath = classNamesOption.getPath(); - LOG.debug( - "Creating a list of {} from config option {}", - expectedSuperType.getSimpleName(), - configPath); - - if (!config.isDefined(classNamesOption)) { - LOG.debug("Option is not defined, skipping"); - return ImmutableList.of(); - } - - List classNames = config.getStringList(classNamesOption); - ImmutableList.Builder components = ImmutableList.builder(); - for (String className : classNames) { - components.add( - resolveClass( - context, profileName, expectedSuperType, configPath, className, defaultPackages)); - } - return components.build(); - } - - @NonNull - private static ComponentT resolveClass( - InternalDriverContext context, - String profileName, - Class expectedSuperType, - String configPath, - String className, - String[] defaultPackages) { - Class clazz = null; - if (className.contains(".")) { - LOG.debug("Building from fully-qualified name {}", className); - clazz = loadClass(context.getClassLoader(), className); - } else { - LOG.debug("Building from unqualified name {}", className); - for (String defaultPackage : defaultPackages) { - String qualifiedClassName = defaultPackage + "." + className; - LOG.debug("Trying with default package {}", qualifiedClassName); - clazz = loadClass(context.getClassLoader(), qualifiedClassName); - if (clazz != null) { - break; - } - } - } - if (clazz == null) { - throw new IllegalArgumentException( - String.format("Can't find class %s (specified by %s)", className, configPath)); - } - Preconditions.checkArgument( - expectedSuperType.isAssignableFrom(clazz), - "Expected class %s (specified by %s) to be a subtype of %s", - className, - configPath, - expectedSuperType.getName()); - - Constructor constructor; - Class[] argumentTypes = - (profileName == null) - ? 
new Class[] {DriverContext.class} - : new Class[] {DriverContext.class, String.class}; - try { - constructor = clazz.asSubclass(expectedSuperType).getConstructor(argumentTypes); - } catch (NoSuchMethodException e) { - throw new IllegalArgumentException( - String.format( - "Expected class %s (specified by %s) " - + "to have an accessible constructor with arguments (%s)", - className, configPath, Joiner.on(',').join(argumentTypes))); - } - try { - @SuppressWarnings("JavaReflectionInvocation") - ComponentT instance = - (profileName == null) - ? constructor.newInstance(context) - : constructor.newInstance(context, profileName); - return instance; - } catch (Exception e) { - // ITE just wraps an exception thrown by the constructor, get rid of it: - Throwable cause = (e instanceof InvocationTargetException) ? e.getCause() : e; - throw new IllegalArgumentException( - String.format( - "Error instantiating class %s (specified by %s): %s", - className, configPath, cause.getMessage()), - cause); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java deleted file mode 100644 index 7d8895d228f..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -public class RoutingKey { - - /** Assembles multiple routing key components into a single buffer. */ - @NonNull - public static ByteBuffer compose(@NonNull ByteBuffer... components) { - if (components.length == 1) return components[0]; - - int totalLength = 0; - for (ByteBuffer bb : components) totalLength += 2 + bb.remaining() + 1; - - ByteBuffer out = ByteBuffer.allocate(totalLength); - for (ByteBuffer buffer : components) { - ByteBuffer bb = buffer.duplicate(); - putShortLength(out, bb.remaining()); - out.put(bb); - out.put((byte) 0); - } - out.flip(); - return out; - } - - private static void putShortLength(ByteBuffer bb, int length) { - bb.put((byte) ((length >> 8) & 0xFF)); - bb.put((byte) (length & 0xFF)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java deleted file mode 100644 index 337895ec107..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.BatchableStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.data.ValuesHelper; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.PrimitiveSizes; -import com.datastax.oss.protocol.internal.request.query.QueryOptions; -import com.datastax.oss.protocol.internal.request.query.Values; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class Sizes { - - /** Returns a common size for all kinds of Request implementations. 
*/ - public static int minimumRequestSize(Request request) { - - // Header and payload are common inside a Frame at the protocol level - - // Frame header has a fixed size of 9 for protocol version >= V3, which includes Frame flags - // size - int size = FrameCodec.V3_ENCODED_HEADER_SIZE; - - if (!request.getCustomPayload().isEmpty()) { - // Custom payload is not supported in v3, but assume user won't have a custom payload set if - // they use this version - size += PrimitiveSizes.sizeOfBytesMap(request.getCustomPayload()); - } - - return size; - } - - public static int minimumStatementSize(Statement statement, DriverContext context) { - int size = minimumRequestSize(statement); - - // These are options in the protocol inside a frame that are common to all Statements - - size += QueryOptions.queryFlagsSize(context.getProtocolVersion().getCode()); - - size += PrimitiveSizes.SHORT; // size of consistency level - size += PrimitiveSizes.SHORT; // size of serial consistency level - - return size; - } - - /** - * Returns the size in bytes of a simple statement's values, depending on whether the values are - * named or positional. 
- */ - public static int sizeOfSimpleStatementValues( - SimpleStatement simpleStatement, - ProtocolVersion protocolVersion, - CodecRegistry codecRegistry) { - int size = 0; - - if (!simpleStatement.getPositionalValues().isEmpty()) { - - List positionalValues = - new ArrayList<>(simpleStatement.getPositionalValues().size()); - for (Object value : simpleStatement.getPositionalValues()) { - positionalValues.add( - ValuesHelper.encodeToDefaultCqlMapping(value, codecRegistry, protocolVersion)); - } - - size += Values.sizeOfPositionalValues(positionalValues); - - } else if (!simpleStatement.getNamedValues().isEmpty()) { - - Map namedValues = new HashMap<>(simpleStatement.getNamedValues().size()); - for (Map.Entry value : simpleStatement.getNamedValues().entrySet()) { - namedValues.put( - value.getKey().asInternal(), - ValuesHelper.encodeToDefaultCqlMapping( - value.getValue(), codecRegistry, protocolVersion)); - } - - size += Values.sizeOfNamedValues(namedValues); - } - return size; - } - - /** Return the size in bytes of a bound statement's values. */ - public static int sizeOfBoundStatementValues(BoundStatement boundStatement) { - return Values.sizeOfPositionalValues(boundStatement.getValues()); - } - - /** - * The size of a statement inside a batch query is different from the size of a complete - * Statement. The inner batch statements only include the query or prepared ID, and the values of - * the statement. 
- */ - public static Integer sizeOfInnerBatchStatementInBytes( - BatchableStatement statement, ProtocolVersion protocolVersion, CodecRegistry codecRegistry) { - int size = 0; - - size += - PrimitiveSizes - .BYTE; // for each inner statement, there is one byte for the "kind": prepared or string - - if (statement instanceof SimpleStatement) { - size += PrimitiveSizes.sizeOfLongString(((SimpleStatement) statement).getQuery()); - size += - sizeOfSimpleStatementValues( - ((SimpleStatement) statement), protocolVersion, codecRegistry); - } else if (statement instanceof BoundStatement) { - size += - PrimitiveSizes.sizeOfShortBytes( - ((BoundStatement) statement).getPreparedStatement().getId()); - size += sizeOfBoundStatementValues(((BoundStatement) statement)); - } - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java deleted file mode 100644 index 2e85b451c75..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Locale; -import java.util.Objects; - -public class Strings { - - /** - * Return {@code true} if the given string is surrounded by single quotes, and {@code false} - * otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded by single quotes, and {@code false} - * otherwise. - */ - public static boolean isQuoted(String value) { - return isQuoted(value, '\''); - } - - /** - * Quote the given string; single quotes are escaped. If the given string is null, this method - * returns a quoted empty string ({@code ''}). - * - * @param value The value to quote. - * @return The quoted string. - */ - public static String quote(String value) { - return quote(value, '\''); - } - - /** - * Unquote the given string if it is quoted; single quotes are unescaped. If the given string is - * not quoted, it is returned without any modification. - * - * @param value The string to unquote. - * @return The unquoted string. - */ - public static String unquote(String value) { - return unquote(value, '\''); - } - - /** - * Return {@code true} if the given string is surrounded by double quotes, and {@code false} - * otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded by double quotes, and {@code false} - * otherwise. - */ - public static boolean isDoubleQuoted(String value) { - return isQuoted(value, '\"'); - } - - /** - * Double quote the given string; double quotes are escaped. If the given string is null, this - * method returns a quoted empty string ({@code ""}). - * - * @param value The value to double quote. - * @return The double quoted string. 
- */ - public static String doubleQuote(String value) { - return quote(value, '"'); - } - - /** - * Unquote the given string if it is double quoted; double quotes are unescaped. If the given - * string is not double quoted, it is returned without any modification. - * - * @param value The string to un-double quote. - * @return The un-double quoted string. - */ - public static String unDoubleQuote(String value) { - return unquote(value, '"'); - } - - /** Whether a string needs double quotes to be a valid CQL identifier. */ - public static boolean needsDoubleQuotes(String s) { - // this method should only be called for C*-provided identifiers, - // so we expect it to be non-null and non-empty. - assert s != null && !s.isEmpty(); - char c = s.charAt(0); - if (!(c >= 97 && c <= 122)) // a-z - return true; - for (int i = 1; i < s.length(); i++) { - c = s.charAt(i); - if (!((c >= 48 && c <= 57) // 0-9 - || (c == 95) // _ - || (c >= 97 && c <= 122) // a-z - )) { - return true; - } - } - return isReservedCqlKeyword(s); - } - - /** - * Return {@code true} if the given string is surrounded by the quote character given, and {@code - * false} otherwise. - * - * @param value The string to inspect. - * @return {@code true} if the given string is surrounded by the quote character, and {@code - * false} otherwise. - */ - private static boolean isQuoted(String value, char quoteChar) { - return value != null - && value.length() > 1 - && value.charAt(0) == quoteChar - && value.charAt(value.length() - 1) == quoteChar; - } - - /** - * @param quoteChar " or ' - * @return A quoted empty string. - */ - private static String emptyQuoted(char quoteChar) { - // don't handle non quote characters, this is done so that these are interned and don't create - // repeated empty quoted strings. - assert quoteChar == '"' || quoteChar == '\''; - if (quoteChar == '"') return "\"\""; - else return "''"; - } - - /** - * Quotes text and escapes any existing quotes in the text. 
{@code String.replace()} is a bit too - * inefficient (see JAVA-67, JAVA-1262). - * - * @param text The text. - * @param quoteChar The character to use as a quote. - * @return The text with surrounded in quotes with all existing quotes escaped with (i.e. ' - * becomes '') - */ - private static String quote(String text, char quoteChar) { - if (text == null || text.isEmpty()) return emptyQuoted(quoteChar); - - int nbMatch = 0; - int start = -1; - do { - start = text.indexOf(quoteChar, start + 1); - if (start != -1) ++nbMatch; - } while (start != -1); - - // no quotes found that need to be escaped, simply surround in quotes and return. - if (nbMatch == 0) return quoteChar + text + quoteChar; - - // 2 for beginning and end quotes. - // length for original text - // nbMatch for escape characters to add to quotes to be escaped. - int newLength = 2 + text.length() + nbMatch; - char[] result = new char[newLength]; - result[0] = quoteChar; - result[newLength - 1] = quoteChar; - int newIdx = 1; - for (int i = 0; i < text.length(); i++) { - char c = text.charAt(i); - if (c == quoteChar) { - // escape quote with another occurrence. - result[newIdx++] = c; - result[newIdx++] = c; - } else { - result[newIdx++] = c; - } - } - return new String(result); - } - - /** - * Unquotes text and unescapes non surrounding quotes. {@code String.replace()} is a bit too - * inefficient (see JAVA-67, JAVA-1262). - * - * @param text The text - * @param quoteChar The character to use as a quote. - * @return The text with surrounding quotes removed and non surrounding quotes unescaped (i.e. '' - * becomes ') - */ - private static String unquote(String text, char quoteChar) { - if (!isQuoted(text, quoteChar)) return text; - - if (text.length() == 2) return ""; - - String search = emptyQuoted(quoteChar); - int nbMatch = 0; - int start = -1; - do { - start = text.indexOf(search, start + 2); - // ignore the second to last character occurrence, as the last character is a quote. 
- if (start != -1 && start != text.length() - 2) ++nbMatch; - } while (start != -1); - - // no escaped quotes found, simply remove surrounding quotes and return. - if (nbMatch == 0) return text.substring(1, text.length() - 1); - - // length of the new string will be its current length - the number of occurrences. - int newLength = text.length() - nbMatch - 2; - char[] result = new char[newLength]; - int newIdx = 0; - // track whenever a quoteChar is encountered and the previous character is not a quoteChar. - boolean firstFound = false; - for (int i = 1; i < text.length() - 1; i++) { - char c = text.charAt(i); - if (c == quoteChar) { - if (firstFound) { - // The previous character was a quoteChar, don't add this to result, this action in - // effect removes consecutive quotes. - firstFound = false; - } else { - // found a quoteChar and the previous character was not a quoteChar, include in result. - firstFound = true; - result[newIdx++] = c; - } - } else { - // non quoteChar encountered, include in result. - result[newIdx++] = c; - firstFound = false; - } - } - return new String(result); - } - - @VisibleForTesting - static boolean isReservedCqlKeyword(String id) { - return id != null && RESERVED_KEYWORDS.contains(id.toLowerCase(Locale.ROOT)); - } - - /** - * Check whether the given string corresponds to a valid CQL long literal. Long literals are - * composed solely by digits, but can have an optional leading minus sign. - * - * @param str The string to inspect. - * @return {@code true} if the given string corresponds to a valid CQL integer literal, {@code - * false} otherwise. - */ - public static boolean isLongLiteral(String str) { - if (str == null || str.isEmpty()) return false; - char[] chars = str.toCharArray(); - for (int i = 0; i < chars.length; i++) { - char c = chars[i]; - if ((c < '0' && (i != 0 || c != '-')) || c > '9') return false; - } - return true; - } - - /** - * Checks whether the given text is not null and not empty. 
- * - * @param text The text to check. - * @param name The name of the argument. - * @return The text (for method chaining). - */ - public static String requireNotEmpty(String text, String name) { - Objects.requireNonNull(text, name + " cannot be null"); - if (text.isEmpty()) { - throw new IllegalArgumentException(name + " cannot be empty"); - } - return text; - } - - private Strings() {} - - private static final ImmutableSet RESERVED_KEYWORDS = - ImmutableSet.of( - // See https://github.com/apache/cassandra/blob/trunk/doc/cql3/CQL.textile#appendixA - "add", - "allow", - "alter", - "and", - "apply", - "asc", - "authorize", - "batch", - "begin", - "by", - "columnfamily", - "create", - "default", - "delete", - "desc", - "describe", - "drop", - "entries", - "execute", - "from", - "full", - "grant", - "if", - "in", - "index", - "infinity", - "insert", - "into", - "is", - "keyspace", - "limit", - "materialized", - "mbean", - "mbeans", - "modify", - "nan", - "norecursive", - "not", - "null", - "of", - "on", - "or", - "order", - "primary", - "rename", - "replace", - "revoke", - "schema", - "select", - "set", - "table", - "to", - "token", - "truncate", - "unlogged", - "unset", - "update", - "use", - "using", - "view", - "where", - "with"); -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java deleted file mode 100644 index 10ca8c0c48d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.AbstractQueue; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; - -/** A query plan that encompasses many child plans, and consumes them one by one. */ -@ThreadSafe -public class CompositeQueryPlan extends AbstractQueue implements QueryPlan { - - private final Queue[] plans; - private final AtomicInteger currentPlan = new AtomicInteger(0); - - @SafeVarargs - public CompositeQueryPlan(@NonNull Queue... 
plans) { - if (plans.length == 0) { - throw new IllegalArgumentException("at least one child plan must be provided"); - } - for (Queue plan : plans) { - if (plan == null) { - throw new NullPointerException("child plan cannot be null"); - } - } - this.plans = plans; - } - - @Nullable - @Override - public Node poll() { - while (true) { - int current = currentPlan.get(); - Queue plan = plans[current]; - Node n = plan.poll(); - if (n != null) { - return n; - } - int next = current + 1; - if (next == plans.length) { - return null; - } - currentPlan.compareAndSet(current, next); - } - } - - @NonNull - @Override - public Iterator iterator() { - List> its = new ArrayList<>(plans.length); - for (Queue plan : plans) { - its.add(plan.iterator()); - } - return Iterators.concat(its.iterator()); - } - - @Override - public int size() { - int size = 0; - for (Queue plan : plans) { - size += plan.size(); - } - return size; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java deleted file mode 100644 index 53177147695..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.AbstractQueue; -import java.util.Collections; -import java.util.Iterator; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -class EmptyQueryPlan extends AbstractQueue implements QueryPlan { - - @Override - public Node poll() { - return null; - } - - @NonNull - @Override - public Iterator iterator() { - return Collections.emptyIterator(); - } - - @Override - public int size() { - return 0; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java deleted file mode 100644 index 075143c2e8d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import net.jcip.annotations.ThreadSafe; - -/** - * A query plan where nodes are computed lazily, when the plan is consumed for the first time. - * - *

This class can be useful when a query plan computation is heavy but the plan has a low chance - * of ever being consumed, e.g. the last query plan in a {@link CompositeQueryPlan}. - */ -@ThreadSafe -public abstract class LazyQueryPlan extends QueryPlanBase { - - private volatile Object[] nodes; - - /** - * Computes and returns the nodes to use for this query plan. - * - *

For efficiency, the declared return type is {@code Object[]} but all elements must be - * instances of {@link Node}. See {@link #getNodes()} for details. - * - *

This method is guaranteed to be invoked only once, at the first call to {@link #poll()}. - * - *

Implementors must avoid blocking calls in this method as it will be invoked on the driver's - * hot path. - */ - protected abstract Object[] computeNodes(); - - @Override - protected Object[] getNodes() { - if (nodes == null) { - synchronized (this) { - if (nodes == null) { - nodes = computeNodes(); - } - } - } - return nodes; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java deleted file mode 100644 index 371e100a0e2..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.Queue; -import net.jcip.annotations.ThreadSafe; - -/** - * A specialized, thread-safe node queue for use when creating {@linkplain - * com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy#newQueryPlan(Request, Session) - * query plans}. - * - *

This interface and its built-in implementations are not general-purpose queues; they are - * tailored for the specific use case of creating query plans in the driver. They make a few - * unconventional API choices for the sake of performance. - * - *

Furthermore, the driver only consumes query plans through calls to its {@link #poll()} method; - * therefore, this method is the only valid mutation operation for a query plan, other mutating - * methods throw. - * - *

Both {@link #size()} and {@link #iterator()} are supported and never throw, even if called - * concurrently. These methods are implemented for reporting purposes only, the driver itself does - * not use them. - * - *

All built-in {@link QueryPlan} implementations can be safely reused for custom load balancing - * policies; if you plan to do so, study the source code of {@link - * com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy} or {@link - * com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy}. - * - * @see QueryPlanBase - */ -@ThreadSafe -public interface QueryPlan extends Queue { - - QueryPlan EMPTY = new EmptyQueryPlan(); - - /** - * {@inheritDoc} - * - *

Implementation note: query plan iterators are snapshots that reflect the contents of the - * queue at the time of the call, and are not affected by further modifications. Successive calls - * to this method will return different objects. - */ - @NonNull - @Override - Iterator iterator(); - - @Override - default boolean offer(Node node) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default Node peek() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default boolean add(Node node) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default Node remove() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - default Node element() { - throw new UnsupportedOperationException("Not implemented"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java deleted file mode 100644 index 43f369f636a..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.AbstractQueue; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.concurrent.atomic.AtomicInteger; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class QueryPlanBase extends AbstractQueue implements QueryPlan { - - private final AtomicInteger nextIndex = new AtomicInteger(); - - /** - * Returns the nodes in this query plan; the returned array should stay the same across - * invocations. - * - *

The declared return type is {@code Object[]} because of implementation details of {@link - * com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy - * DefaultLoadBalancingPolicy} and {@link - * com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy - * BasicLoadBalancingPolicy}, but all elements must be instances of {@link Node}, otherwise - * instance methods will fail later. - */ - protected abstract Object[] getNodes(); - - @Nullable - @Override - public Node poll() { - // We don't handle overflow. In practice it won't be an issue, since the driver stops polling - // once the query plan is empty. - int i = nextIndex.getAndIncrement(); - Object[] nodes = getNodes(); - return (i >= nodes.length) ? null : (Node) nodes[i]; - } - - @NonNull - @Override - public Iterator iterator() { - int i = nextIndex.get(); - Object[] nodes = getNodes(); - if (i >= nodes.length) { - return Collections.emptyIterator(); - } else { - return Iterators.forArray(Arrays.copyOfRange(nodes, i, nodes.length, Node[].class)); - } - } - - @Override - public int size() { - return Math.max(getNodes().length - nextIndex.get(), 0); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java deleted file mode 100644 index 4e0df8d2354..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import edu.umd.cs.findbugs.annotations.NonNull; -import net.jcip.annotations.ThreadSafe; - -/** Query plan where nodes must be provided at construction time. */ -@ThreadSafe -public class SimpleQueryPlan extends QueryPlanBase { - - private final Object[] nodes; - - /** - * Creates a new query plan with the given nodes. - * - *

For efficiency, there is no defensive copy, the provided array is used directly. The - * declared type is {@code Object[]} but all elements must be instances of {@link Node}. See - * {@link #getNodes()} for details. - * - * @param nodes the nodes to initially fill the queue with. - */ - public SimpleQueryPlan(@NonNull Object... nodes) { - this.nodes = nodes; - } - - @Override - protected Object[] getNodes() { - return nodes; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java deleted file mode 100644 index 3f2d10b62e0..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.FastThreadLocalThread; -import java.util.concurrent.ThreadFactory; - -/** - * Safeguards against bad usage patterns in client code that could introduce deadlocks in the - * driver. - * - *

The driver internals are fully asynchronous, nothing should ever block. On the other hand, our - * API exposes synchronous wrappers, that call async methods and wait on the result (as a - * convenience for clients that don't want to do async). These methods should never be called on a - * driver thread, because this can lead to deadlocks. This can happen from client code if it uses - * callbacks. - */ -public class BlockingOperation { - - /** - * This method is invoked from each synchronous driver method, and checks that we are not on a - * driver thread. - * - *

For this to work, all driver threads must be created by {@link SafeThreadFactory} (which is - * the case by default). - * - * @throws IllegalStateException if a driver thread is executing this. - */ - public static void checkNotDriverThread() { - if (Thread.currentThread() instanceof InternalThread) { - throw new IllegalStateException( - "Detected a synchronous API call on a driver thread, " - + "failing because this can cause deadlocks."); - } - } - - /** - * Marks threads as driver threads, so that they will be detected by {@link - * #checkNotDriverThread()} - */ - public static class SafeThreadFactory implements ThreadFactory { - @Override - public Thread newThread(@NonNull Runnable r) { - return new InternalThread(r); - } - } - - static class InternalThread extends FastThreadLocalThread { - private InternalThread(Runnable runnable) { - super(runnable); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java deleted file mode 100644 index 275b2ddfeef..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import java.util.List; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; - -public class CompletableFutures { - - public static CompletableFuture failedFuture(Throwable cause) { - CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(cause); - return future; - } - - /** Completes {@code target} with the outcome of {@code source}. */ - public static void completeFrom(CompletionStage source, CompletableFuture target) { - source.whenComplete( - (t, error) -> { - if (error != null) { - target.completeExceptionally(error); - } else { - target.complete(t); - } - }); - } - - /** @return a completion stage that completes when all inputs are done (success or failure). */ - public static CompletionStage allDone(List> inputs) { - CompletableFuture result = new CompletableFuture<>(); - if (inputs.isEmpty()) { - result.complete(null); - } else { - final int todo = inputs.size(); - final AtomicInteger done = new AtomicInteger(); - for (CompletionStage input : inputs) { - input.whenComplete( - (v, error) -> { - if (done.incrementAndGet() == todo) { - result.complete(null); - } - }); - } - } - return result; - } - - /** Do something when all inputs are done (success or failure). 
*/ - public static void whenAllDone( - List> inputs, Runnable callback, Executor executor) { - allDone(inputs).thenRunAsync(callback, executor).exceptionally(UncaughtExceptions::log); - } - /** - * @return a completion stage that completes when all inputs are successful, or fails if any of - * them failed. - */ - public static CompletionStage allSuccessful(List> inputs) { - CompletableFuture result = new CompletableFuture<>(); - if (inputs.isEmpty()) { - result.complete(null); - } else { - final int todo = inputs.size(); - final AtomicInteger done = new AtomicInteger(); - final CopyOnWriteArrayList errors = new CopyOnWriteArrayList<>(); - for (CompletionStage input : inputs) { - input.whenComplete( - (v, error) -> { - if (error != null) { - errors.add(error); - } - if (done.incrementAndGet() == todo) { - if (errors.isEmpty()) { - result.complete(null); - } else { - Throwable finalError = errors.get(0); - for (int i = 1; i < errors.size(); i++) { - Throwable suppressedError = errors.get(i); - if (finalError != suppressedError) { - finalError.addSuppressed(suppressedError); - } - } - result.completeExceptionally(finalError); - } - } - }); - } - } - return result; - } - - /** Get the result now, when we know for sure that the future is complete. */ - public static T getCompleted(CompletionStage stage) { - CompletableFuture future = stage.toCompletableFuture(); - Preconditions.checkArgument(future.isDone() && !future.isCompletedExceptionally()); - try { - return future.get(); - } catch (InterruptedException | ExecutionException e) { - // Neither can happen given the precondition - throw new AssertionError("Unexpected error", e); - } - } - - /** Get the error now, when we know for sure that the future is failed. 
*/ - public static Throwable getFailed(CompletionStage stage) { - CompletableFuture future = stage.toCompletableFuture(); - Preconditions.checkArgument(future.isCompletedExceptionally()); - try { - future.get(); - throw new AssertionError("future should be failed"); - } catch (InterruptedException e) { - throw new AssertionError("Unexpected error", e); - } catch (ExecutionException e) { - return e.getCause(); - } - } - - public static T getUninterruptibly(CompletionStage stage) { - boolean interrupted = false; - try { - while (true) { - try { - return stage.toCompletableFuture().get(); - } catch (InterruptedException e) { - interrupted = true; - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof DriverException) { - throw ((DriverException) cause).copy(); - } - Throwables.throwIfUnchecked(cause); - throw new DriverExecutionException(cause); - } - } - } finally { - if (interrupted) { - Thread.currentThread().interrupt(); - } - } - } - - /** - * Executes a function on the calling thread and returns result in a {@link CompletableFuture}. - * - *

Similar to {@link CompletableFuture#completedFuture} except takes a {@link Supplier} and if - * the supplier throws an unchecked exception, the returning future fails with that exception. - * - * @param supplier Function to execute - * @param Type of result - * @return result of function wrapped in future - */ - public static CompletableFuture wrap(Supplier supplier) { - try { - return CompletableFuture.completedFuture(supplier.get()); - } catch (Throwable t) { - return failedFuture(t); - } - } - - public static void whenCancelled(CompletionStage stage, Runnable action) { - stage.exceptionally( - (error) -> { - if (error instanceof CancellationException) { - action.run(); - } - return null; - }); - } - - public static void propagateCancellation(CompletionStage source, CompletionStage target) { - whenCancelled(source, () -> target.toCompletableFuture().cancel(true)); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java deleted file mode 100644 index 548ee0bb042..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.graph.Graphs; -import com.datastax.oss.driver.shaded.guava.common.graph.MutableValueGraph; -import com.datastax.oss.driver.shaded.guava.common.graph.ValueGraphBuilder; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** Detects cycles between a set of {@link LazyReference} instances. */ -@ThreadSafe -public class CycleDetector { - private static final boolean ENABLED = - Boolean.getBoolean("com.datastax.oss.driver.DETECT_CYCLES"); - private static final Logger LOG = LoggerFactory.getLogger(CycleDetector.class); - - private final String errorMessage; - private final boolean enabled; - private final MutableValueGraph graph; - - public CycleDetector(String errorMessage) { - this(errorMessage, ENABLED); - } - - @VisibleForTesting - CycleDetector(String errorMessage, boolean enabled) { - this.errorMessage = errorMessage; - this.enabled = enabled; - this.graph = enabled ? 
ValueGraphBuilder.directed().build() : null; - } - - void onTryLock(LazyReference reference) { - if (enabled) { - synchronized (this) { - Thread me = Thread.currentThread(); - LOG.debug("{} wants to initialize {}", me, reference.getName()); - graph.putEdgeValue(me.getName(), reference.getName(), "wants to initialize"); - LOG.debug("{}", graph); - if (Graphs.hasCycle(graph.asGraph())) { - throw new IllegalStateException(errorMessage + " " + graph); - } - } - } - } - - void onLockAcquired(LazyReference reference) { - if (enabled) { - synchronized (this) { - Thread me = Thread.currentThread(); - LOG.debug("{} is initializing {}", me, reference.getName()); - String old = graph.removeEdge(me.getName(), reference.getName()); - assert "wants to initialize".equals(old); - graph.putEdgeValue(reference.getName(), me.getName(), "is getting initialized by"); - } - } - } - - void onReleaseLock(LazyReference reference) { - if (enabled) { - synchronized (this) { - Thread me = Thread.currentThread(); - LOG.debug("{} is done initializing {}", me, reference.getName()); - graph.removeEdge(reference.getName(), me.getName()); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java deleted file mode 100644 index 6bde155858c..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ScheduledFuture; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import java.util.function.Function; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Debounces a sequence of events to smoothen temporary oscillations. - * - *

When a first event is received, the debouncer starts a time window. If no other event is - * received within that window, the initial event is flushed. However, if another event arrives, the - * window is reset, and the next flush will now contain both events. If the window keeps getting - * reset, the debouncer will flush after a given number of accumulated events. - * - * @param the type of the incoming events. - * @param the resulting type after the events of a batch have been coalesced. - */ -@NotThreadSafe // must be confined to adminExecutor -public class Debouncer { - private static final Logger LOG = LoggerFactory.getLogger(Debouncer.class); - - private final String logPrefix; - private final EventExecutor adminExecutor; - private final Consumer onFlush; - private final Duration window; - private final long maxEvents; - private final Function, CoalescedT> coalescer; - - private List currentBatch = new ArrayList<>(); - private ScheduledFuture nextFlush; - private boolean stopped; - - /** - * Creates a new instance. - * - * @param adminExecutor the executor that will be used to schedule all tasks. - * @param coalescer how to transform a batch of events into a result. - * @param onFlush what to do with a result. - * @param window the time window. - * @param maxEvents the maximum number of accumulated events before a flush is forced. - */ - public Debouncer( - EventExecutor adminExecutor, - Function, CoalescedT> coalescer, - Consumer onFlush, - Duration window, - long maxEvents) { - this("debouncer", adminExecutor, coalescer, onFlush, window, maxEvents); - } - - /** - * Creates a new instance. - * - * @param logPrefix the log prefix to use in log messages. - * @param adminExecutor the executor that will be used to schedule all tasks. - * @param coalescer how to transform a batch of events into a result. - * @param onFlush what to do with a result. - * @param window the time window. 
- * @param maxEvents the maximum number of accumulated events before a flush is forced. - */ - public Debouncer( - String logPrefix, - EventExecutor adminExecutor, - Function, CoalescedT> coalescer, - Consumer onFlush, - Duration window, - long maxEvents) { - this.logPrefix = logPrefix; - this.coalescer = coalescer; - Preconditions.checkArgument(maxEvents >= 1, "maxEvents should be at least 1"); - this.adminExecutor = adminExecutor; - this.onFlush = onFlush; - this.window = window; - this.maxEvents = maxEvents; - } - - /** This must be called on eventExecutor too. */ - public void receive(IncomingT element) { - assert adminExecutor.inEventLoop(); - if (stopped) { - return; - } - if (window.isZero() || maxEvents == 1) { - LOG.debug( - "[{}] Received {}, flushing immediately (window = {}, maxEvents = {})", - logPrefix, - element, - window, - maxEvents); - onFlush.accept(coalescer.apply(ImmutableList.of(element))); - } else { - currentBatch.add(element); - if (currentBatch.size() == maxEvents) { - LOG.debug( - "[{}] Received {}, flushing immediately (because {} accumulated events)", - logPrefix, - element, - maxEvents); - flushNow(); - } else { - LOG.debug("[{}] Received {}, scheduling next flush in {}", logPrefix, element, window); - scheduleFlush(); - } - } - } - - public void flushNow() { - assert adminExecutor.inEventLoop(); - LOG.debug("[{}] Flushing now", logPrefix); - cancelNextFlush(); - if (!currentBatch.isEmpty()) { - onFlush.accept(coalescer.apply(currentBatch)); - currentBatch = new ArrayList<>(); - } - } - - private void scheduleFlush() { - assert adminExecutor.inEventLoop(); - cancelNextFlush(); - nextFlush = adminExecutor.schedule(this::flushNow, window.toNanos(), TimeUnit.NANOSECONDS); - nextFlush.addListener(UncaughtExceptions::log); - } - - private void cancelNextFlush() { - assert adminExecutor.inEventLoop(); - if (nextFlush != null && !nextFlush.isDone()) { - boolean cancelled = nextFlush.cancel(true); - if (cancelled) { - LOG.debug("[{}] Cancelled 
existing scheduled flush", logPrefix); - } - } - } - - /** - * Stop debouncing: the next flush is cancelled, and all pending and future events will be - * ignored. - */ - public void stop() { - assert adminExecutor.inEventLoop(); - if (!stopped) { - stopped = true; - cancelNextFlush(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java deleted file mode 100644 index 7d90c50028e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation.InternalThread; -import reactor.blockhound.BlockHound; -import reactor.blockhound.integration.BlockHoundIntegration; - -public final class DriverBlockHoundIntegration implements BlockHoundIntegration { - - @Override - public void applyTo(BlockHound.Builder builder) { - - // disallow blocking operations in driver internal threads by default; - // note that session initialization will happen on one of these threads, which is why - // we need to allow a few blocking calls below. - builder.nonBlockingThreadPredicate(current -> current.or(InternalThread.class::isInstance)); - - // blocking calls in initialization methods - - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.context.DefaultNettyOptions", "createTimer"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.os.Native$LibcLoader", "load"); - builder.allowBlockingCallsInside( - // requires native libraries - "com.datastax.oss.driver.internal.core.time.Clock", "getInstance"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.LazyReference", "get"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "accept"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "markReady"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "start"); - - // called upon initialization but also on topology/status events - - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper$SinglePolicyDistanceReporter", - "setDistance"); - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.pool.ChannelSet", "add"); - 
builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.pool.ChannelSet", "remove"); - - // never called directly by the driver; locks that usually operate with low thread contention - - builder.allowBlockingCallsInside( - "com.datastax.oss.driver.internal.core.type.codec.registry.CachingCodecRegistry", - "register"); - builder.allowBlockingCallsInside( - // requires native libraries, for now because of Uuids.getProcessPiece; if JAVA-1116 gets - // implemented, Uuids.getCurrentTimestamp will also require an exception. Pre-emptively - // protect the whole Uuids.timeBased method. - "com.datastax.oss.driver.api.core.uuid.Uuids", "timeBased"); - - // continuous paging - - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "cancel"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "dequeueOrCreatePending"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "isLastResponse"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onFailure"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onPageTimeout"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onResponse"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - "onStreamIdAssigned"); - builder.allowBlockingCallsInside( - "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", - 
"operationComplete"); - - // Netty extra exceptions - - // see https://github.com/netty/netty/pull/10810 - builder.allowBlockingCallsInside("io.netty.util.HashedWheelTimer", "start"); - builder.allowBlockingCallsInside("io.netty.util.HashedWheelTimer", "stop"); - - // see https://github.com/netty/netty/pull/10811 - builder.allowBlockingCallsInside("io.netty.util.concurrent.GlobalEventExecutor", "addTask"); - builder.allowBlockingCallsInside( - "io.netty.util.concurrent.SingleThreadEventExecutor", "addTask"); - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java deleted file mode 100644 index e04b7647d8e..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; - -/** Holds a reference to an object that is initialized on first access. 
*/ -@ThreadSafe -public class LazyReference { - - private final String name; - private final Supplier supplier; - private final CycleDetector checker; - private volatile T value; - private final ReentrantLock lock = new ReentrantLock(); - - public LazyReference(String name, Supplier supplier, CycleDetector cycleDetector) { - this.name = name; - this.supplier = supplier; - this.checker = cycleDetector; - } - - public LazyReference(Supplier supplier) { - this(null, supplier, null); - } - - public T get() { - T t = value; - if (t == null) { - if (checker != null) { - checker.onTryLock(this); - } - lock.lock(); - try { - if (checker != null) { - checker.onLockAcquired(this); - } - t = value; - if (t == null) { - value = t = supplier.get(); - } - } finally { - if (checker != null) { - checker.onReleaseLock(this); - } - lock.unlock(); - } - } - return t; - } - - public String getName() { - return name; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java deleted file mode 100644 index b854820403d..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; -import io.netty.util.concurrent.Promise; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import net.jcip.annotations.ThreadSafe; - -/** - * A thread-safe version of Netty's {@link io.netty.util.concurrent.PromiseCombiner} that uses - * proper synchronization to trigger the completion of the aggregate promise. - */ -@ThreadSafe -public class PromiseCombiner { - - /** - * Combines the given futures into the given promise, that is, ties the completion of the latter - * to that of the formers. - * - * @param aggregatePromise The promise that will complete when all parents complete. - * @param parents The parent futures. - */ - public static void combine( - @NonNull Promise aggregatePromise, @NonNull Future... 
parents) { - PromiseCombinerListener listener = - new PromiseCombinerListener(aggregatePromise, parents.length); - for (Future parent : parents) { - parent.addListener(listener); - } - } - - private static class PromiseCombinerListener implements GenericFutureListener> { - - private final Promise aggregatePromise; - private final AtomicInteger remainingCount; - private final AtomicReference aggregateFailureRef = new AtomicReference<>(); - - private PromiseCombinerListener(Promise aggregatePromise, int numberOfParents) { - this.aggregatePromise = aggregatePromise; - remainingCount = new AtomicInteger(numberOfParents); - } - - @Override - public void operationComplete(Future future) { - if (!future.isSuccess()) { - aggregateFailureRef.updateAndGet( - aggregateFailure -> { - if (aggregateFailure == null) { - aggregateFailure = future.cause(); - } else { - aggregateFailure.addSuppressed(future.cause()); - } - return aggregateFailure; - }); - } - if (remainingCount.decrementAndGet() == 0) { - Throwable aggregateFailure = aggregateFailureRef.get(); - if (aggregateFailure != null) { - aggregatePromise.tryFailure(aggregateFailure); - } else { - aggregatePromise.trySuccess(null); - } - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java deleted file mode 100644 index 28aaf596705..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy.ReconnectionSchedule; -import com.datastax.oss.driver.internal.core.util.Loggers; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; -import java.time.Duration; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.NotThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A reconnection process that, if failed, is retried periodically according to the intervals - * defined by a policy. - * - *

All the tasks run on a Netty event executor that is provided at construction time. Clients are - * also expected to call the public methods on that thread. - */ -@NotThreadSafe // must be confined to executor -public class Reconnection { - private static final Logger LOG = LoggerFactory.getLogger(Reconnection.class); - - private enum State { - STOPPED, - SCHEDULED, // next attempt scheduled but not started yet - ATTEMPT_IN_PROGRESS, // current attempt started and not completed yet - STOP_AFTER_CURRENT, // stopped, but we're letting an in-progress attempt finish - ; - } - - private final String logPrefix; - private final EventExecutor executor; - private final Supplier scheduleSupplier; - private final Callable> reconnectionTask; - private final Runnable onStart; - private final Runnable onStop; - - private State state = State.STOPPED; - private ReconnectionSchedule reconnectionSchedule; - private ScheduledFuture> nextAttempt; - - /** - * @param reconnectionTask the actual thing to try on a reconnection, returns if it succeeded or - * not. - */ - public Reconnection( - String logPrefix, - EventExecutor executor, - Supplier scheduleSupplier, - Callable> reconnectionTask, - Runnable onStart, - Runnable onStop) { - this.logPrefix = logPrefix; - this.executor = executor; - this.scheduleSupplier = scheduleSupplier; - this.reconnectionTask = reconnectionTask; - this.onStart = onStart; - this.onStop = onStop; - } - - public Reconnection( - String logPrefix, - EventExecutor executor, - Supplier scheduleSupplier, - Callable> reconnectionTask) { - this(logPrefix, executor, scheduleSupplier, reconnectionTask, () -> {}, () -> {}); - } - - /** - * Note that if {@link #stop()} was called but we're still waiting for the last pending attempt to - * complete, this still returns {@code true}. - */ - public boolean isRunning() { - assert executor.inEventLoop(); - return state != State.STOPPED; - } - - /** This is a no-op if the reconnection is already running. 
*/ - public void start() { - start(null); - } - - public void start(ReconnectionSchedule customSchedule) { - assert executor.inEventLoop(); - switch (state) { - case SCHEDULED: - case ATTEMPT_IN_PROGRESS: - // nothing to do - break; - case STOP_AFTER_CURRENT: - // cancel the scheduled stop - state = State.ATTEMPT_IN_PROGRESS; - break; - case STOPPED: - reconnectionSchedule = (customSchedule == null) ? scheduleSupplier.get() : customSchedule; - onStart.run(); - scheduleNextAttempt(); - break; - } - } - - /** - * Forces a reconnection now, without waiting for the next scheduled attempt. - * - * @param forceIfStopped if true and the reconnection is not running, it will get started (meaning - * subsequent reconnections will be scheduled if this attempt fails). If false and the - * reconnection is not running, no attempt is scheduled. - */ - public void reconnectNow(boolean forceIfStopped) { - assert executor.inEventLoop(); - if (state == State.ATTEMPT_IN_PROGRESS || state == State.STOP_AFTER_CURRENT) { - LOG.debug( - "[{}] reconnectNow and current attempt was still running, letting it complete", - logPrefix); - if (state == State.STOP_AFTER_CURRENT) { - // Make sure that we will schedule other attempts if this one fails. 
- state = State.ATTEMPT_IN_PROGRESS; - } - } else if (state == State.STOPPED && !forceIfStopped) { - LOG.debug("[{}] reconnectNow(false) while stopped, nothing to do", logPrefix); - } else { - assert state == State.SCHEDULED || (state == State.STOPPED && forceIfStopped); - LOG.debug("[{}] Forcing next attempt now", logPrefix); - if (nextAttempt != null) { - nextAttempt.cancel(true); - } - try { - onNextAttemptStarted(reconnectionTask.call()); - } catch (Exception e) { - Loggers.warnWithException( - LOG, "[{}] Uncaught error while starting reconnection attempt", logPrefix, e); - scheduleNextAttempt(); - } - } - } - - public void stop() { - assert executor.inEventLoop(); - switch (state) { - case STOPPED: - case STOP_AFTER_CURRENT: - break; - case ATTEMPT_IN_PROGRESS: - state = State.STOP_AFTER_CURRENT; - break; - case SCHEDULED: - reallyStop(); - break; - } - } - - private void reallyStop() { - LOG.debug("[{}] Stopping reconnection", logPrefix); - state = State.STOPPED; - if (nextAttempt != null) { - nextAttempt.cancel(true); - nextAttempt = null; - } - onStop.run(); - reconnectionSchedule = null; - } - - private void scheduleNextAttempt() { - assert executor.inEventLoop(); - state = State.SCHEDULED; - if (reconnectionSchedule == null) { // happens if reconnectNow() while we were stopped - reconnectionSchedule = scheduleSupplier.get(); - } - Duration nextInterval = reconnectionSchedule.nextDelay(); - LOG.debug("[{}] Scheduling next reconnection in {}", logPrefix, nextInterval); - nextAttempt = executor.schedule(reconnectionTask, nextInterval.toNanos(), TimeUnit.NANOSECONDS); - nextAttempt.addListener( - (Future> f) -> { - if (f.isSuccess()) { - onNextAttemptStarted(f.getNow()); - } else if (!f.isCancelled()) { - Loggers.warnWithException( - LOG, - "[{}] Uncaught error while starting reconnection attempt", - logPrefix, - f.cause()); - scheduleNextAttempt(); - } - }); - } - - // When the Callable runs this means the caller has started the attempt, we have yet to wait 
on - // the CompletableFuture to find out if that succeeded or not. - private void onNextAttemptStarted(CompletionStage futureOutcome) { - assert executor.inEventLoop(); - state = State.ATTEMPT_IN_PROGRESS; - futureOutcome - .whenCompleteAsync(this::onNextAttemptCompleted, executor) - .exceptionally(UncaughtExceptions::log); - } - - private void onNextAttemptCompleted(Boolean success, Throwable error) { - assert executor.inEventLoop(); - if (success) { - LOG.debug("[{}] Reconnection successful", logPrefix); - reallyStop(); - } else { - if (error != null && !(error instanceof CancellationException)) { - Loggers.warnWithException( - LOG, "[{}] Uncaught error while starting reconnection attempt", logPrefix, error); - } - if (state == State.STOP_AFTER_CURRENT) { - reallyStop(); - } else { - assert state == State.ATTEMPT_IN_PROGRESS; - scheduleNextAttempt(); - } - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java deleted file mode 100644 index 27ca1b6ff42..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Consumer; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; - -/** - * Filters a list of events, accumulating them during an initialization period. - * - *

It has three states: - * - *

    - *
  • Not started: events are discarded. - *
  • Started: events accumulate but are not propagated to the end consumer yet. - *
  • Ready: all accumulated events are flushed to the end consumer; subsequent events are - * propagated directly. The order of events is preserved at all times. - *
- */ -@ThreadSafe -public class ReplayingEventFilter { - - private enum State { - NEW, - STARTED, - READY - } - - private final Consumer consumer; - - // Exceptionally, we use a lock: it will rarely be contended, and if so for only a short period. - private final ReadWriteLock stateLock = new ReentrantReadWriteLock(); - - @GuardedBy("stateLock") - private State state; - - @GuardedBy("stateLock") - private final List recordedEvents; - - public ReplayingEventFilter(Consumer consumer) { - this.consumer = consumer; - this.state = State.NEW; - this.recordedEvents = new CopyOnWriteArrayList<>(); - } - - public void start() { - stateLock.writeLock().lock(); - try { - state = State.STARTED; - } finally { - stateLock.writeLock().unlock(); - } - } - - public void markReady() { - stateLock.writeLock().lock(); - try { - state = State.READY; - for (EventT event : recordedEvents) { - consumer.accept(event); - } - } finally { - recordedEvents.clear(); - stateLock.writeLock().unlock(); - } - } - - public void accept(EventT event) { - stateLock.readLock().lock(); - try { - switch (state) { - case NEW: - break; - case STARTED: - recordedEvents.add(event); - break; - case READY: - consumer.accept(event); - break; - } - } finally { - stateLock.readLock().unlock(); - } - } - - @VisibleForTesting - public List recordedEvents() { - stateLock.readLock().lock(); - try { - return ImmutableList.copyOf(recordedEvents); - } finally { - stateLock.readLock().unlock(); - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java deleted file mode 100644 index addaf1850bf..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Consumer; - -/** - * Utility to run a task on a Netty event executor (i.e. thread). If we're already on the executor, - * the task is submitted, otherwise it's scheduled. - * - *

Be careful when using this, always keep in mind that the task might be executed synchronously. - * This can lead to subtle bugs when both the calling code and the callback manipulate a collection: - * - *

{@code
- * List> futureFoos;
- *
- * // Scheduled on eventExecutor:
- * for (int i = 0; i < count; i++) {
- *   CompletionStage futureFoo = FooFactory.init();
- *   futureFoos.add(futureFoo);
- *   // futureFoo happens to be complete by now, so callback gets executed immediately
- *   futureFoo.whenComplete(RunOrSchedule.on(eventExecutor, () -> callback(futureFoo)));
- * }
- *
- * private void callback(CompletionStage futureFoo) {
- *    futureFoos.remove(futureFoo); // ConcurrentModificationException!!!
- * }
- * }
- * - * For that kind of situation, it's better to use {@code futureFoo.whenCompleteAsync(theTask, - * eventExecutor)}, so that the task is always scheduled. - */ -public class RunOrSchedule { - - public static void on(EventExecutor executor, Runnable task) { - if (executor.inEventLoop()) { - task.run(); - } else { - executor.submit(task).addListener(UncaughtExceptions::log); - } - } - - public static Consumer on(EventExecutor executor, Consumer task) { - return (t) -> { - if (executor.inEventLoop()) { - task.accept(t); - } else { - executor.submit(() -> task.accept(t)).addListener(UncaughtExceptions::log); - } - }; - } - - public static CompletionStage on( - EventExecutor executor, Callable> task) { - if (executor.inEventLoop()) { - try { - return task.call(); - } catch (Exception e) { - return CompletableFutures.failedFuture(e); - } - } else { - CompletableFuture result = new CompletableFuture<>(); - executor - .submit(task) - .addListener( - (Future> f) -> { - if (f.isSuccess()) { - CompletableFutures.completeFrom(f.getNow(), result); - } else { - result.completeExceptionally(f.cause()); - } - }); - return result; - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java deleted file mode 100644 index 25bce8773e8..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import com.datastax.oss.driver.internal.core.util.Loggers; -import io.netty.util.concurrent.Future; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility methods to log unexpected exceptions in asynchronous tasks. - * - *

Use this whenever you execute a future callback to apply side effects, but throw away the - * future itself: - * - *

{@code
- * CompletionStage futureFoo = FooFactory.build();
- *
- * futureFoo
- *   .whenComplete((f, error) -> { handler code with side effects })
- *   // futureFoo is not propagated, do this or any unexpected error in the handler will be
- *   // swallowed
- *   .exceptionally(UncaughtExceptions::log);
- *
- * // If you return the future, you don't need it (but it's up to the caller to handle a failed
- * // future)
- * return futureFoo.whenComplete(...)
- * }
- */ -public class UncaughtExceptions { - - private static final Logger LOG = LoggerFactory.getLogger(UncaughtExceptions.class); - - public static void log(Future future) { - if (!future.isSuccess() && !future.isCancelled()) { - Loggers.warnWithException(LOG, "Uncaught exception in scheduled task", future.cause()); - } - } - - @SuppressWarnings("TypeParameterUnusedInFormals") // type parameter is only needed for chaining - public static T log(Throwable t) { - Loggers.warnWithException(LOG, "Uncaught exception in scheduled task", t); - return null; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java deleted file mode 100644 index bd0e2590b47..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** Internal utilities specific to Netty. 
*/ -package com.datastax.oss.driver.internal.core.util; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/package-info.java deleted file mode 100644 index 486afc446e3..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/package-info.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Internal implementation details of the driver. - * - *

The types present here (and in subpackages) should not be used from client applications. If - * you decide to use them, do so at your own risk: binary compatibility is best-effort, and we - * reserve the right to break things at any time. Documentation may be sparse. - */ -package com.datastax.oss.driver.internal; diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties deleted file mode 100644 index 2baa59f3b07..00000000000 --- a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -Args=-H:IncludeResources=reference\\.conf \ - -H:IncludeResources=application\\.conf \ - -H:IncludeResources=application\\.json \ - -H:IncludeResources=application\\.properties \ - -H:IncludeResources=.*Driver\\.properties \ - -H:DynamicProxyConfigurationResources=${.}/proxy.json \ - -H:ReflectionConfigurationResources=${.}/reflection.json \ - --initialize-at-build-time=com.datastax.oss.driver.internal.core.util.Dependency diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json deleted file mode 100644 index 37cf6fcf805..00000000000 --- a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - ["java.lang.reflect.TypeVariable"] -] diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json deleted file mode 100644 index 6082b853611..00000000000 --- a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json +++ /dev/null @@ -1,154 +0,0 @@ -[ - { - "name": "com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": 
"com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.time.AtomicTimestampGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.time.ThreadLocalTimestampGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": 
"com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.tracker.RequestLogger", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.addresstranslation.Ec2MultiRegionAddressTranslator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider", - "methods": 
[ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.DefaultMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.NoopMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.DropwizardMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.DefaultMetricIdGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "com.datastax.oss.driver.internal.core.metrics.TaggingMetricIdGenerator", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - }, - { - "name": "io.netty.channel.socket.nio.NioSocketChannel", - "methods": [ { "name": "", "parameterTypes": [] } ] - }, - { - "name": "io.netty.buffer.AbstractByteBufAllocator", - "methods": [ { "name": "toLeakAwareBuffer", "parameterTypes": ["io.netty.buffer.ByteBuf" ] } ] - }, - { - "name": "io.netty.util.ReferenceCountUtil", - "methods": [ { "name": "touch", "parameterTypes": ["java.lang.Object", "java.lang.Object" ] } ] - }, - { - "name" : 
"io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField", - "fields": [ {"name": "producerIndex", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField", - "fields": [ {"name": "producerLimit", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField", - "fields": [ {"name": "consumerIndex", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields", - "fields": [ {"name": "producerIndex", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields", - "fields": [ {"name": "producerLimit", "allowUnsafeAccess": true} ] - }, - { - "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields", - "fields": [ {"name": "consumerIndex", "allowUnsafeAccess": true} ] - } -] diff --git a/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration b/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration deleted file mode 100644 index b848ce24855..00000000000 --- a/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration +++ /dev/null @@ -1 +0,0 @@ -com.datastax.oss.driver.internal.core.util.concurrent.DriverBlockHoundIntegration \ No newline at end of file diff --git a/core/src/main/resources/com/datastax/oss/driver/Driver.properties b/core/src/main/resources/com/datastax/oss/driver/Driver.properties deleted file mode 100644 index 4706afe2da8..00000000000 --- a/core/src/main/resources/com/datastax/oss/driver/Driver.properties +++ /dev/null @@ -1,28 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Note: properties files should be encoded in ISO-8859-1, but we keep this one -# encoded in UTF-8 because that's much easier when building with Maven. - -driver.groupId=${project.groupId} -driver.artifactId=${project.artifactId} -driver.version=${project.version} -# It would be better to use ${project.parent.name} here, but for some reason the bundle plugin -# prevents that from being resolved correctly (unlike the project-level properties above). -# The value is not likely to change, so we simply hard-code it: -driver.name=Apache Cassandra Java Driver diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf deleted file mode 100644 index 4ae83362e29..00000000000 --- a/core/src/main/resources/reference.conf +++ /dev/null @@ -1,2377 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Reference configuration for the Java Driver for Apache Cassandra®. -# -# Unless you use a custom mechanism to load your configuration (see -# SessionBuilder.withConfigLoader), all the values declared here will be used as defaults. You can -# place your own `application.conf` in the classpath to override them. -# -# Options are classified into two categories: -# - basic: what is most likely to be customized first when kickstarting a new application. -# - advanced: more elaborate tuning options, or "expert"-level customizations. -# -# This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md. -datastax-java-driver { - - # BASIC OPTIONS ---------------------------------------------------------------------------------- - - # The contact points to use for the initial connection to the cluster. - # - # These are addresses of Cassandra nodes that the driver uses to discover the cluster topology. - # Only one contact point is required (the driver will retrieve the address of the other nodes - # automatically), but it is usually a good idea to provide more than one contact point, because if - # that single contact point is unavailable, the driver cannot initialize itself correctly. - # - # This must be a list of strings with each contact point specified as "host:port". If the host is - # a DNS name that resolves to multiple A-records, all the corresponding addresses will be used. Do - # not use "localhost" as the host name (since it resolves to both IPv4 and IPv6 addresses on some - # platforms). 
- # - # Note that Cassandra 3 and below requires all nodes in a cluster to share the same port (see - # CASSANDRA-7544). - # - # Contact points can also be provided programmatically when you build a cluster instance. If both - # are specified, they will be merged. If both are absent, the driver will default to - # 127.0.0.1:9042. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // basic.contact-points = [ "127.0.0.1:9042", "127.0.0.2:9042" ] - - # A name that uniquely identifies the driver instance created from this configuration. This is - # used as a prefix for log messages and metrics. - # - # If this option is absent, the driver will generate an identifier composed of the letter 's' - # followed by an incrementing counter. If you provide a different value, try to keep it short to - # keep the logs readable. Also, make sure it is unique: reusing the same value will not break the - # driver, but it will mix up the logs and metrics. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // basic.session-name = my_session - - # The name of the keyspace that the session should initially be connected to. - # - # This expects the same format as in a CQL query: case-sensitive names must be quoted (note that - # the quotes must be escaped in HOCON format). For example: - # session-keyspace = case_insensitive_name - # session-keyspace = \"CaseSensitiveName\" - # - # If this option is absent, the session won't be connected to any keyspace, and you'll have to - # either qualify table names in your queries, or use the per-query keyspace feature available in - # Cassandra 4 and above (see Request.getKeyspace()). - # - # This can also be provided programatically in CqlSessionBuilder. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // basic.session-keyspace = my_keyspace - - # How often the driver tries to reload the configuration. 
- # - # To disable periodic reloading, set this to 0. - # - # Required: yes (unless you pass a different ConfigLoader to the session builder). - # Modifiable at runtime: yes, the new value will be used after the next time the configuration - # gets reloaded. - # Overridable in a profile: no - basic.config-reload-interval = 5 minutes - - basic.request { - # How long the driver waits for a request to complete. This is a global limit on the duration of - # a session.execute() call, including any internal retries the driver might do. - # - # By default, this value is set pretty high to ensure that DDL queries don't time out, in order - # to provide the best experience for new users trying the driver with the out-of-the-box - # configuration. - # For any serious deployment, we recommend that you use separate configuration profiles for DDL - # and DML; you can then set the DML timeout much lower (down to a few milliseconds if needed). - # - # Note that, because timeouts are scheduled on the driver's timer thread, the duration specified - # here must be greater than the timer tick duration defined by the - # advanced.netty.timer.tick-duration setting (see below). If that is not the case, timeouts will - # not be triggered as timely as desired. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - timeout = 2 seconds - - # The consistency level. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - consistency = LOCAL_ONE - - # The page size. This controls how many rows will be retrieved simultaneously in a single - # network roundtrip (the goal being to avoid loading too many results in memory at the same - # time). 
If there are more results, additional requests will be used to retrieve them (either - # automatically if you iterate with the sync API, or explicitly with the async API's - # fetchNextPage method). - # If the value is 0 or negative, it will be ignored and the request will not be paged. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - page-size = 5000 - - # The serial consistency level. - # The allowed values are SERIAL and LOCAL_SERIAL. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - serial-consistency = SERIAL - - # The default idempotence of a request, that will be used for all `Request` instances where - # `isIdempotent()` returns null. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - default-idempotence = false - } - - # The policy that decides the "query plan" for each query; that is, which nodes to try as - # coordinators, and in which order. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single policy instance at runtime. - # If there are multiple load balancing policies in a single driver instance, they work together - # in the following way: - # - each request gets a query plan from its profile's policy (or the default policy if the - # request has no profile, or the profile does not override the policy). 
- # - when the policies assign distances to nodes, the driver uses the closest assigned distance - # for any given node. - basic.load-balancing-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in one of - # the following packages: - # - com.datastax.oss.driver.internal.core.loadbalancing. - # - com.datastax.dse.driver.internal.core.loadbalancing. - # - # The driver provides three implementations out of the box: - # - # - `DefaultLoadBalancingPolicy`: should almost always be used; it requires a local datacenter - # to be specified either programmatically when creating the session, or via the configuration - # option: datastax-java-driver.basic.load-balancing-policy.local-datacenter. It can also - # use a highly efficient slow replica avoidance mechanism, which is by default enabled – see - # the option: datastax-java-driver.basic.load-balancing-policy.slow-replica-avoidance. - # - `DcInferringLoadBalancingPolicy`: similar to `DefaultLoadBalancingPolicy`, but does not - # require a local datacenter to be defined, in which case it will attempt to infer the local - # datacenter from the provided contact points, if possible; if that fails, it will throw an - # error during session initialization. This policy is intended mostly for ETL tools and - # should not be used by normal applications. - # - `BasicLoadBalancingPolicy`: similar to `DefaultLoadBalancingPolicy`, but does not have - # the slow replica avoidance mechanism. More importantly, it is the only policy capable of - # operating without local datacenter defined, in which case it will consider nodes in the - # cluster in a datacenter-agnostic way. Beware that this could cause spikes in - # cross-datacenter traffic! This policy is provided mostly as a starting point for users - # wishing to implement their own load balancing policy; it should not be used as is in normal - # applications. 
- # - # You can also specify a custom class that implements LoadBalancingPolicy and has a public - # constructor with two arguments: the DriverContext and a String representing the profile name. - class = DefaultLoadBalancingPolicy - - # The datacenter that is considered "local": the default policy will only include nodes from - # this datacenter in its query plans. - # - # When using the default policy, this option can only be absent if you specified no contact - # points: in that case, the driver defaults to 127.0.0.1:9042, and that node's datacenter is - # used as the local datacenter. As soon as you provide contact points (either through the - # configuration or through the session builder), you must define the local datacenter - # explicitly, and initialization will fail if this property is absent. In addition, all contact - # points should be from this datacenter; warnings will be logged for nodes that are from a - # different one. - # - # This can also be specified programmatically with SessionBuilder.withLocalDatacenter. If both - # are specified, the programmatic value takes precedence. - // local-datacenter = datacenter1 - - # The class of a custom node distance evaluator. - # - # This option is not required; if present, it must be the fully-qualified name of a class that - # implements `com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator`, and has a - # public constructor taking two arguments: the DriverContext and a String representing the - # profile name. - # - # Alternatively, you can pass an instance of your distance evaluator to - # CqlSession.builder().withNodeDistanceEvaluator(). In that case, this option will be ignored. - # - # The evaluator will be invoked each time the policy processes a topology or state change. The - # evaluator's `evaluateDistance` method will be called with the node affected by the change, and - # the local datacenter name (or null if none is defined). 
If it returns a non-null distance, the - # policy will suggest that distance for the node; if the function returns null, the policy will - # will assign a default distance instead, based on its internal algorithm for computing node - # distances. - // evaluator.class= - - # DEPRECATED. Use evaluator.class instead (see above). If both evaluator.class and filter.class - # are defined, the former wins. - # - # A custom filter to include/exclude nodes. - # - # This option is not required; if present, it must be the fully-qualified name of a class that - # implements `java.util.function.Predicate`, and has a public constructor taking two - # arguments: the DriverContext and a String representing the profile name. - # - # Alternatively, you can pass an instance of your filter to - # CqlSession.builder().withNodeFilter(). In that case, this option will be ignored. - # - # The predicate's `test(Node)` method will be invoked each time the policy processes a - # topology or state change: if it returns false, the node will be set at distance IGNORED - # (meaning the driver won't ever connect to it), and never included in any query plan. - // filter.class= - - # Whether to enable the slow replica avoidance mechanism in DefaultLoadBalancingPolicy. - # - # The default policy always moves replicas first in the query plan (if routing information can - # be determined for the current request). However: - # - if this option is true, it also applies a custom algorithm that takes the responsiveness and - # uptime of each replica into account to order them among each other; - # - if this option is false, replicas are simply shuffled. - # - # If this option is not defined, the driver defaults to true. - slow-replica-avoidance = true - } - basic.cloud { - # The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a - # service. - # This setting must be a valid URL. 
- # If the protocol is not specified, it is implicitly assumed to be the `file://` protocol, - # in which case the value is expected to be a valid path on the local filesystem. - # For example, `/a/path/to/bundle` will be interpreted as `file:/a/path/to/bunde`. - # If the protocol is provided explicitly, then the value will be used as is. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // secure-connect-bundle = /location/of/secure/connect/bundle - } - - # DataStax Insights monitoring. - basic.application { - # The name of the application using the session. - # - # It will be sent in the STARTUP protocol message for each new connection established by the - # driver. - # - # This can also be defined programmatically with DseSessionBuilder.withApplicationName(). If you - # specify both, the programmatic value takes precedence and this option is ignored. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // name = - - # The version of the application using the session. - # - # It will be sent in the STARTUP protocol message for each new connection established by the - # driver. - # - # This can also be defined programmatically with DseSessionBuilder.withApplicationVersion(). If - # you specify both, the programmatic value takes precedence and this option is ignored. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // version = - } - - # Graph (DataStax Enterprise only) - basic.graph { - # The name of the graph targeted by graph statements. - # - # This can also be overridden programmatically with GraphStatement.setGraphName(). If both are - # specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no. In particular, system queries -- such as creating or dropping a graph -- must be - # executed without a graph name (see also basic.graph.is-system-query). 
- # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // name = your-graph-name - - # The traversal source to use for graph statements. - # - # This setting doesn't usually need to change, unless executing OLAP queries, which require the - # traversal source "a". - # - # This can also be overridden programmatically with GraphStatement.setTraversalSource(). If both - # are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - traversal-source = "g" - - # Whether a script statement represents a system query. - # - # Script statements that access the `system` variable *must not* specify a graph name (otherwise - # `system` is not available). However, if your application executes a lot of non-system - # statements, it is convenient to configure basic.graph.name to avoid repeating it every time. - # This option allows you to ignore that global graph name, for example in a specific profile. - # - # This can also be overridden programmatically with ScriptGraphStatement.setSystemQuery(). If - # both are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no (defaults to false) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // is-system-query = false - - # The read consistency level to use for graph statements. - # - # DSE Graph is able to distinguish between read and write timeouts for the internal storage - # queries that will be produced by a traversal. Hence the consistency level for reads and writes - # can be set separately. - # - # This can also be overridden programmatically with GraphStatement.setReadConsistencyLevel(). 
If - # both are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no (defaults to request.basic.consistency) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // read-consistency-level = LOCAL_QUORUM - - # The write consistency level to use for graph statements. - # - # DSE Graph is able to distinguish between read and write timeouts for the internal storage - # queries that will be produced by a traversal. Hence the consistency level for reads and writes - # can be set separately. - # - # This can also be overridden programmatically with GraphStatement.setReadConsistencyLevel(). If - # both are specified, the programmatic value takes precedence, and this option is ignored. - # - # Required: no (defaults to request.basic.consistency) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // write-consistency-level = LOCAL_ONE - - # How long the driver waits for a graph request to complete. This is a global limit on the - # duration of a session.execute() call, including any internal retries the driver might do. - # - # Graph statements behave a bit differently than regular CQL requests (hence this dedicated - # option instead of reusing basic.request.timeout): by default, the client timeout is not set, - # and the driver will just wait as long as needed until the server replies (which is itself - # governed by server-side timeout configuration). - # If you specify a client timeout with this option, then the driver will fail the request after - # the given time; note that the value is also sent along with the request, so that the server - # can also time out early and avoid wasting resources on a response that the client has already - # given up on. - # - # This can also be overridden programmatically with GraphStatement.setTimeout(). 
If both are - # specified, the programmatic value takes precedence, and this option is ignored. - # - # If this value is left unset (default) or is explicitly set to zero, no timeout will be - # applied. - # - # Required: no (defaults to zero - no timeout) - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // timeout = 10 seconds - } - - - # ADVANCED OPTIONS ------------------------------------------------------------------------------- - - # The maximum number of live sessions that are allowed to coexist in a given VM. - # - # This is intended to help detect resource leaks in client applications that create too many - # sessions and/or do not close them correctly. The driver keeps track of the number of live - # sessions in a static variable; if it gets over this threshold, a warning will be logged for - # every new session. - # - # If the value is less than or equal to 0, the feature is disabled: no warning will be issued. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for sessions created after the change. - # Overridable in a profile: no - advanced.session-leak.threshold = 4 - - advanced.connection { - # The timeout to use when establishing driver connections. - # - # This timeout is for controlling how long the driver will wait for the underlying channel - # to actually connect to the server. This is not the time limit for completing protocol - # negotiations, only the time limit for establishing a channel connection. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - connect-timeout = 5 seconds - - # The timeout to use for internal queries that run as part of the initialization process, just - # after we open a connection. If this timeout fires, the initialization of the connection will - # fail. 
If this is the first connection ever, the driver will fail to initialize as well, - # otherwise it will retry the connection later. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - init-query-timeout = 5 seconds - - # The timeout to use when the driver changes the keyspace on a connection at runtime (this - # happens when the client issues a `USE ...` query, and all connections belonging to the current - # session need to be updated). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - set-keyspace-timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - - # The driver maintains a connection pool to each node, according to the distance assigned to it - # by the load balancing policy. - # If the distance is LOCAL, then local.size connections are opened; if the distance is REMOTE, - # then remote.size connections are opened. If the distance is IGNORED, no connections at all - # are maintained. - pool { - # The number of connections in the pool for a node whose distance is LOCAL, that is, a node - # that belongs to the local datacenter, as inferred by the load balancing or defined by the - # option: datastax-java-driver.basic.load-balancing-policy.local-datacenter. - # - # Each connection can handle many concurrent requests, so 1 is generally a good place to - # start. You should only need higher values in very high performance scenarios, where - # connections might start maxing out their I/O thread (see the driver's online manual for - # more tuning instructions). - # - # Required: yes - # Modifiable at runtime: yes; when the change is detected, all active pools will be notified - # and will adjust their size. 
- # Overridable in a profile: no - local.size = 1 - - # The number of connections in the pool for a node whose distance is REMOTE, that is, a node - # that does not belong to the local datacenter. - # - # Note: by default, the built-in load-balancing policies will never assign the REMOTE distance - # to any node, to avoid cross-datacenter network traffic. If you want to change this behavior - # and understand the consequences, configure your policy to accept nodes in remote - # datacenters by adjusting the following advanced options: - # - # - datastax-java-driver.advanced.load-balancing-policy.dc-failover.max-nodes-per-remote-dc - # - datastax-java-driver.advanced.load-balancing-policy.dc-failover.allow-for-local-consistency-levels - # - # Required: yes - # Modifiable at runtime: yes; when the change is detected, all active pools will be notified - # and will adjust their size. - # Overridable in a profile: no - remote.size = 1 - } - - # The maximum number of requests that can be executed concurrently on a connection. This must be - # strictly positive, and less than 32768. - # - # We recommend against changing this value: the default of 1024 is fine for most situations, - # it's a good balance between sufficient concurrency on the client and reasonable pressure on - # the server. If you're looking for a way to limit the global throughput of the session, this is - # not the right way to do it: use a request throttler instead (see the `advanced.throttler` - # section in this configuration). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - max-requests-per-connection = 1024 - - # The maximum number of "orphaned" requests before a connection gets closed automatically. - # - # Sometimes the driver writes to a node but stops listening for a response (for example if the - # request timed out, or was completed by another node). 
But we can't safely reuse the stream id - # on this connection until we know for sure that the server is done with it. Therefore the id is - # marked as "orphaned" until we get a response from the node. - # - # If the response never comes (or is lost because of a network issue), orphaned ids can - # accumulate over time, eventually affecting the connection's throughput. So we monitor them - # and close the connection above a given threshold (the pool will replace it). - # - # The value must be lower than `max-requests-per-connection`. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - max-orphan-requests = 256 - - # Whether to log non-fatal errors when the driver tries to open a new connection. - # - # These errors are recoverable, as the driver will try to reconnect according to the reconnection - # policy. Therefore some users see them as unnecessary clutter in the logs. On the other hand, - # those logs can be handy to debug a misbehaving node. - # - # Note that some types of errors are always logged, regardless of this option: - # - protocol version mismatches (the node gets forced down) - # - when the cluster name in system.local doesn't match the other nodes (the node gets forced - # down) - # - authentication errors (will be retried) - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - warn-on-init-error = true - } - - # Advanced options for the built-in load-balancing policies. - advanced.load-balancing-policy { - # Cross-datacenter failover configuration: configure the load-balancing policies to use nodes - # in remote datacenters. - dc-failover { - # The maximum number of nodes to contact in each remote datacenter. - # - # By default, this number is zero, to avoid cross-datacenter network traffic. 
When this - # number is greater than zero: - # - # - The load policies will assign the REMOTE distance to that many nodes in each remote - # datacenter. - # - The driver will then attempt to open connections to those nodes. The actual number of - # connections to open to each one of those nodes is configurable via the option: - # datastax-java-driver.advanced.connection.pool.remote.size. - # - The load-balancing policies will include those remote nodes (and only those) in query - # plans, effectively enabling cross-datacenter failover. - # - # Beware that enabling such failover can result in cross-datacenter network traffic spikes, - # if the local datacenter is down or experiencing high latencies! - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: yes - max-nodes-per-remote-dc = 0 - - # Whether cross-datacenter failover should be allowed for requests executed with local - # consistency levels (LOCAL_ONE, LOCAL_QUORUM and LOCAL_SERIAL). - # - # This is disabled by default. Enabling this feature may have unexpected results, since a - # local consistency level may have different semantics depending on the replication factor in - # use in each datacenter. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: yes - allow-for-local-consistency-levels = false - - # Ordered preference list of remote dc's (in order) optionally supplied for automatic failover. While building a query plan, the driver uses the DC's supplied in order together with max-nodes-per-remote-dc - # Users are not required to specify all DCs, when listing preferences via this config - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - preferred-remote-dcs = [""] - } - } - - # Whether to schedule reconnection attempts if all contact points are unreachable on the first - # initialization attempt. - # - # If this is true, the driver will retry according to the reconnection policy. 
The - # `SessionBuilder.build()` call -- or the future returned by `SessionBuilder.buildAsync()` -- - # won't complete until a contact point has been reached. - # - # If this is false and no contact points are available, the driver will fail with an - # AllNodesFailedException. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - advanced.reconnect-on-init = false - - # The policy that controls how often the driver tries to re-establish connections to down nodes. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - advanced.reconnection-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.connection. - # - # The driver provides two implementations out of the box: ExponentialReconnectionPolicy and - # ConstantReconnectionPolicy. - # - # You can also specify a custom class that implements ReconnectionPolicy and has a public - # constructor with a DriverContext argument. - class = ExponentialReconnectionPolicy - - # ExponentialReconnectionPolicy starts with the base delay, and doubles it after each failed - # reconnection attempt, up to the maximum delay (after that it stays constant). - # - # ConstantReconnectionPolicy only uses the base-delay value, the interval never changes. - base-delay = 1 second - max-delay = 60 seconds - } - - # The policy that controls if the driver retries requests that have failed on one node. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. 
Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single policy instance at runtime. - advanced.retry-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.retry. - # - # The driver provides two implementations out of the box: - # - # - DefaultRetryPolicy: the default policy, should almost always be the right choice. - # - ConsistencyDowngradingRetryPolicy: an alternative policy that weakens consistency guarantees - # as a trade-off to maximize the chance of success when retrying. Use with caution. - # - # Refer to the manual to understand how these policies work. - # - # You can also specify a custom class that implements RetryPolicy and has a public constructor - # with two arguments: the DriverContext and a String representing the profile name. - class = DefaultRetryPolicy - } - - # The policy that controls if the driver pre-emptively tries other nodes if a node takes too long - # to respond. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single policy instance at runtime. - advanced.speculative-execution-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.specex. 
- # - # The following implementations are available out of the box: - # - NoSpeculativeExecutionPolicy: never schedule any speculative execution - # - ConstantSpeculativeExecutionPolicy: schedule executions based on constant delays. This - # requires the `max-executions` and `delay` options below. - # - # You can also specify a custom class that implements SpeculativeExecutionPolicy and has a - # public constructor with two arguments: the DriverContext and a String representing the - # profile name. - class = NoSpeculativeExecutionPolicy - - # The maximum number of executions (including the initial, non-speculative execution). - # This must be at least one. - // max-executions = 3 - - # The delay between each execution. 0 is allowed, and will result in all executions being sent - # simultaneously when the request starts. - # - # Note that sub-millisecond precision is not supported, any excess precision information will be - # dropped; in particular, delays of less than 1 millisecond are equivalent to 0. - # - # Also note that, because speculative executions are scheduled on the driver's timer thread, - # the duration specified here must be greater than the timer tick duration defined by the - # advanced.netty.timer.tick-duration setting (see below). If that is not the case, speculative - # executions will not be triggered as timely as desired. - # - # This must be positive or 0. - // delay = 100 milliseconds - } - - # The component that handles authentication on each new connection. - # - # Required: no. If the 'class' child option is absent, no authentication will occur. - # Modifiable at runtime: no - # Overridable in a profile: no - # - # Note that the contents of this section can be overridden programmatically with - # SessionBuilder.withAuthProvider or SessionBuilder.withAuthCredentials. - advanced.auth-provider { - # The class of the provider. 
If it is not qualified, the driver assumes that it resides in one - # of the following packages: - # - com.datastax.oss.driver.internal.core.auth - # - com.datastax.dse.driver.internal.core.auth - # - # The driver provides two implementations: - # - PlainTextAuthProvider: uses plain-text credentials. It requires the `username` and - # `password` options below. When connecting to DataStax Enterprise, an optional - # `authorization-id` can also be specified. - # For backward compatibility with previous driver versions, you can also use the class name - # "DsePlainTextAuthProvider" for this provider. - # - DseGssApiAuthProvider: provides GSSAPI authentication for DSE clusters secured with - # DseAuthenticator. See the example below and refer to the manual for detailed instructions. - # - # You can also specify a custom class that implements AuthProvider and has a public constructor - # with a DriverContext argument (to simplify this, the driver provides two abstract classes that - # can be extended: PlainTextAuthProviderBase and DseGssApiAuthProviderBase). - # - # Finally, you can configure a provider instance programmatically with - # DseSessionBuilder#withAuthProvider. In that case, it will take precedence over the - # configuration. - // class = PlainTextAuthProvider - # - # Sample configuration for plain-text authentication providers: - // username = cassandra - // password = cassandra - # - # Proxy authentication: allows to login as another user or role (valid for both - # PlainTextAuthProvider and DseGssApiAuthProvider): - // authorization-id = userOrRole - # - # The settings below are only applicable to DseGssApiAuthProvider: - # - # Service name. For example, if in your dse.yaml configuration file the - # "kerberos_options/service_principal" setting is "cassandra/my.host.com@MY.REALM.COM", then set - # this option to "cassandra". 
If this value is not explicitly set via configuration (in an - # application.conf or programmatically), the driver will attempt to set it via a System - # property. The property should be "dse.sasl.service". For backwards compatibility with 1.x - # versions of the driver, if "dse.sasl.service" is not set as a System property, the driver will - # attempt to use "dse.sasl.protocol" as a fallback (which is the property for the 1.x driver). - //service = "cassandra" - # - # Login configuration. It is also possible to provide login configuration through a standard - # JAAS configuration file. The below configuration is just an example, see all possible options - # here: - # https://docs.oracle.com/javase/6/docs/jre/api/security/jaas/spec/com/sun/security/auth/module/Krb5LoginModule.html - // login-configuration { - // principal = "cassandra@DATASTAX.COM" - // useKeyTab = "true" - // refreshKrb5Config = "true" - // keyTab = "/path/to/keytab/file" - // } - # - # Internal SASL properties, if any, such as QOP. - // sasl-properties { - // javax.security.sasl.qop = "auth-conf" - // } - } - - # The SSL engine factory that will initialize an SSL engine for each new connection to a server. - # - # Required: no. If the 'class' child option is absent, SSL won't be activated. - # Modifiable at runtime: no - # Overridable in a profile: no - # - # Note that the contents of this section can be overridden programmatically with - # SessionBuilder.withSslEngineFactory or SessionBuilder#withSslContext. - advanced.ssl-engine-factory { - # The class of the factory. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.ssl. - # - # The driver provides a single implementation out of the box: DefaultSslEngineFactory, that uses - # the JDK's built-in SSL implementation. - # - # You can also specify a custom class that implements SslEngineFactory and has a public - # constructor with a DriverContext argument. 
- // class = DefaultSslEngineFactory - - # Sample configuration for the default SSL factory: - # The cipher suites to enable when creating an SSLEngine for a connection. - # This property is optional. If it is not present, the driver won't explicitly enable cipher - # suites on the engine, which according to the JDK documentations results in "a minimum quality - # of service". - // cipher-suites = [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ] - - # Whether or not to require validation that the hostname of the server certificate's common - # name matches the hostname of the server being connected to. If not set, defaults to true. - // hostname-validation = true - - # Whether or not to allow a DNS reverse-lookup of provided server addresses for SAN addresses, - # if cluster endpoints are specified as literal IPs. - # This is left as true for compatibility, but in most environments a DNS reverse-lookup should - # not be necessary to get an address that matches the server certificate SANs. - // allow-dns-reverse-lookup-san = true - - # The locations and passwords used to access truststore and keystore contents. - # These properties are optional. If either truststore-path or keystore-path are specified, - # the driver builds an SSLContext from these files. If neither option is specified, the - # default SSLContext is used, which is based on system property configuration. - // truststore-path = /path/to/client.truststore - // truststore-password = password123 - // keystore-path = /path/to/client.keystore - // keystore-password = password123 - - # The duration between attempts to reload the keystore from the contents of the file specified - # by `keystore-path`. This is mainly relevant in environments where certificates have short - # lifetimes and applications are restarted infrequently, since an expired client certificate - # will prevent new connections from being established until the application is restarted. 
If - # not set, defaults to not reload the keystore. - // keystore-reload-interval = 30 minutes - } - - # The generator that assigns a microsecond timestamp to each request. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: yes. Note that the driver creates as few instances as possible: if a - # named profile inherits from the default profile, or if two sibling profiles have the exact - # same configuration, they will share a single generator instance at runtime. - advanced.timestamp-generator { - # The class of the generator. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.time. - # - # The driver provides the following implementations out of the box: - # - AtomicTimestampGenerator: timestamps are guaranteed to be unique across all client threads. - # - ThreadLocalTimestampGenerator: timestamps that are guaranteed to be unique within each - # thread only. - # - ServerSideTimestampGenerator: do not generate timestamps, let the server assign them. - # - # You can also specify a custom class that implements TimestampGenerator and has a public - # constructor with two arguments: the DriverContext and a String representing the profile name. - class = AtomicTimestampGenerator - - # To guarantee that queries are applied on the server in the same order as the client issued - # them, timestamps must be strictly increasing. But this means that, if the driver sends more - # than one query per microsecond, timestamps will drift in the future. While this could happen - # occasionally under high load, it should not be a regular occurrence. Therefore the built-in - # implementations log a warning to detect potential issues. - drift-warning { - # How far in the future timestamps are allowed to drift before the warning is logged. 
- # If it is undefined or set to 0, warnings are disabled. - threshold = 1 second - - # How often the warning will be logged if timestamps keep drifting above the threshold. - interval = 10 seconds - } - - # Whether to force the driver to use Java's millisecond-precision system clock. - # If this is false, the driver will try to access the microsecond-precision OS clock via native - # calls (and fallback to the Java one if the native calls fail). - # Unless you explicitly want to avoid native calls, there's no reason to change this. - force-java-clock = false - } - - # Request trackers are session-wide components that get notified of the outcome of requests. - advanced.request-tracker { - # The list of trackers to register. - # - # This must be a list of class names, either fully-qualified or non-qualified; if the latter, - # the driver assumes that the class resides in the package - # com.datastax.oss.driver.internal.core.tracker. - # - # All classes specified here must implement - # com.datastax.oss.driver.api.core.tracker.RequestTracker and have a public constructor with a - # DriverContext argument. - # - # The driver provides the following implementation out of the box: - # - RequestLogger: logs requests (see the parameters below). - # - # You can also pass instances of your trackers programmatically with - # CqlSession.builder().addRequestTracker(). - # - # Required: no - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - #classes = [RequestLogger,com.example.app.MyTracker] - - # Parameters for RequestLogger. All of them can be overridden in a profile, and changed at - # runtime (the new values will be taken into account for requests logged after the change). - logs { - # Whether to log successful requests. - // success.enabled = true - - slow { - # The threshold to classify a successful request as "slow". 
If this is unset, all successful - # requests will be considered as normal. - // threshold = 1 second - - # Whether to log slow requests. - // enabled = true - } - - # Whether to log failed requests. - // error.enabled = true - - # The maximum length of the query string in the log message. If it is longer than that, it - # will be truncated. - // max-query-length = 500 - - # Whether to log bound values in addition to the query string. - // show-values = true - - # The maximum length for bound values in the log message. If the formatted representation of a - # value is longer than that, it will be truncated. - // max-value-length = 50 - - # The maximum number of bound values to log. If a request has more values, the list of values - # will be truncated. - // max-values = 50 - - # Whether to log stack traces for failed queries. If this is disabled, the log will just - # include the exception's string representation (generally the class name and message). - // show-stack-traces = true - } - } - - advanced.request-id { - generator { - # The component that generates a unique identifier for each CQL request, and possibly writes the id to the custom payload. - // class = W3CContextRequestIdGenerator - } - } - - # A session-wide component that controls the rate at which requests are executed. - # - # Implementations vary, but throttlers generally track a metric that represents the level of - # utilization of the session, and prevent new requests from starting when that metric exceeds a - # threshold. Pending requests may be enqueued and retried later. - # - # From the public API's point of view, this process is mostly transparent: any time that the - # request is throttled is included in the session.execute() or session.executeAsync() call. 
- # Similarly, the request timeout encompasses throttling: the timeout starts ticking before the - # throttler has started processing the request; a request may time out while it is still in the - # throttler's queue, before the driver has even tried to send it to a node. - # - # The only visible effect is that a request may fail with a RequestThrottlingException, if the - # throttler has determined that it can neither allow the request to proceed now, nor enqueue it; - # this indicates that your session is overloaded. - # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - advanced.throttler { - # The class of the throttler. If it is not qualified, the driver assumes that it resides in - # the package com.datastax.oss.driver.internal.core.session.throttling. - # - # The driver provides the following implementations out of the box: - # - # - PassThroughRequestThrottler: does not perform any kind of throttling, all requests are - # allowed to proceed immediately. Required options: none. - # - # - ConcurrencyLimitingRequestThrottler: limits the number of requests that can be executed in - # parallel. Required options: max-concurrent-requests, max-queue-size. - # - # - RateLimitingRequestThrottler: limits the request rate per second. Required options: - # max-requests-per-second, max-queue-size, drain-interval. - # - # You can also specify a custom class that implements RequestThrottler and has a public - # constructor with a DriverContext argument. - class = PassThroughRequestThrottler - - # The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - # Beyond that size, requests will fail with a RequestThrottlingException. - // max-queue-size = 10000 - - # The maximum number of requests that are allowed to execute in parallel. 
- # Only used by ConcurrencyLimitingRequestThrottler. - // max-concurrent-requests = 10000 - - # The maximum allowed request rate. - # Only used by RateLimitingRequestThrottler. - // max-requests-per-second = 10000 - - # How often the throttler attempts to dequeue requests. This is the only way for rate-based - # throttling, because the completion of an active request does not necessarily free a "slot" for - # a queued one (the rate might still be too high). - # - # You want to set this high enough that each attempt will process multiple entries in the queue, - # but not delay requests too much. A few milliseconds is probably a happy medium. - # - # Only used by RateLimitingRequestThrottler. - // drain-interval = 10 milliseconds - } - - # The list of node state listeners to register. Node state listeners are session-wide - # components that listen for node state changes (e.g., when nodes go down or back up). - # - # This must be a list of fully-qualified class names; classes specified here must implement - # com.datastax.oss.driver.api.core.metadata.NodeStateListener and have a public - # constructor with a DriverContext argument. - # - # You can also pass instances of your listeners programmatically with - # CqlSession.builder().addNodeStateListener(). - # - # Required: no - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - #advanced.node-state-listener.classes = [com.example.app.MyListener1,com.example.app.MyListener2] - - # The list of schema change listeners to register. Schema change listeners are session-wide - # components that listen for schema changes (e.g., when tables are created or dropped). 
- # - # This must be a list of fully-qualified class names; classes specified here must implement - # com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener and have a public - # constructor with a DriverContext argument. - # - # You can also pass instances of your listeners programmatically with - # CqlSession.builder().addSchemaChangeListener(). - # - # Required: no - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no - #advanced.schema-change-listener.classes = [com.example.app.MyListener1,com.example.app.MyListener2] - - # The address translator to use to convert the addresses sent by Cassandra nodes into ones that - # the driver uses to connect. - # This is only needed if the nodes are not directly reachable from the driver (for example, the - # driver is in a different network region and needs to use a public IP, or it connects through a - # proxy). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - advanced.address-translator { - # The class of the translator. If it is not qualified, the driver assumes that it resides in - # the package com.datastax.oss.driver.internal.core.addresstranslation. - # - # The driver provides the following implementations out of the box: - # - PassThroughAddressTranslator: returns all addresses unchanged. - # - FixedHostNameAddressTranslator: translates all addresses to a specific hostname. - # - SubnetAddressTranslator: translates addresses to hostname based on the subnet match. - # - Ec2MultiRegionAddressTranslator: suitable for an Amazon multi-region EC2 deployment where - # clients are also deployed in EC2. It optimizes network costs by favoring private IPs over - # public ones whenever possible. - # - # You can also specify a custom class that implements AddressTranslator and has a public - # constructor with a DriverContext argument. 
- class = PassThroughAddressTranslator - # - # This property has to be set only in case you use FixedHostNameAddressTranslator. - # advertised-hostname = mycustomhostname - # - # These properties are only applicable in case you use SubnetAddressTranslator. - # subnet-addresses { - # "100.64.0.0/15" = "cassandra.datacenter1.com:9042" - # "100.66.0.0/15" = "cassandra.datacenter2.com:9042" - # # IPv6 example: - # # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042" - # # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042" - # } - # Optional. When configured, addresses not matching the configured subnets are translated to this address. - # default-address = "cassandra.datacenter1.com:9042" - # Whether to resolve the addresses once on initialization (if true) or on each node (re-)connection (if false). - # If not configured, defaults to false. - # resolve-addresses = false - } - - # Whether to resolve the addresses passed to `basic.contact-points`. - # - # If this is true, addresses are created with `InetSocketAddress(String, int)`: the host name will - # be resolved the first time, and the driver will use the resolved IP address for all subsequent - # connection attempts. - # - # If this is false, addresses are created with `InetSocketAddress.createUnresolved()`: the host - # name will be resolved again every time the driver opens a new connection. This is useful for - # containerized environments where DNS records are more likely to change over time (note that the - # JVM and OS have their own DNS caching mechanisms, so you might need additional configuration - # beyond the driver). - # - # This option only applies to the contact points specified in the configuration. It has no effect - # on: - # - programmatic contact points passed to SessionBuilder.addContactPoints: these addresses are - # built outside of the driver, so it is your responsibility to provide unresolved instances. 
- # - dynamically discovered peers: the driver relies on Cassandra system tables, which expose raw - # IP addresses. Use a custom address translator to convert them to unresolved addresses (if - # you're in a containerized environment, you probably already need address translation anyway). - # - # Required: no (defaults to true) - # Modifiable at runtime: no - # Overridable in a profile: no - advanced.resolve-contact-points = true - - advanced.protocol { - # The native protocol version to use. - # - # If this option is absent, the driver looks up the versions of the nodes at startup (by default - # in system.peers.release_version), and chooses the highest common protocol version. - # For example, if you have a mixed cluster with Apache Cassandra 2.1 nodes (protocol v3) and - # Apache Cassandra 3.0 nodes (protocol v3 and v4), then protocol v3 is chosen. If the nodes - # don't have a common protocol version, initialization fails. - # - # If this option is set, then the given version will be used for all connections, without any - # negotiation or downgrading. If any of the contact points doesn't support it, that contact - # point will be skipped. - # - # Once the protocol version is set, it can't change for the rest of the driver's lifetime; if - # an incompatible node joins the cluster later, connection will fail and the driver will force - # it down (i.e. never try to connect to it again). - # - # You can check the actual version at runtime with Session.getContext().getProtocolVersion(). - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // version = V4 - - # The name of the algorithm used to compress protocol frames. - # - # The possible values are: - # - lz4: requires at.yawk.lz4:lz4-java in the classpath. - # - snappy: requires org.xerial.snappy:snappy-java in the classpath. - # - the string "none" to indicate no compression (this is functionally equivalent to omitting - # the option). 
- # - # The driver depends on the compression libraries, but they are optional. Make sure you - # redeclare an explicit dependency in your project. Refer to the driver's POM or manual for the - # exact version. - # - # Required: no. If the option is absent, protocol frames are not compressed. - # Modifiable at runtime: no - # Overridable in a profile: no - // compression = lz4 - - # The maximum length of the frames supported by the driver. Beyond that limit, requests will - # fail with an exception - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - max-frame-length = 256 MiB - } - - advanced.request { - # Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - # keyspace. - # Switching keyspace at runtime is highly discouraged, because it is inherently unsafe (other - # requests expecting the old keyspace might be running concurrently), and may cause statements - # prepared before the change to fail. - # It should only be done in very specific use cases where there is only a single client thread - # executing synchronous queries (such as a cqlsh-like interpreter). In other cases, clients - # should prefix table names in their queries instead. - # - # Note that CASSANDRA-10145 (scheduled for C* 4.0) will introduce a per-request keyspace option - # as a workaround to this issue. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for keyspace switches occurring after - # the change. - # Overridable in a profile: no - warn-if-set-keyspace = true - - # If tracing is enabled for a query, this controls how the trace is fetched. - trace { - # How many times the driver will attempt to fetch the query if it is not ready yet. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for traces fetched after the change. 
- # Overridable in a profile: yes - attempts = 5 - - # The interval between each attempt. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for traces fetched after the change. - # Overridable in a profile: yes - interval = 3 milliseconds - - # The consistency level to use for trace queries. - # Note that the default replication strategy for the system_traces keyspace is SimpleStrategy - # with RF=2, therefore LOCAL_ONE might not work if the local DC has no replicas for a given - # trace id. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for traces fetched after the change. - # Overridable in a profile: yes - consistency = ONE - } - - # Whether logging of server warnings generated during query execution should be disabled by the - # driver. All server generated warnings will be available programmatically via the ExecutionInfo - # object on the executed statement's ResultSet. If set to "false", this will prevent the driver - # from logging these warnings. - # - # NOTE: The log formatting for these warning messages will reuse the options defined for - # advanced.request-tracker. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for query warnings received after the change. - # Overridable in a profile: yes - log-warnings = true - } - - # Graph (DataStax Enterprise only) - advanced.graph { - # The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra - # native protocol. - # - # You should almost never have to change this: the driver sets it automatically, based on the - # information it has about the server. One exception is if you use the script API against a - # legacy DSE version (5.0.3 or older). In that case, you need to force the sub-protocol to - # "graphson-1.0". - # - # This can also be overridden programmatically with GraphStatement.setSubProtocol(). 
If both are - # specified, the programmatic value takes precedence, and this option is ignored. - # - # Possible values with built-in support in the driver are: - # [ "graphson-1.0", "graphson-2.0", "graph-binary-1.0"] - # - # IMPORTANT: The default value for the Graph sub-protocol is based only on the DSE - # version. If the version is DSE 6.7 and lower, "graphson-2.0" will be the default. For DSE 6.8 - # and higher, the default value is "graph-binary-1.0". - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for requests issued after the change. - # Overridable in a profile: yes - // sub-protocol = "graphson-2.0" - - # - # Whether or not Graph paging should be enabled or disabled for all queries. - # - #

If AUTO is set, the driver will decide whether or not to enable Graph paging - # based on the protocol version in use and the DSE version of all hosts. For this reason it is - # usually not necessary to change this setting. - # - #

IMPORTANT: Paging for DSE Graph is only available in DSE 6.8 and higher, and - # requires protocol version DSE_V1 or higher and graphs created with the Native engine; enabling - # paging for clusters and graphs that do not meet this requirement may result in query failures. - # - # Supported values are: ENABLED, DISABLED, AUTO - paging-enabled = "AUTO" - - - paging-options { - - # The page size. - # - # The value specified here can be interpreted in number of rows. - # Interpretation in number of bytes is not supported for graph continuous paging queries. - # - # It controls how many rows will be retrieved simultaneously in a single - # network roundtrip (the goal being to avoid loading too many results in memory at the same - # time). If there are more results, additional requests will be used to retrieve them (either - # automatically if you iterate with the sync API, or explicitly with the async API's - # fetchNextPage method). - # - # The default is the same as the driver's normal request page size, - # i.e., 5000 (rows). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - page-size = ${datastax-java-driver.advanced.continuous-paging.page-size} - - # The maximum number of pages to return. - # - # The default is zero, which means retrieve all pages. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages = ${datastax-java-driver.advanced.continuous-paging.max-pages} - - # Returns the maximum number of pages per second. - # - # The default is zero, which means no limit. 
- # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages-per-second = ${datastax-java-driver.advanced.continuous-paging.max-pages-per-second} - - # The maximum number of pages that can be stored in the local queue. - # - # This value must be positive. The default is 4. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-enqueued-pages = ${datastax-java-driver.advanced.continuous-paging.max-enqueued-pages} - } - } - - # Continuous paging (DataStax Enterprise only) - advanced.continuous-paging { - - # The page size. - # - # The value specified here can be interpreted in number of rows - # or in number of bytes, depending on the page-size-in-bytes option (see below). - # - # It controls how many rows (or how much data) will be retrieved simultaneously in a single - # network roundtrip (the goal being to avoid loading too many results in memory at the same - # time). If there are more results, additional requests will be used to retrieve them (either - # automatically if you iterate with the sync API, or explicitly with the async API's - # fetchNextPage method). - # - # The default is the same as the driver's normal request page size, - # i.e., 5000 (rows). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - page-size = ${datastax-java-driver.basic.request.page-size} - - # Whether the page-size option should be interpreted in number of rows or bytes. - # - # The default is false, i.e., the page size will be interpreted in number of rows. 
- # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - page-size-in-bytes = false - - # The maximum number of pages to return. - # - # The default is zero, which means retrieve all pages. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages = 0 - - # Returns the maximum number of pages per second. - # - # The default is zero, which means no limit. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-pages-per-second = 0 - - # The maximum number of pages that can be stored in the local queue. - # - # This value must be positive. The default is 4. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - max-enqueued-pages = 4 - - # Timeouts for continuous paging. - # - # Note that there is no global timeout for continuous paging as there is - # for regular queries, because continuous paging queries can take an arbitrarily - # long time to complete. - # - # Instead, timeouts are applied to each exchange between the driver and the coordinator. In - # other words, if the driver decides to retry, all timeouts are reset. - timeout { - - # How long to wait for the coordinator to send the first page. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - first-page = 2 seconds - - # How long to wait for the coordinator to send subsequent pages. 
- # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for continuous requests issued after - # the change - # Overridable in a profile: yes - other-pages = 1 second - - } - } - - # DataStax Insights - advanced.monitor-reporting { - # Whether to send monitoring events. - # - # The default is true. - # - # Required: no (defaults to true) - # Modifiable at runtime: no - # Overridable in a profile: no - enabled = true - } - - advanced.metrics { - # Metrics Factory configuration. - factory { - # The class for the metrics factory. - # - # The driver provides out-of-the-box support for three metrics libraries: Dropwizard, - # Micrometer and MicroProfile Metrics. - # - # Dropwizard is the default metrics library in the driver; to use Dropwizard, this value - # should be left to its default, "DefaultMetricsFactory", or set to - # "DropwizardMetricsFactory". The only difference between the two is that the former will work - # even if Dropwizard is not present on the classpath (in which case it will silently disable - # metrics), while the latter requires its presence. - # - # To select Micrometer, set the value to "MicrometerMetricsFactory", and to select - # MicroProfile Metrics, set the value to "MicroProfileMetricsFactory". For these libraries to - # be used, you will also need to add an additional dependency: - # - Micrometer: org.apache.cassandra:java-driver-metrics-micrometer - # - MicroProfile: org.apache.cassandra:java-driver-metrics-microprofile - # - # If you would like to use another metrics library, set this value to the fully-qualified name - # of a class that implements com.datastax.oss.driver.internal.core.metrics.MetricsFactory. - # - # It is also possible to use "NoopMetricsFactory", which forcibly disables metrics completely. - # In fact, "DefaultMetricsFactory" delegates to "DropwizardMetricsFactory" if Dropwizard is - # present on the classpath, or to "NoopMetricsFactory" if it isn't. 
- # - # Note: specifying a metrics factory is not enough to enable metrics; for the driver to - # actually start collecting metrics, you also need to specify which metrics to collect. See - # the following options for more information: - # - advanced.metrics.session.enabled - # - advanced.metrics.node.enabled - # - # See also the driver online manual for extensive instructions about how to configure metrics. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - class = DefaultMetricsFactory - } - - # This section configures how metric ids are generated. A metric id is a unique combination of - # a metric name and metric tags. - id-generator { - - # The class name of a component implementing - # com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator. If it is not qualified, the - # driver assumes that it resides in the package com.datastax.oss.driver.internal.core.metrics. - # - # The driver ships with two built-in implementations: - # - # - DefaultMetricIdGenerator: generates identifiers composed solely of (unique) metric names; - # it does not generate tags. It is mostly suitable for use with metrics libraries that do - # not support tags, like Dropwizard. - # - TaggingMetricIdGenerator: generates identifiers composed of name and tags. It is mostly - # suitable for use with metrics libraries that support tags, like Micrometer or MicroProfile - # Metrics. - # - # For example, here is how each one of them generates identifiers for the session metric - # "bytes-sent", assuming that the session is named "s0": - # - DefaultMetricIdGenerator: name "s0.bytes-sent", tags: {}. - # - TaggingMetricIdGenerator: name "session.bytes-sent", tags: {"session":"s0"} - # - # Here is how each one of them generates identifiers for the node metric "bytes-sent", - # assuming that the session is named "s0", and the node's broadcast address is 10.1.2.3:9042: - # - DefaultMetricIdGenerator: name "s0.nodes.10_1_2_3:9042.bytes-sent", tags: {}. 
- # - TaggingMetricIdGenerator: name "nodes.bytes-sent", tags: { "session" : "s0", - # "node" : "/10.1.2.3:9042" } - # - # As shown above, both built-in implementations generate names that are path-like structures - # separated by dots. This is indeed the most common expected format by reporting tools. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - class = DefaultMetricIdGenerator - - # An optional prefix to prepend to each generated metric name. - # - # The prefix should not start nor end with a dot or any other path separator; the following - # are two valid examples: "cassandra" or "myapp.prod.cassandra". - # - # For example, if this prefix is set to "cassandra", here is how the session metric - # "bytes-sent" would be named, assuming that the session is named "s0": - # - with DefaultMetricIdGenerator: "cassandra.s0.bytes-sent" - # - with TaggingMetricIdGenerator: "cassandra.session.bytes-sent" - # - # Here is how the node metric "bytes-sent" would be named, assuming that the session is named - # "s0", and the node's broadcast address is 10.1.2.3:9042: - # - with DefaultMetricIdGenerator: "cassandra.s0.nodes.10_1_2_3:9042.bytes-sent" - # - with TaggingMetricIdGenerator: "cassandra.nodes.bytes-sent" - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // prefix = "cassandra" - } - - histograms { - # Adds histogram buckets used to generate aggregable percentile approximations in monitoring - # systems that have query facilities to do so (e.g. Prometheus histogram_quantile, Atlas percentiles). - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - generate-aggregable = true - } - - # The session-level metrics (all disabled by default). 
- # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - session { - enabled = [ - # The number and rate of bytes sent for the entire session (exposed as a Meter if available, - # otherwise as a Counter). - // bytes-sent, - - # The number and rate of bytes received for the entire session (exposed as a Meter if - # available, otherwise as a Counter). - // bytes-received - - # The number of nodes to which the driver has at least one active connection (exposed as a - # Gauge). - // connected-nodes, - - # The throughput and latency percentiles of CQL requests (exposed as a Timer). - # - # This corresponds to the overall duration of the session.execute() call, including any - # retry. - // cql-requests, - - # The number of CQL requests that timed out -- that is, the session.execute() call failed - # with a DriverTimeoutException (exposed as a Counter). - // cql-client-timeouts, - - # The size of the driver-side cache of CQL prepared statements (exposed as a Gauge). - # - # The cache uses weak values eviction, so this represents the number of PreparedStatement - # instances that your application has created, and is still holding a reference to. Note - # that the returned value is approximate. - // cql-prepared-cache-size, - - # How long requests are being throttled (exposed as a Timer). - # - # This is the time between the start of the session.execute() call, and the moment when - # the throttler allows the request to proceed. - // throttling.delay, - - # The size of the throttling queue (exposed as a Gauge). - # - # This is the number of requests that the throttler is currently delaying in order to - # preserve its SLA. This metric only works with the built-in concurrency- and rate-based - # throttlers; in other cases, it will always be 0. 
- // throttling.queue-size, - - # The number of times a request was rejected with a RequestThrottlingException (exposed as - # a Counter) - // throttling.errors, - - # The throughput and latency percentiles of DSE continuous CQL requests (exposed as a - # Timer). - # - # This metric is a session-level metric and corresponds to the overall duration of the - # session.executeContinuously() call, including any retry. - # - # Note that this metric is analogous to the OSS driver's 'cql-requests' metrics, but for - # continuous paging requests only. Continuous paging requests do not update the - # 'cql-requests' metric, because they are usually much longer. Only the following metrics - # are updated during a continuous paging request: - # - # - At node level: all the usual metrics available for normal CQL requests, such as - # 'cql-messages' and error-related metrics (but these are only updated for the first - # page of results); - # - At session level: only 'continuous-cql-requests' is updated (this metric). - // continuous-cql-requests, - - # The throughput and latency percentiles of Graph requests (exposed as a Timer). - # - # This metric is a session-level metric and corresponds to the overall duration of the - # session.execute(GraphStatement) call, including any retry. - // graph-requests, - - # The number of graph requests that timed out -- that is, the - # session.execute(GraphStatement) call failed with a DriverTimeoutException (exposed as a - # Counter). - # - # Note that this metric is analogous to the OSS driver's 'cql-client-timeouts' metrics, but - # for Graph requests only. - // graph-client-timeouts - - ] - - # Extra configuration (for the metrics that need it) - - # Required: if the 'cql-requests' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - cql-requests { - - # The largest latency that we expect to record. 
- # - # This should be slightly higher than request.timeout (in theory, readings can't be higher - # than the timeout, but there might be a small overhead due to internal scheduling). - # - # This is used to scale internal data structures. If a higher recording is encountered at - # runtime, it is discarded and a warning is logged. - # Valid for: Dropwizard, Micrometer. - highest-latency = 3 seconds - - # The shortest latency that we expect to record. This is used to scale internal data - # structures. - # Valid for: Micrometer. - lowest-latency = 1 millisecond - - # The number of significant decimal digits to which internal structures will maintain - # value resolution and separation (for example, 3 means that recordings up to 1 second - # will be recorded with a resolution of 1 millisecond or better). - # - # For Dropwizard, this must be between 0 and 5. If the value is out of range, it defaults to - # 3 and a warning is logged. - # Valid for: Dropwizard, Micrometer. - significant-digits = 3 - - # The interval at which percentile data is refreshed. - # - # The driver records latency data in a "live" histogram, and serves results from a cached - # snapshot. Each time the snapshot gets older than the interval, the two are switched. - # Note that this switch happens upon fetching the metrics, so if you never fetch the - # recording interval might grow higher (that shouldn't be an issue in a production - # environment because you would typically have a metrics reporter that exports to a - # monitoring tool at a regular interval). - # - # In practice, this means that if you set this to 5 minutes, you're looking at data from a - # 5-minute interval in the past, that is at most 5 minutes old. If you fetch the metrics - # at a faster pace, you will observe the same data for 5 minutes until the interval - # expires. - # - # Note that this does not apply to the total count and rates (those are updated in real - # time). - # Valid for: Dropwizard. 
- refresh-interval = 5 minutes - - # An optional list of latencies to track as part of the application's service-level - # objectives (SLOs). - # - # If defined, the histogram is guaranteed to contain these boundaries alongside other - # buckets used to generate aggregable percentile approximations. - # Valid for: Micrometer. - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - - # An optional list of percentiles to be published by Micrometer. Produces an additional time series for each requested percentile. - # This percentile is computed locally, and so can't be aggregated with percentiles computed across other dimensions (e.g. in a different instance) - # Valid for: Micrometer. - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # Required: if the 'throttling.delay' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - throttling.delay { - highest-latency = 3 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # Required: if the 'continuous-cql-requests' metric is enabled, and Dropwizard or Micrometer - # is used. - # Modifiable at runtime: no - # Overridable in a profile: no - continuous-cql-requests { - highest-latency = 120 seconds - lowest-latency = 10 milliseconds - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # Required: if the 'graph-requests' metric is enabled, and Dropwizard or Micrometer is used. 
- # Modifiable at runtime: no - # Overridable in a profile: no - graph-requests { - highest-latency = 12 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - } - # The node-level metrics (all disabled by default). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - node { - enabled = [ - # The number of connections open to this node for regular requests (exposed as a - # Gauge). - # - # This includes the control connection (which uses at most one extra connection to a - # random node in the cluster). - // pool.open-connections, - - # The number of stream ids available on the connections to this node (exposed as a - # Gauge). - # - # Stream ids are used to multiplex requests on each connection, so this is an indication - # of how many more requests the node could handle concurrently before becoming saturated - # (note that this is a driver-side only consideration, there might be other limitations on - # the server that prevent reaching that theoretical limit). - // pool.available-streams, - - # The number of requests currently executing on the connections to this node (exposed as a - # Gauge). This includes orphaned streams. - // pool.in-flight, - - # The number of "orphaned" stream ids on the connections to this node (exposed as a - # Gauge). - # - # See the description of the connection.max-orphan-requests option for more details. - // pool.orphaned-streams, - - # The number and rate of bytes sent to this node (exposed as a Meter if available, otherwise - # as a Counter). - // bytes-sent, - - # The number and rate of bytes received from this node (exposed as a Meter if available, - # otherwise as a Counter). - // bytes-received, - - # The throughput and latency percentiles of individual CQL messages sent to this node as - # part of an overall request (exposed as a Timer). 
- # - # Note that this does not necessarily correspond to the overall duration of the - # session.execute() call, since the driver might query multiple nodes because of retries - # and speculative executions. Therefore a single "request" (as seen from a client of the - # driver) can be composed of more than one of the "messages" measured by this metric. - # - # Therefore this metric is intended as an insight into the performance of this particular - # node. For statistics on overall request completion, use the session-level cql-requests. - // cql-messages, - - # The number of times the driver failed to send a request to this node (exposed as a - # Counter). - # - # In those case we know the request didn't even reach the coordinator, so they are retried - # on the next node automatically (without going through the retry policy). - // errors.request.unsent, - - # The number of times a request was aborted before the driver even received a response - # from this node (exposed as a Counter). - # - # This can happen in two cases: if the connection was closed due to an external event - # (such as a network error or heartbeat failure); or if there was an unexpected error - # while decoding the response (this can only be a driver bug). - // errors.request.aborted, - - # The number of times this node replied with a WRITE_TIMEOUT error (exposed as a Counter). - # - # Whether this error is rethrown directly to the client, rethrown or ignored is determined - # by the RetryPolicy. - // errors.request.write-timeouts, - - # The number of times this node replied with a READ_TIMEOUT error (exposed as a Counter). - # - # Whether this error is rethrown directly to the client, rethrown or ignored is determined - # by the RetryPolicy. - // errors.request.read-timeouts, - - # The number of times this node replied with an UNAVAILABLE error (exposed as a Counter). - # - # Whether this error is rethrown directly to the client, rethrown or ignored is determined - # by the RetryPolicy. 
- // errors.request.unavailables, - - # The number of times this node replied with an error that doesn't fall under other - # 'errors.*' metrics (exposed as a Counter). - // errors.request.others, - - # The total number of errors on this node that caused the RetryPolicy to trigger a retry - # (exposed as a Counter). - # - # This is a sum of all the other retries.* metrics. - // retries.total, - - # The number of errors on this node that caused the RetryPolicy to trigger a retry, broken - # down by error type (exposed as Counters). - // retries.aborted, - // retries.read-timeout, - // retries.write-timeout, - // retries.unavailable, - // retries.other, - - # The total number of errors on this node that were ignored by the RetryPolicy (exposed as - # a Counter). - # - # This is a sum of all the other ignores.* metrics. - // ignores.total, - - # The number of errors on this node that were ignored by the RetryPolicy, broken down by - # error type (exposed as Counters). - // ignores.aborted, - // ignores.read-timeout, - // ignores.write-timeout, - // ignores.unavailable, - // ignores.other, - - # The number of speculative executions triggered by a slow response from this node - # (exposed as a Counter). - // speculative-executions, - - # The number of errors encountered while trying to establish a connection to this node - # (exposed as a Counter). - # - # Connection errors are not a fatal issue for the driver, failed connections will be - # retried periodically according to the reconnection policy. You can choose whether or not - # to log those errors at WARN level with the connection.warn-on-init-error option. - # - # Authentication errors are not included in this counter, they are tracked separately in - # errors.connection.auth. - // errors.connection.init, - - # The number of authentication errors encountered while trying to establish a connection - # to this node (exposed as a Counter). - # Authentication errors are also logged at WARN level. 
- // errors.connection.auth, - - # The throughput and latency percentiles of individual graph messages sent to this node as - # part of an overall request (exposed as a Timer). - # - # Note that this does not necessarily correspond to the overall duration of the - # session.execute() call, since the driver might query multiple nodes because of retries - # and speculative executions. Therefore a single "request" (as seen from a client of the - # driver) can be composed of more than one of the "messages" measured by this metric. - # - # Therefore this metric is intended as an insight into the performance of this particular - # node. For statistics on overall request completion, use the session-level graph-requests. - // graph-messages, - ] - - # See cql-requests in the `session` section - # - # Required: if the 'cql-messages' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - cql-messages { - highest-latency = 3 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # See graph-requests in the `session` section - # - # Required: if the 'graph-messages' metric is enabled, and Dropwizard or Micrometer is used. - # Modifiable at runtime: no - # Overridable in a profile: no - graph-messages { - highest-latency = 3 seconds - lowest-latency = 1 millisecond - significant-digits = 3 - refresh-interval = 5 minutes - // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] - // publish-percentiles = [ 0.75, 0.95, 0.99 ] - } - - # The time after which the node level metrics will be evicted. - # - # This is used to unregister stale metrics if a node leaves the cluster or gets a new address. - # If the node does not come back up when this interval elapses, all its metrics are removed - # from the registry. - # - # The lowest allowed value is 5 minutes. 
If you try to set it lower, the driver will log a - # warning and use 5 minutes. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - expire-after = 1 hour - } - } - - advanced.socket { - # Whether or not to disable the Nagle algorithm. - # - # By default, this option is set to true (Nagle disabled), because the driver has its own - # internal message coalescing algorithm. - # - # See java.net.StandardSocketOptions.TCP_NODELAY. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - tcp-no-delay = true - - # All other socket options are unset by default. The actual value depends on the underlying - # Netty transport: - # - NIO uses the defaults from java.net.Socket (refer to the javadocs of - # java.net.StandardSocketOptions for each option). - # - Epoll delegates to the underlying file descriptor, which uses the O/S defaults. - - # Whether or not to enable TCP keep-alive probes. - # - # See java.net.StandardSocketOptions.SO_KEEPALIVE. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //keep-alive = false - - # Whether or not to allow address reuse. - # - # See java.net.StandardSocketOptions.SO_REUSEADDR. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //reuse-address = true - - # Sets the linger interval. - # - # If the value is zero or greater, then it represents a timeout value, in seconds; - # if the value is negative, it means that this option is disabled. - # - # See java.net.StandardSocketOptions.SO_LINGER. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. 
- # Overridable in a profile: no - //linger-interval = 0 - - # Sets a hint to the size of the underlying buffers for incoming network I/O. - # - # See java.net.StandardSocketOptions.SO_RCVBUF. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //receive-buffer-size = 65535 - - # Sets a hint to the size of the underlying buffers for outgoing network I/O. - # - # See java.net.StandardSocketOptions.SO_SNDBUF. - # - # Required: no - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - //send-buffer-size = 65535 - } - - advanced.heartbeat { - # The heartbeat interval. If a connection stays idle for that duration (no reads), the driver - # sends a dummy message on it to make sure it's still alive. If not, the connection is trashed - # and replaced. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - interval = 30 seconds - - # How long the driver waits for the response to a heartbeat. If this timeout fires, the - # heartbeat is considered failed. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for connections created after the - # change. - # Overridable in a profile: no - timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - } - - advanced.metadata { - # Topology events are external signals that inform the driver of the state of Cassandra nodes - # (by default, they correspond to gossip events received on the control connection). - # The debouncer helps smoothen out oscillations if conflicting events are sent out in short - # bursts. - # Debouncing may be disabled by setting the window to 0 or max-events to 1 (this is not - # recommended). 
- topology-event-debouncer { - # How long the driver waits to propagate an event. If another event is received within that - # time, the window is reset and a batch of accumulated events will be delivered. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - window = 1 second - - # The maximum number of events that can accumulate. If this count is reached, the events are - # delivered immediately and the time window is reset. This avoids holding events indefinitely - # if the window keeps getting reset. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - max-events = 20 - } - - # Options relating to schema metadata (Session.getMetadata.getKeyspaces). - # This metadata is exposed by the driver for informational purposes, and is also necessary for - # token-aware routing. - schema { - # Whether schema metadata is enabled. - # If this is false, the schema will remain empty, or to the last known value. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. It can also be overridden programmatically via Session.setSchemaMetadataEnabled. - # Overridable in a profile: no - enabled = true - - # The keyspaces for which schema and token metadata should be maintained. - # - # Each element can be one of the following: - # 1. An exact name inclusion, for example "Ks1". If the name is case-sensitive, it must appear - # in its exact case. - # 2. An exact name exclusion, for example "!Ks1". - # 3. A regex inclusion, enclosed in slashes, for example "/^Ks.*/". The part between the - # slashes must follow the syntax rules of java.util.regex.Pattern. - # 4. A regex exclusion, for example "!/^Ks.*/". - # - # If the list is empty, or the option is unset, all keyspaces will match. Otherwise: - # - # If a keyspace matches an exact name inclusion, it is always included, regardless of what any - # other rule says. 
- # Otherwise, if it matches an exact name exclusion, it is always excluded, regardless of what - # any regex rule says. - # Otherwise, if there are regex rules: - # - if they're only inclusions, the keyspace must match at least one of them. - # - if they're only exclusions, the keyspace must match none of them. - # - if they're both, the keyspace must match at least one inclusion and none of the - # exclusions. - # - # If an element is malformed, or if its regex has a syntax error, a warning is logged and that - # single element is ignored. - # - # Try to use only exact name inclusions if possible. This allows the driver to filter on the - # server side with a WHERE IN clause. If you use any other rule, it has to fetch all system - # rows and filter on the client side. - # - # Required: no. The default value excludes all Cassandra and DSE system keyspaces. If the - # option is unset, this is interpreted as "include all keyspaces". - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. - # Overridable in a profile: no - refreshed-keyspaces = [ "!system", "!/^system_.*/", "!/^dse_.*/", "!solr_admin", "!OpsCenter" ] - - # The timeout for the requests to the schema tables. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. - # Overridable in a profile: no - request-timeout = ${datastax-java-driver.basic.request.timeout} - - # The page size for the requests to the schema tables. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. - # Overridable in a profile: no - request-page-size = ${datastax-java-driver.basic.request.page-size} - - # Protects against bursts of schema updates (for example when a client issues a sequence of - # DDL queries), by coalescing them into a single update. 
- # Debouncing may be disabled by setting the window to 0 or max-events to 1 (this is highly - # discouraged for schema refreshes). - debouncer { - # How long the driver waits to apply a refresh. If another refresh is requested within that - # time, the window is reset and a single refresh will be triggered when it ends. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - window = 1 second - - # The maximum number of refreshes that can accumulate. If this count is reached, a refresh - # is done immediately and the window is reset. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - max-events = 20 - } - } - - # Whether token metadata (Session.getMetadata.getTokenMap) is enabled. - # This metadata is exposed by the driver for informational purposes, and is also necessary for - # token-aware routing. - # If this is false, it will remain empty, or to the last known value. Note that its computation - # requires information about the schema; therefore if schema metadata is disabled or filtered to - # a subset of keyspaces, the token map will be incomplete, regardless of the value of this - # property. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for refreshes issued after the change. - # Overridable in a profile: no - token-map.enabled = true - } - - advanced.control-connection { - # How long the driver waits for responses to control queries (e.g. fetching the list of nodes, - # refreshing the schema). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - - # Due to the distributed nature of Cassandra, schema changes made on one node might not be - # immediately visible to others. 
Under certain circumstances, the driver waits until all nodes - # agree on a common schema version (namely: before a schema refresh, before repreparing all - # queries on a newly up node, and before completing a successful schema-altering query). To do - # so, it queries system tables to find out the schema version of all nodes that are currently - # UP. If all the versions match, the check succeeds, otherwise it is retried periodically, until - # a given timeout. - # - # A schema agreement failure is not fatal, but it might produce unexpected results (for example, - # getting an "unconfigured table" error for a table that you created right before, just because - # the two queries went to different coordinators). - # - # Note that schema agreement never succeeds in a mixed-version cluster (it would be challenging - # because the way the schema version is computed varies across server versions); the assumption - # is that schema updates are unlikely to happen during a rolling upgrade anyway. - schema-agreement { - # The interval between each attempt. - # Required: yes - # Modifiable at runtime: yes, the new value will be used for checks issued after the change. - # Overridable in a profile: no - interval = 200 milliseconds - - # The timeout after which schema agreement fails. - # If this is set to 0, schema agreement is skipped and will always fail. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for checks issued after the change. - # Overridable in a profile: no - timeout = 10 seconds - - # Whether to log a warning if schema agreement fails. - # You might want to change this if you've set the timeout to 0. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for checks issued after the change. - # Overridable in a profile: no - warn-on-failure = true - } - } - - advanced.prepared-statements { - # Whether `Session.prepare` calls should be sent to all nodes in the cluster. 
- # - # A request to prepare is handled in two steps: - # 1) send to a single node first (to rule out simple errors like malformed queries). - # 2) if step 1 succeeds, re-send to all other active nodes (i.e. not ignored by the load - # balancing policy). - # This option controls whether step 2 is executed. - # - # The reason why you might want to disable it is to optimize network usage if you have a large - # number of clients preparing the same set of statements at startup. If your load balancing - # policy distributes queries randomly, each client will pick a different host to prepare its - # statements, and on the whole each host has a good chance of having been hit by at least one - # client for each statement. - # On the other hand, if that assumption turns out to be wrong and one host hasn't prepared a - # given statement, it needs to be re-prepared on the fly the first time it gets executed; this - # causes a performance penalty (one extra roundtrip to resend the query to prepare, and another - # to retry the execution). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for prepares issued after the change. - # Overridable in a profile: yes - prepare-on-all-nodes = true - - # How the driver replicates prepared statements on a node that just came back up or joined the - # cluster. - reprepare-on-up { - # Whether the driver tries to prepare on new nodes at all. - # - # The reason why you might want to disable it is to optimize reconnection time when you - # believe nodes often get marked down because of temporary network issues, rather than the - # node really crashing. In that case, the node still has prepared statements in its cache when - # the driver reconnects, so re-preparing is redundant. 
- # - # On the other hand, if that assumption turns out to be wrong and the node had really - # restarted, its prepared statement cache is empty (before CASSANDRA-8831), and statements - # need to be re-prepared on the fly the first time they get executed; this causes a - # performance penalty (one extra roundtrip to resend the query to prepare, and another to - # retry the execution). - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - enabled = true - - # Whether to check `system.prepared_statements` on the target node before repreparing. - # - # This table exists since CASSANDRA-8831 (merged in 3.10). It stores the statements already - # prepared on the node, and preserves them across restarts. - # - # Checking the table first avoids repreparing unnecessarily, but the cost of the query is not - # always worth the improvement, especially if the number of statements is low. - # - # If the table does not exist, or the query fails for any other reason, the error is ignored - # and the driver proceeds to reprepare statements according to the other parameters. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - check-system-table = false - - # The maximum number of statements that should be reprepared. 0 or a negative value means no - # limit. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - max-statements = 0 - - # The maximum number of concurrent requests when repreparing. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - max-parallelism = 100 - - # The request timeout. 
This applies both to querying the system.prepared_statements table (if - # relevant), and the prepare requests themselves. - # - # Required: yes - # Modifiable at runtime: yes, the new value will be used for nodes that come back up after the - # change. - # Overridable in a profile: no - timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - } - - # How to build the cache of prepared statements. - prepared-cache { - # Whether to use weak references for the prepared statements cache values. - # - # If this option is absent, weak references will be used. - # - # Required: no - # Modifiable at runtime: no - # Overridable in a profile: no - // weak-values = true - } - } - - # Options related to the Netty event loop groups used internally by the driver. - advanced.netty { - - # Whether the threads created by the driver should be daemon threads. - # This will apply to the threads in io-group, admin-group, and the timer thread. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - daemon = false - - # The event loop group used for I/O operations (reading and writing to Cassandra nodes). - # By default, threads in this group are named after the session name, "-io-" and an incrementing - # counter, for example "s0-io-0". - io-group { - # The number of threads. - # If this is set to 0, the driver will use `Runtime.getRuntime().availableProcessors() * 2`. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - size = 0 - - # The options to shut down the event loop group gracefully when the driver closes. If a task - # gets submitted during the quiet period, it is accepted and the quiet period starts over. - # The timeout limits the overall shutdown time. 
- # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - shutdown {quiet-period = 2, timeout = 15, unit = SECONDS} - } - # The event loop group used for admin tasks not related to request I/O (handle cluster events, - # refresh metadata, schedule reconnections, etc.) - # By default, threads in this group are named after the session name, "-admin-" and an - # incrementing counter, for example "s0-admin-0". - admin-group { - size = 2 - - shutdown {quiet-period = 2, timeout = 15, unit = SECONDS} - } - # The timer used for scheduling request timeouts and speculative executions - # By default, this thread is named after the session name and "-timer-0", for example - # "s0-timer-0". - timer { - # The timer tick duration. - # This is how frequent the timer should wake up to check for timed-out tasks or speculative - # executions. Lower resolution (i.e. longer durations) will leave more CPU cycles for running - # I/O operations at the cost of precision of exactly when a request timeout will expire or a - # speculative execution will run. Higher resolution (i.e. shorter durations) will result in - # more precise request timeouts and speculative execution scheduling, but at the cost of CPU - # cycles taken from I/O operations, which could lead to lower overall I/O throughput. - # - # The default value is 100 milliseconds, which is a comfortable value for most use cases. - # However if you are using more agressive timeouts or speculative execution delays, then you - # should lower the timer tick duration as well, so that its value is always equal to or lesser - # than the timeout duration and/or speculative execution delay you intend to use. - # - # Note for Windows users: avoid setting this to aggressive values, that is, anything under 100 - # milliseconds; doing so is known to cause extreme CPU usage. 
Also, the tick duration must be - # a multiple of 10 under Windows; if that is not the case, it will be automatically rounded - # down to the nearest multiple of 10 (e.g. 99 milliseconds will be rounded down to 90 - # milliseconds). - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - tick-duration = 100 milliseconds - - # Number of ticks in a Timer wheel. The underlying implementation uses Netty's - # HashedWheelTimer, which uses hashes to arrange the timeouts. This effectively controls the - # size of the timer wheel. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - ticks-per-wheel = 2048 - } - } - - # The component that coalesces writes on the connections. - # This is exposed mainly to facilitate tuning during development. You shouldn't have to adjust - # this. - advanced.coalescer { - # The reschedule interval. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - reschedule-interval = 10 microseconds - } - - profiles { - # This is where your custom profiles go, for example: - # olap { - # basic.request.timeout = 5 seconds - # } - - # An example configuration profile for graph requests. - // my-graph-profile-example { - // graph { - // read-consistency-level = LOCAL_QUORUM - // write-consistency-level = LOCAL_ONE - // } - // } - - # An example pre-defined configuration profile for OLAP graph queries. - // graph-olap { - // graph { - // traversal-source = "a" // traversal source needs to be set to "a" for OLAP queries. - // } - // } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/Assertions.java b/core/src/test/java/com/datastax/dse/driver/Assertions.java deleted file mode 100644 index 09f7b281f84..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/Assertions.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import org.apache.tinkerpop.gremlin.structure.io.Buffer; - -public class Assertions extends org.assertj.core.api.Assertions { - public static TinkerpopBufferAssert assertThat(Buffer actual) { - return new TinkerpopBufferAssert(actual); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java b/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java deleted file mode 100644 index 65e58878dbc..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static org.assertj.core.api.Assertions.fail; - -import org.junit.runner.Description; -import org.junit.runner.notification.RunListener; - -/** - * Common parent of all driver tests, to store common configuration and perform sanity checks. - * - * @see "maven-surefire-plugin configuration in pom.xml" - */ -public class DriverRunListener extends RunListener { - - @Override - public void testFinished(Description description) throws Exception { - // If a test interrupted the main thread silently, this can make later tests fail. Instead, we - // fail the test and clear the interrupt status. - // Note: Thread.interrupted() also clears the flag, which is what we want. - if (Thread.interrupted()) { - fail(description.getMethodName() + " interrupted the main thread"); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java b/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java deleted file mode 100644 index 7d9aecc28ed..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.tngtech.java.junit.dataprovider.DataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; -import java.util.stream.Stream; - -public class DseTestDataProviders { - - private static final ScriptGraphStatement UNDEFINED_IDEMPOTENCE_STATEMENT = - ScriptGraphStatement.newInstance("undefined idempotence"); - private static final ScriptGraphStatement IDEMPOTENT_STATEMENT = - ScriptGraphStatement.builder("idempotent").setIdempotence(true).build(); - private static final ScriptGraphStatement NON_IDEMPOTENT_STATEMENT = - ScriptGraphStatement.builder("non idempotent").setIdempotence(false).build(); - - @DataProvider - public static Object[][] allDseProtocolVersions() { - return concat(DseProtocolVersion.values()); - } - - @DataProvider - public static Object[][] allOssProtocolVersions() { - return concat(DefaultProtocolVersion.values()); - } - - @DataProvider - public static Object[][] allDseAndOssProtocolVersions() { - return concat(DefaultProtocolVersion.values(), DseProtocolVersion.values()); - } - - @DataProvider - public static Object[][] supportedGraphProtocols() { - return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; - } - - /** - * The combination of the default idempotence option and statement setting that produce an - * idempotent statement. 
- */ - @DataProvider - public static Object[][] idempotentGraphConfig() { - return new Object[][] { - new Object[] {true, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {false, IDEMPOTENT_STATEMENT}, - new Object[] {true, IDEMPOTENT_STATEMENT}, - }; - } - - /** - * The combination of the default idempotence option and statement setting that produce a non - * idempotent statement. - */ - @DataProvider - public static Object[][] nonIdempotentGraphConfig() { - return new Object[][] { - new Object[] {false, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {true, NON_IDEMPOTENT_STATEMENT}, - new Object[] {false, NON_IDEMPOTENT_STATEMENT}, - }; - } - - @DataProvider - public static Object[][] allDseProtocolVersionsAndSupportedGraphProtocols() { - return TestDataProviders.combine(allDseProtocolVersions(), supportedGraphProtocols()); - } - - @NonNull - private static Object[][] concat(Object[]... values) { - return Stream.of(values) - .flatMap(Arrays::stream) - .map(o -> new Object[] {o}) - .toArray(Object[][]::new); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java b/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java deleted file mode 100644 index 7992dde4fea..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.UUID; - -public class DseTestFixtures { - - // Returns a single row, with a single "message" column with the value "hello, world" - public static Rows singleDseRow() { - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - 
RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null, - 1, - true); - Queue> data = new ArrayDeque<>(); - data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64"))); - return new DefaultRows(metadata, data); - } - - // Returns 10 rows, each with a single "message" column with the value "hello, world" - public static Rows tenDseRows(int page, boolean last) { - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - last ? null : ByteBuffer.wrap(new byte[] {(byte) page}), - new int[] {}, - null, - page, - last); - Queue> data = new ArrayDeque<>(); - for (int i = 0; i < 10; i++) { - data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64"))); - } - return new DefaultRows(metadata, data); - } - - public static DefaultDriverContext mockNodesInMetadataWithVersions( - DefaultDriverContext mockContext, boolean treatNullAsMissing, Version... dseVersions) { - - // mock bits of the context - MetadataManager metadataManager = mock(MetadataManager.class); - Metadata metadata = mock(Metadata.class); - Map nodeMap = new HashMap<>((dseVersions != null) ? 
dseVersions.length : 1); - if (dseVersions == null) { - Node node = mock(Node.class); - Map nodeExtras = new HashMap<>(1); - if (!treatNullAsMissing) { - // put an explicit null in for DSE_VERSION - nodeExtras.put(DseNodeProperties.DSE_VERSION, null); - } - nodeMap.put(UUID.randomUUID(), node); - when(node.getExtras()).thenReturn(nodeExtras); - } else { - for (Version dseVersion : dseVersions) { - // create a node with DSE version in its extra data - Node node = mock(Node.class); - Map nodeExtras = new HashMap<>(1); - if (dseVersion != null || !treatNullAsMissing) { - nodeExtras.put(DseNodeProperties.DSE_VERSION, dseVersion); - } - nodeMap.put(UUID.randomUUID(), node); - when(node.getExtras()).thenReturn(nodeExtras); - } - } - // return mocked data when requested - when(metadata.getNodes()).thenReturn(nodeMap); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(mockContext.getMetadataManager()).thenReturn(metadataManager); - return mockContext; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java b/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java deleted file mode 100644 index 278e5a65070..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.util.Bytes; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.assertj.core.api.AbstractAssert; - -public class TinkerpopBufferAssert extends AbstractAssert { - public TinkerpopBufferAssert(Buffer actual) { - super(actual, TinkerpopBufferAssert.class); - } - - public TinkerpopBufferAssert containsExactly(String hexString) { - - byte[] expectedBytes = Bytes.fromHexString(hexString).array(); - byte[] actualBytes = new byte[expectedBytes.length]; - actual.readBytes(actualBytes); - assertThat(actualBytes).containsExactly(expectedBytes); - assertThat(actual.readableBytes()).isEqualTo(0); - return this; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java deleted file mode 100644 index 4cf8d43b748..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.time; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.ZonedDateTime; -import org.junit.Test; - -public class DateRangePrecisionTest { - - @Test - public void should_round_up() { - ZonedDateTime timestamp = ZonedDateTime.parse("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.MILLISECOND.roundUp(timestamp)) - .isEqualTo("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.SECOND.roundUp(timestamp)).isEqualTo("2011-02-03T04:05:16.999Z"); - assertThat(DateRangePrecision.MINUTE.roundUp(timestamp)).isEqualTo("2011-02-03T04:05:59.999Z"); - assertThat(DateRangePrecision.HOUR.roundUp(timestamp)).isEqualTo("2011-02-03T04:59:59.999Z"); - assertThat(DateRangePrecision.DAY.roundUp(timestamp)).isEqualTo("2011-02-03T23:59:59.999Z"); - assertThat(DateRangePrecision.MONTH.roundUp(timestamp)).isEqualTo("2011-02-28T23:59:59.999Z"); - assertThat(DateRangePrecision.YEAR.roundUp(timestamp)).isEqualTo("2011-12-31T23:59:59.999Z"); - } - - @Test - public void should_round_down() { - ZonedDateTime timestamp = ZonedDateTime.parse("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.MILLISECOND.roundDown(timestamp)) - .isEqualTo("2011-02-03T04:05:16.789Z"); - assertThat(DateRangePrecision.SECOND.roundDown(timestamp)) - .isEqualTo("2011-02-03T04:05:16.000Z"); - assertThat(DateRangePrecision.MINUTE.roundDown(timestamp)) - .isEqualTo("2011-02-03T04:05:00.000Z"); - assertThat(DateRangePrecision.HOUR.roundDown(timestamp)).isEqualTo("2011-02-03T04:00:00.000Z"); - 
assertThat(DateRangePrecision.DAY.roundDown(timestamp)).isEqualTo("2011-02-03T00:00:00.000Z"); - assertThat(DateRangePrecision.MONTH.roundDown(timestamp)).isEqualTo("2011-02-01T00:00:00.000Z"); - assertThat(DateRangePrecision.YEAR.roundDown(timestamp)).isEqualTo("2011-01-01T00:00:00.000Z"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java deleted file mode 100644 index b067c12cad0..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.data.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.internal.SerializationHelper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.text.ParseException; -import java.time.temporal.ChronoField; -import java.util.function.Predicate; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DateRangeTest { - - @Test - @UseDataProvider("rangeStrings") - public void should_parse_and_format(String source) throws Exception { - DateRange parsed = DateRange.parse(source); - assertThat(parsed.toString()).isEqualTo(source); - } - - @DataProvider - public static Object[][] rangeStrings() { - return new Object[][] { - {"[2011-01 TO 2015]"}, - {"[2010-01-02 TO 2015-05-05T13]"}, - {"[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]"}, - // leap year - {"[2010-01-01T15 TO 2016-02]"}, - // pre-epoch - {"[1500 TO 1501]"}, - {"[0001 TO 0001-01-02]"}, - {"[0000 TO 0000-01-02]"}, - {"[-0001 TO -0001-01-02]"}, - // unbounded - {"[* TO 2014-12-01]"}, - {"[1999 TO *]"}, - {"[* TO *]"}, - // single bound ranges - // AD/BC era boundary - {"0001-01-01"}, - {"-0001-01-01"}, - {"-0009"}, - {"2000-11"}, - {"*"} - }; - } - - @Test - public void should_use_proleptic_parser() throws Exception { - DateRange parsed = DateRange.parse("[0000 TO 0000-01-02]"); - assertThat(parsed.getLowerBound().getTimestamp().get(ChronoField.YEAR)).isEqualTo(0); - } - - @Test - public void should_fail_to_parse_invalid_strings() { - assertThatThrownBy(() -> DateRange.parse("foo")).matches(hasOffset(0)); - assertThatThrownBy(() -> DateRange.parse("[foo TO *]")).matches(hasOffset(1)); - assertThatThrownBy(() -> DateRange.parse("[* TO 
foo]")).matches(hasOffset(6)); - } - - private static Predicate hasOffset(int offset) { - return e -> ((ParseException) e).getErrorOffset() == offset; - } - - @Test - public void should_fail_to_parse_inverted_range() { - assertThatThrownBy(() -> DateRange.parse("[2001-01 TO 2000]")) - .hasMessage( - "Lower bound of a date range should be before upper bound, got: [2001-01 TO 2000]"); - } - - @Test - public void should_not_equate_single_date_open_to_both_open_range() throws Exception { - assertThat(DateRange.parse("*")).isNotEqualTo(DateRange.parse("[* TO *]")); - } - - @Test - public void should_not_equate_same_ranges_with_different_precisions() throws ParseException { - assertThat(DateRange.parse("[2001 TO 2002]")) - .isNotEqualTo(DateRange.parse("[2001-01 TO 2002-12]")); - } - - @Test - public void should_give_same_hashcode_to_equal_objects() throws ParseException { - assertThat(DateRange.parse("[2001 TO 2002]").hashCode()) - .isEqualTo(DateRange.parse("[2001 TO 2002]").hashCode()); - } - - @Test - public void should_serialize_and_deserialize() throws Exception { - DateRange initial = DateRange.parse("[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]"); - DateRange deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java deleted file mode 100644 index a890720a3ef..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.junit.Test; - -public class CqlCollectionTest { - - @Test - public void should_evaluate_contains() { - P> contains = CqlCollection.contains("foo"); - assertThat(contains.test(new HashSet<>())).isFalse(); - assertThat(contains.test(new ArrayList<>())).isFalse(); - assertThat(contains.test(Sets.newHashSet("foo"))).isTrue(); - assertThat(contains.test(Lists.newArrayList("foo"))).isTrue(); - assertThat(contains.test(Sets.newHashSet("bar"))).isFalse(); - assertThat(contains.test(Lists.newArrayList("bar"))).isFalse(); - assertThatThrownBy(() -> contains.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.contains(null).test(Sets.newHashSet("foo"))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public 
void should_evaluate_containsKey() { - P> containsKey = CqlCollection.containsKey("foo"); - assertThat(containsKey.test(new HashMap<>())).isFalse(); - assertThat(containsKey.test(new LinkedHashMap<>())).isFalse(); - assertThat(containsKey.test(ImmutableMap.of("foo", "bar"))).isTrue(); - assertThat(containsKey.test(ImmutableMap.of("bar", "foo"))).isFalse(); - assertThatThrownBy(() -> containsKey.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.containsKey(null).test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_evaluate_containsValue() { - P> containsValue = CqlCollection.containsValue("foo"); - assertThat(containsValue.test(new HashMap<>())).isFalse(); - assertThat(containsValue.test(new LinkedHashMap<>())).isFalse(); - assertThat(containsValue.test(ImmutableMap.of("bar", "foo"))).isTrue(); - assertThat(containsValue.test(ImmutableMap.of("foo", "bar"))).isFalse(); - assertThatThrownBy(() -> containsValue.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.containsValue(null).test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_evaluate_entryEq() { - P> entryEq = CqlCollection.entryEq("foo", "bar"); - assertThat(entryEq.test(new HashMap<>())).isFalse(); - assertThat(entryEq.test(new LinkedHashMap<>())).isFalse(); - assertThat(entryEq.test(ImmutableMap.of("foo", "bar"))).isTrue(); - assertThat(entryEq.test(ImmutableMap.of("bar", "foo"))).isFalse(); - assertThatThrownBy(() -> entryEq.test(null)).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.entryEq(null, "foo").test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> CqlCollection.entryEq("foo", null).test(ImmutableMap.of("foo", "bar"))) - .isInstanceOf(IllegalArgumentException.class); - } -} 
diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java deleted file mode 100644 index 143aec97b78..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.junit.Test; - -public class GeoTest { - - @Test - public void should_convert_units_to_degrees() { - assertThat(Geo.Unit.DEGREES.toDegrees(100.0)).isEqualTo(100.0); - assertThat(Geo.Unit.MILES.toDegrees(68.9722)).isEqualTo(0.9982455747535043); - assertThat(Geo.Unit.KILOMETERS.toDegrees(111.0)).isEqualTo(0.9982456082154465); - assertThat(Geo.Unit.METERS.toDegrees(111000.0)).isEqualTo(0.9982456082154464); - } - - @Test - public void should_test_if_point_is_inside_circle_with_cartesian_coordinates() { - P inside = Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951); - assertThat(inside.test(Point.fromCoordinates(40, 40))).isTrue(); - assertThat(inside.test(Point.fromCoordinates(40.1, 40))).isFalse(); - } - - @Test - public void should_test_if_point_is_inside_circle_with_geo_coordinates() { - P inside = - Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES); - assertThat(inside.test(Point.fromCoordinates(40, 40))).isTrue(); - assertThat(inside.test(Point.fromCoordinates(40.1, 40))).isFalse(); - } - - @Test - public void should_test_if_point_is_inside_polygon() { - P inside = - Geo.inside( - Polygon.builder() - .addRing( - Point.fromCoordinates(30, 30), - Point.fromCoordinates(40, 40), - Point.fromCoordinates(40, 30)) - .build()); - assertThat(inside.test(Point.fromCoordinates(35, 32))).isTrue(); - assertThat(inside.test(Point.fromCoordinates(33, 37))).isFalse(); - } - - @Test - public void should_build_line_string_from_coordinates() { - LineString lineString = Geo.lineString(1, 2, 3, 4, 5, 6); - assertThat(lineString.getPoints()) - .hasSize(3) - 
.contains(Point.fromCoordinates(1, 2)) - .contains(Point.fromCoordinates(3, 4)) - .contains(Point.fromCoordinates(5, 6)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_line_string_if_not_enough_coordinates() { - Geo.lineString(1, 2); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_line_string_if_uneven_number_of_coordinates() { - Geo.lineString(1, 2, 3, 4, 5); - } - - @Test - public void should_build_polygon_from_coordinates() { - Polygon polygon = Geo.polygon(1, 2, 3, 4, 5, 6, 7, 8); - assertThat(polygon.getExteriorRing()) - .hasSize(4) - .contains(Point.fromCoordinates(1, 2)) - .contains(Point.fromCoordinates(3, 4)) - .contains(Point.fromCoordinates(5, 6)) - .contains(Point.fromCoordinates(7, 8)); - assertThat(polygon.getInteriorRings()).isEmpty(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_polygon_if_not_enough_coordinates() { - Geo.polygon(1, 2, 3, 4); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_polygon_if_uneven_number_of_coordinates() { - Geo.polygon(1, 2, 3, 4, 5, 6, 7); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java deleted file mode 100644 index 591269e31ad..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.predicates; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.junit.Test; - -public class SearchTest { - - @Test - public void testToken() { - P p = Search.token("needle"); - assertThat(p.test("needle")).isTrue(); - assertThat(p.test("This is a needle in a haystack")).isTrue(); - assertThat(p.test("This is just the haystack")).isFalse(); - } - - @Test - public void testPrefix() { - P p = Search.prefix("abcd"); - assertThat(p.test("abcd")).isTrue(); - assertThat(p.test("abcdefg hijkl")).isTrue(); - assertThat(p.test("zabcd")).isFalse(); - } - - @Test - public void testTokenPrefix() { - P p = Search.tokenPrefix("abcd"); - assertThat(p.test("abcd")).isTrue(); - assertThat(p.test("abcdefg hijkl")).isTrue(); - assertThat(p.test("z abcd")).isTrue(); - assertThat(p.test("ab cd")).isFalse(); - } - - @Test - public void testRegex() { - P p = Search.regex("(foo|bar)"); - assertThat(p.test("foo")).isTrue(); - assertThat(p.test("bar")).isTrue(); - assertThat(p.test("foo bar")).isFalse(); - } - - @Test - public void testTokenRegex() { - P p = Search.tokenRegex("(foo|bar)"); - assertThat(p.test("foo")).isTrue(); - assertThat(p.test("bar")).isTrue(); - assertThat(p.test("foo bar")).isTrue(); - assertThat(p.test("foo bar qix")).isTrue(); - assertThat(p.test("qix")).isFalse(); - } - - @Test - public void testPhrase() { - P p = Search.phrase("Hello world", 2); - assertThat(p.test("Hello World")).isTrue(); - assertThat(p.test("Hello Big 
World")).isTrue(); - assertThat(p.test("Hello Big Wild World")).isTrue(); - assertThat(p.test("Hello The Big Wild World")).isFalse(); - assertThat(p.test("Goodbye world")).isFalse(); - } - - @Test - public void testPhraseFragment() { - // Tests JAVA-1744 - P p = Search.phrase("a b", 0); - assertThat(p.test("a b")).isTrue(); - assertThat(p.test("a")).isFalse(); - assertThat(p.test("b")).isFalse(); - } - - @Test - public void testFuzzy() { - P p = Search.fuzzy("abc", 1); - assertThat(p.test("abcd")).isTrue(); - assertThat(p.test("ab")).isTrue(); - assertThat(p.test("abce")).isTrue(); - assertThat(p.test("abdc")).isTrue(); - assertThat(p.test("badc")).isFalse(); - - // Make sure we do NOT calculate the Damerau–Levenshtein distance (2), but the optimal string - // alignment distance (3): - assertThat(Search.tokenFuzzy("ca", 2).test("abc")).isFalse(); - } - - @Test - public void testTokenFuzzy() { - P p = Search.tokenFuzzy("abc", 1); - assertThat(p.test("foo abcd")).isTrue(); - assertThat(p.test("foo ab")).isTrue(); - assertThat(p.test("foo abce")).isTrue(); - assertThat(p.test("foo abdc")).isTrue(); - assertThat(p.test("foo badc")).isFalse(); - - // Make sure we do NOT calculate the Damerau–Levenshtein distance (2), but the optimal string - // alignment distance (3): - assertThat(Search.tokenFuzzy("ca", 2).test("abc 123")).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java deleted file mode 100644 index d001f791e82..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal; - -import java.nio.file.Path; -import java.nio.file.Paths; - -public class DependencyCheckTest extends DependencyCheckTestBase { - - @Override - protected Path getDepsTxtPath() { - return Paths.get( - getBaseResourcePathString(), - "target", - "classes", - "com", - "datastax", - "dse", - "driver", - "internal", - "deps.txt"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java deleted file mode 100644 index f2ce5513d65..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Path; -import java.util.Properties; -import org.junit.Test; - -public abstract class DependencyCheckTestBase { - - private String baseResourcePath; - - protected DependencyCheckTestBase() { - Properties projectProperties = new Properties(); - try (InputStream is = this.getClass().getResourceAsStream("/project.properties")) { - projectProperties.load(is); - baseResourcePath = projectProperties.getProperty("project.basedir"); - } catch (IOException ioe) { - throw new AssertionError( - "Error retrieving \"project.basedir\" value from \"/project.properties\". 
Please check test resources in this project.", - ioe); - } - assert baseResourcePath != null; - } - - @Test - public void should_generate_deps_txt() { - assertThat(getDepsTxtPath()).exists(); - } - - protected final String getBaseResourcePathString() { - return baseResourcePath; - } - - protected abstract Path getDepsTxtPath(); -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java deleted file mode 100644 index 9e4556e528d..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.when; -import static org.mockito.MockitoAnnotations.initMocks; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; -import com.datastax.oss.protocol.internal.request.Startup; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; - -@RunWith(DataProviderRunner.class) -public class DseStartupOptionsBuilderTest { - - private DefaultDriverContext driverContext; - - // Mocks for instantiating the DSE driver context - @Mock private DriverConfigLoader configLoader; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile defaultProfile; - - @Before - public void before() { - initMocks(this); - when(configLoader.getInitialConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.isDefined(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE)).thenReturn(true); - } - - private void 
buildContext(UUID clientId, String applicationName, String applicationVersion) { - this.driverContext = - new DefaultDriverContext( - configLoader, - ProgrammaticArguments.builder() - .withStartupClientId(clientId) - .withStartupApplicationName(applicationName) - .withStartupApplicationVersion(applicationVersion) - .build()); - } - - private void assertDefaultStartupOptions(Startup startup) { - assertThat(startup.options).containsEntry(Startup.CQL_VERSION_KEY, "3.0.0"); - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_NAME_KEY, Session.OSS_DRIVER_COORDINATES.getName()); - assertThat(startup.options).containsKey(StartupOptionsBuilder.DRIVER_VERSION_KEY); - Version version = Version.parse(startup.options.get(StartupOptionsBuilder.DRIVER_VERSION_KEY)); - assertThat(version).isEqualTo(Session.OSS_DRIVER_COORDINATES.getVersion()); - assertThat(startup.options).containsKey(StartupOptionsBuilder.CLIENT_ID_KEY); - } - - @Test - public void should_build_startup_options_with_no_compression_if_undefined() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - buildContext(null, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - @DataProvider({"lz4", "snappy"}) - public void should_build_startup_options_with_compression(String compression) { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn(compression); - buildContext(null, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - // assert the compression option is present - assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, compression); - assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_NAME_KEY); - 
assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_VERSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_fail_to_build_startup_options_with_invalid_compression() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("foobar"); - buildContext(null, null, null); - assertThatIllegalArgumentException() - .isThrownBy(() -> new Startup(driverContext.getStartupOptions())); - } - - @Test - public void should_build_startup_options_with_client_id() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - UUID customClientId = Uuids.random(); - buildContext(customClientId, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - // assert the client id is present - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.CLIENT_ID_KEY, customClientId.toString()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_NAME_KEY); - assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_VERSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_build_startup_options_with_application_version_and_name() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - buildContext(null, "Custom_App_Name", "Custom_App_Version"); - Startup startup = new Startup(driverContext.getStartupOptions()); - // assert the app name and version are present - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name"); - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - 
@Test - public void should_build_startup_options_with_all_options() { - // mock config to specify "snappy" compression - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("snappy"); - - UUID customClientId = Uuids.random(); - - buildContext(customClientId, "Custom_App_Name", "Custom_App_Version"); - Startup startup = new Startup(driverContext.getStartupOptions()); - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.CLIENT_ID_KEY, customClientId.toString()) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name") - .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); - assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, "snappy"); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_use_configuration_when_no_programmatic_values_provided() { - when(defaultProfile.getString(DseDriverOption.APPLICATION_NAME, null)) - .thenReturn("Config_App_Name"); - when(defaultProfile.getString(DseDriverOption.APPLICATION_VERSION, null)) - .thenReturn("Config_App_Version"); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - - buildContext(null, null, null); - Startup startup = new Startup(driverContext.getStartupOptions()); - - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Config_App_Name") - .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Config_App_Version"); - } - - @Test - public void should_ignore_configuration_when_programmatic_values_provided() { - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - - buildContext(null, "Custom_App_Name", "Custom_App_Version"); - Startup startup = new Startup(driverContext.getStartupOptions()); - - assertThat(startup.options) - .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name") - 
.containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java deleted file mode 100644 index 1edb7c183bf..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.never; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.InOrder; -import org.mockito.Mockito; - -public class ContinuousCqlRequestHandlerNodeTargetingTest - extends ContinuousCqlRequestHandlerTestBase { - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_fail_if_targeted_node_not_available(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withEmptyPool(node3) - .withProtocolVersion(version) - .build()) { - - LoadBalancingPolicyWrapper loadBalancingPolicy = - 
harness.getContext().getLoadBalancingPolicyWrapper(); - InOrder invocations = Mockito.inOrder(loadBalancingPolicy); - - // target node3, which should be unavailable - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT.setNode(node3), - harness.getSession(), - harness.getContext(), - "target node 3, unavailable") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> errors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(errors).hasSize(1); - List nodeErrors = errors.values().iterator().next(); - assertThat(nodeErrors).singleElement().isInstanceOf(NodeUnavailableException.class); - invocations - .verify(loadBalancingPolicy, never()) - .newQueryPlan(any(Request.class), anyString(), any(Session.class)); - }); - - resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "no node targeting, should use node 1") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node1); - invocations - .verify(loadBalancingPolicy) - .newQueryPlan( - UNDEFINED_IDEMPOTENCE_STATEMENT, - DriverExecutionProfile.DEFAULT_NAME, - harness.getSession()); - }); - - resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "no node targeting, should use node 2") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node2); - invocations - .verify(loadBalancingPolicy) - .newQueryPlan( - UNDEFINED_IDEMPOTENCE_STATEMENT, - DriverExecutionProfile.DEFAULT_NAME, - harness.getSession()); - }); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = 
DseTestDataProviders.class) - public void should_target_node(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withResponse(node3, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withProtocolVersion(version) - .build()) { - - LoadBalancingPolicyWrapper loadBalancingPolicy = - harness.getContext().getLoadBalancingPolicyWrapper(); - InOrder invocations = Mockito.inOrder(loadBalancingPolicy); - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT.setNode(node3), - harness.getSession(), - harness.getContext(), - "target node 3") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node3); - invocations - .verify(loadBalancingPolicy, never()) - .newQueryPlan(any(Request.class), anyString(), any(Session.class)); - }); - - resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "no node targeting") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node1); - invocations - .verify(loadBalancingPolicy) - .newQueryPlan( - UNDEFINED_IDEMPOTENCE_STATEMENT, - DriverExecutionProfile.DEFAULT_NAME, - harness.getSession()); - }); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java deleted file mode 100644 index fd8d0ea1f98..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.protocol.internal.Frame.NO_PAYLOAD; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.servererrors.SyntaxError; -import 
com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.ProtocolConstants.ErrorCode; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.util.concurrent.Future; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import org.junit.Test; -import org.mockito.Mock; - -public class ContinuousCqlRequestHandlerReprepareTest extends ContinuousCqlRequestHandlerTestBase { - - private final byte[] preparedId = {1, 2, 3}; - private final ByteBuffer preparedIdBuf = ByteBuffer.wrap(preparedId); - - private final RepreparePayload repreparePayload = - new RepreparePayload(preparedIdBuf, "irrelevant", CqlIdentifier.fromCql("ks"), NO_PAYLOAD); - - private final ConcurrentMap repreparePayloads = - new ConcurrentHashMap<>(ImmutableMap.of(preparedIdBuf, repreparePayload)); - - private final Unprepared unprepared = new Unprepared("test", preparedId); - private final Prepared prepared = new Prepared(preparedId, null, null, null); - private final Error unrecoverable = - new Error(ProtocolConstants.ErrorCode.SYNTAX_ERROR, "bad query"); - private final Error recoverable = new Error(ErrorCode.SERVER_ERROR, "sorry"); - 
- @Mock private Future future; - - @Override - public void setup() { - super.setup(); - when(future.isSuccess()).thenReturn(true); - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_prepare_and_retry_on_same_node(DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(unprepared)) - .withProtocolVersion(version) - .build()) { - - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any())) - .then( - invocation -> { - AdminRequestHandler admin = invocation.getArgument(3); - admin.onResponse(defaultFrameOf(prepared)); - return future; - }); - - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test") - .handle(); - - verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any()); - // should have attempted to execute the query twice on the same node - verify(harness.getChannel(node1), times(2)) - .write(any(Query.class), anyBoolean(), anyMap(), any()); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_abort_when_prepare_fails_with_unrecoverable_error(DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(unprepared)) - .withProtocolVersion(version) - .build()) { - - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any())) - .then( - invocation -> { - AdminRequestHandler admin = invocation.getArgument(3); - admin.onResponse(defaultFrameOf(unrecoverable)); - return future; - }); - - ContinuousCqlRequestHandler handler = - new 
ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - verify(harness.getChannel(node1)).write(any(Query.class), anyBoolean(), anyMap(), any()); - verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any()); - - assertThat(handler.getState()).isEqualTo(-2); - assertThat(page1Future).isCompletedExceptionally(); - Throwable t = catchThrowable(() -> page1Future.toCompletableFuture().get()); - assertThat(t).hasRootCauseInstanceOf(SyntaxError.class).hasMessageContaining("bad query"); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_try_next_node_when_prepare_fails_with_recoverable_error( - DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withResponse(node1, defaultFrameOf(unprepared)) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .withProtocolVersion(version) - .build()) { - - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any())) - .then( - invocation -> { - AdminRequestHandler admin = invocation.getArgument(3); - admin.onResponse(defaultFrameOf(recoverable)); - return future; - }); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - verify(harness.getChannel(node1)).write(any(Query.class), anyBoolean(), anyMap(), any()); - verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any()); - // should have tried the next host - verify(harness.getChannel(node2)).write(any(Query.class), anyBoolean(), anyMap(), any()); - - 
assertThat(handler.getState()).isEqualTo(-1); - assertThatStage(page1Future) - .isSuccess( - rs -> { - assertThat(rs.currentPage()).hasSize(1); - assertThat(rs.hasMorePages()).isFalse(); - assertThat(rs.getExecutionInfo().getCoordinator()).isEqualTo(node2); - assertThat(rs.getExecutionInfo().getErrors()) - .hasSize(1) - .allSatisfy( - entry -> { - assertThat(entry.getKey()).isEqualTo(node1); - assertThat(entry.getValue()) - .isInstanceOf(UnexpectedResponseException.class) - .hasMessageContaining(recoverable.toString()); - }); - }); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java deleted file mode 100644 index 97fe82985de..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java +++ /dev/null @@ -1,600 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.dse.driver.DseTestDataProviders.allDseProtocolVersions; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.driver.TestDataProviders.combine; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atMost; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import 
com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.error.ReadTimeout; -import com.datastax.oss.protocol.internal.response.error.Unavailable; -import com.datastax.oss.protocol.internal.response.error.WriteTimeout; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.Mockito; - -public class ContinuousCqlRequestHandlerRetryTest extends ContinuousCqlRequestHandlerTestBase { - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_try_next_node_if_bootstrapping( - boolean defaultIdempotence, Statement statement, DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - 
assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - assertThat(executionInfo.getErrors().get(0).getValue()) - .isInstanceOf(BootstrappingException.class); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - Mockito.verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - }); - } - } - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_rethrow_query_validation_error( - boolean defaultIdempotence, Statement statement, DseProtocolVersion version) { - - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.INVALID, "mock message"))) - .build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-2); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error) - .isInstanceOf(InvalidQueryException.class) - .hasMessage("mock message"); - Mockito.verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.OTHER_ERRORS), anyString()); - Mockito.verify(nodeMetricUpdater1) - .updateTimer( - 
eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_next_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.RETRIES), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.retryMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, 
atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_same_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_SAME); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.RETRIES), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.retryMetric), anyString()); - 
Mockito.verify(nodeMetricUpdater1, atMost(2)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_ignore_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.IGNORE); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-1); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isFalse(); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(0); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(DefaultNodeMetric.IGNORES), anyString()); - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.ignoreMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - 
Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-2); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndNotIdempotent") - public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_rethrows( - FailureScenario failureScenario, - boolean defaultIdempotence, - Statement statement, - DseProtocolVersion version) { - - // For two of the possible exceptions, the retry policy is called even if the statement is not - // idempotent - boolean shouldCallRetryPolicy = - (failureScenario.expectedExceptionClass.equals(UnavailableException.class) - || 
failureScenario.expectedExceptionClass.equals(ReadTimeoutException.class)); - - RequestHandlerTestHarness.Builder harnessBuilder = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - if (shouldCallRetryPolicy) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - } - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = handler.handle(); - - assertThat(handler.getState()).isEqualTo(-2); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - // When non idempotent, the policy is bypassed completely: - if (!shouldCallRetryPolicy) { - Mockito.verifyNoMoreInteractions( - harness.getContext().getRetryPolicy(anyString())); - } - - Mockito.verify(nodeMetricUpdater1) - .incrementCounter(eq(failureScenario.errorMetric), anyString()); - Mockito.verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - anyString(), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - Mockito.verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - /** - * Sets up the mocks to simulate an error from a node, and make the retry policy return a given - * decision for that error. 
- */ - private abstract static class FailureScenario { - private final Class expectedExceptionClass; - final DefaultNodeMetric errorMetric; - final DefaultNodeMetric retryMetric; - final DefaultNodeMetric ignoreMetric; - - FailureScenario( - Class expectedExceptionClass, - DefaultNodeMetric errorMetric, - DefaultNodeMetric retryMetric, - DefaultNodeMetric ignoreMetric) { - this.expectedExceptionClass = expectedExceptionClass; - this.errorMetric = errorMetric; - this.retryMetric = retryMetric; - this.ignoreMetric = ignoreMetric; - } - - abstract void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node); - - abstract void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict); - } - - @DataProvider - public static Object[][] failure() { - return TestDataProviders.fromList( - new FailureScenario( - ReadTimeoutException.class, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new ReadTimeout( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 1, 2, true))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onReadTimeoutVerdict( - any(SimpleStatement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(true), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - WriteTimeoutException.class, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new WriteTimeout( - "mock message", - ProtocolConstants.ConsistencyLevel.LOCAL_ONE, - 1, - 2, - ProtocolConstants.WriteType.SIMPLE))); - } - - 
@Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onWriteTimeoutVerdict( - any(SimpleStatement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(DefaultWriteType.SIMPLE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - UnavailableException.class, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Unavailable( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 2, 1))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onUnavailableVerdict( - any(SimpleStatement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - ServerError.class, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock server error"))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onErrorResponseVerdict( - any(SimpleStatement.class), any(ServerError.class), eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - HeartbeatException.class, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponseFailure(node, Mockito.mock(HeartbeatException.class)); - } - - @Override - public void 
mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onRequestAbortedVerdict( - any(SimpleStatement.class), any(HeartbeatException.class), eq(0))) - .thenReturn(verdict); - } - }); - } - - @DataProvider - public static Object[][] failureAndIdempotent() { - return combine(failure(), excludeBatchStatements(idempotentConfig()), allDseProtocolVersions()); - } - - @DataProvider - public static Object[][] failureAndNotIdempotent() { - return combine( - failure(), excludeBatchStatements(nonIdempotentConfig()), allDseProtocolVersions()); - } - - @DataProvider - public static Object[][] allIdempotenceConfigs() { - return combine( - excludeBatchStatements(ContinuousCqlRequestHandlerTestBase.allIdempotenceConfigs()), - allDseProtocolVersions()); - } - - private static Object[][] excludeBatchStatements(Object[][] configs) { - List result = new ArrayList<>(); - for (Object[] config : configs) { - if (!(config[1] instanceof BatchStatement)) { - result.add(config); - } - } - return result.toArray(new Object[][] {}); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java deleted file mode 100644 index a816183e9ee..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java +++ /dev/null @@ -1,529 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; -import static com.datastax.dse.protocol.internal.DseProtocolConstants.RevisionType.CANCEL_CONTINUOUS_PAGING; -import static com.datastax.dse.protocol.internal.DseProtocolConstants.RevisionType.MORE_CONTINUOUS_PAGES; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.argThat; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.matches; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.protocol.internal.request.Revise; -import 
com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.ProtocolFeature; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; -import org.junit.Test; -import org.mockito.Mockito; - -public class ContinuousCqlRequestHandlerTest extends ContinuousCqlRequestHandlerTestBase { - - private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("test\\|\\d*\\|\\d"); - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_single_page_result(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - 
harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_multi_page_result(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - - assertThatStage(page1Future) - .isSuccess( - page1 -> { - assertThat(page1.hasMorePages()).isTrue(); - assertThat(page1.pageNumber()).isEqualTo(1); - Iterator rows = page1.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows).toIterable().hasSize(10); - ExecutionInfo executionInfo = page1.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - 
assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNotNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - - ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); - assertThat(handler.getPendingResult()).isNull(); - CompletionStage page2Future = page1.fetchNextPage(); - assertThat(handler.getPendingResult()).isNotNull(); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, true))); - - assertThatStage(page2Future) - .isSuccess( - page2 -> { - assertThat(page2.hasMorePages()).isFalse(); - assertThat(page2.pageNumber()).isEqualTo(2); - Iterator rows = page2.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows).toIterable().hasSize(10); - ExecutionInfo executionInfo = page2.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_fail_if_no_node_available(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - // Mock no responses => this will produce an empty query plan - .build()) { - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - 
assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - @UseDataProvider(value = "allOssProtocolVersions", location = DseTestDataProviders.class) - public void should_throw_if_protocol_version_does_not_support_continuous_paging( - ProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder().withProtocolVersion(version).build()) { - Mockito.when( - harness - .getContext() - .getProtocolVersionRegistry() - .supports(any(DefaultProtocolVersion.class), any(ProtocolFeature.class))) - .thenReturn(false); - assertThatThrownBy( - () -> - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle()) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Cannot execute continuous paging requests with protocol version " + version); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_time_out_if_first_page_takes_too_long(DseProtocolVersion version) - throws Exception { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // mark the initial request as successful, which should schedule a timeout for the first page - node1Behavior.setWriteSuccess(); - CapturedTimeout page1Timeout = harness.nextScheduledTimeout(); - assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos()); - - page1Timeout.task().run(page1Timeout); - - assertThatStage(resultSetFuture) - .isFailed( - t -> - assertThat(t) - 
.isInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Timed out waiting for page 1")); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_time_out_if_other_page_takes_too_long(DseProtocolVersion version) - throws Exception { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage page1Future = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // mark the initial request as successful, which should schedule a timeout for the first page - node1Behavior.setWriteSuccess(); - CapturedTimeout page1Timeout = harness.nextScheduledTimeout(); - assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos()); - - // the server replies with page 1, the corresponding timeout should be cancelled - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - assertThat(page1Timeout.isCancelled()).isTrue(); - - // request page 2, the queue is empty so this should request more pages and schedule another - // timeout - ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future); - CompletionStage page2Future = page1.fetchNextPage(); - CapturedTimeout page2Timeout = harness.nextScheduledTimeout(); - assertThat(page2Timeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(TIMEOUT_OTHER_PAGES.toNanos()); - - page2Timeout.task().run(page2Timeout); - - assertThatStage(page2Future) - .isFailed( - t -> - assertThat(t) - .isInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Timed out waiting for page 2")); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - 
public void should_cancel_future_if_session_cancelled(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - // will be discarded - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); - - ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future); - page1.cancel(); - - assertThat(handler.getState()).isEqualTo(-2); - assertThat(page1.fetchNextPage()).isCancelled(); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_cancel_session_if_future_cancelled(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - page1Future.toCompletableFuture().cancel(true); - // this should be ignored - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - assertThat(handler.getState()).isEqualTo(-2); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_not_cancel_session_if_future_cancelled_but_already_done( - 
DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - // this will complete page 1 future - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, true))); - - // to late - page1Future.toCompletableFuture().cancel(true); - assertThat(handler.getState()).isEqualTo(-1); - } - } - - @Test - public void should_send_cancel_request_if_dse_v2() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V2); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - ContinuousCqlRequestHandler handler = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); - CompletionStage page1Future = handler.handle(); - - page1Future.toCompletableFuture().cancel(true); - assertThat(handler.getState()).isEqualTo(-2); - verify(node1Behavior.getChannel()) - .write(argThat(this::isCancelRequest), anyBoolean(), anyMap(), any()); - } - } - - @Test - public void should_toggle_channel_autoread_if_dse_v1() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V1); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage page1Future = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // simulate the arrival of 5 pages, the first one will complete page1 
future above, - // the following 4 will be enqueued and should trigger autoread off - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(3, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(4, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(5, false))); - - verify(node1Behavior.getChannel().config()).setAutoRead(false); - - // simulate the retrieval of 2 pages, this should dequeue page 2 - // and trigger autoread on - ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); - CompletableFutures.getCompleted(page1.fetchNextPage()); - - verify(node1Behavior.getChannel().config()).setAutoRead(true); - - // in DSE_V1, the backpressure request should not have been sent - verify(node1Behavior.getChannel(), never()) - .write(any(Revise.class), anyBoolean(), anyMap(), any()); - } - } - - @Test - public void should_send_backpressure_request_if_dse_v2() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V2); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - CompletionStage page1Future = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // simulate the arrival of 4 pages, the first one will complete page1 future above, - // the following 3 will be enqueued - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(3, false))); - 
node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(4, false))); - - // simulate the retrieval of 2 pages, this should dequeue page 2 - // and trigger a backpressure request as the queue is now half empty (2/4) - ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); - CompletableFutures.getCompleted(page1.fetchNextPage()); - - verify(node1Behavior.getChannel()) - .write(argThat(this::isBackpressureRequest), anyBoolean(), anyMap(), any()); - // should not mess with autoread in dse v2 - verify(node1Behavior.getChannel().config(), never()).setAutoRead(anyBoolean()); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_invoke_request_tracker(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withResponse( - node1, - defaultFrameOf( - new com.datastax.oss.protocol.internal.response.Error( - ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - RequestTracker requestTracker = mock(RequestTracker.class); - when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - CompletionStage resultSetFuture = - new ContinuousCqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).isNotEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - 
assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - verify(requestTracker) - .onNodeError( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - any(BootstrappingException.class), - anyLong(), - any(DriverExecutionProfile.class), - eq(node1), - matches(LOG_PREFIX_PER_REQUEST)); - verify(requestTracker) - .onNodeSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - matches(LOG_PREFIX_PER_REQUEST)); - verify(requestTracker) - .onSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - matches(LOG_PREFIX_PER_REQUEST)); - verifyNoMoreInteractions(requestTracker); - }); - } - } - - private boolean isBackpressureRequest(Message argument) { - return argument instanceof Revise && ((Revise) argument).revisionType == MORE_CONTINUOUS_PAGES; - } - - private boolean isCancelRequest(Message argument) { - return argument instanceof Revise - && ((Revise) argument).revisionType == CANCEL_CONTINUOUS_PAGING; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java deleted file mode 100644 index 04195f5faf0..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES; -import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE; -import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import java.time.Duration; - -public abstract class ContinuousCqlRequestHandlerTestBase extends CqlRequestHandlerTestBase { - - static final Duration TIMEOUT_FIRST_PAGE = Duration.ofSeconds(2); - static final Duration TIMEOUT_OTHER_PAGES = Duration.ofSeconds(1); - - protected RequestHandlerTestHarness.Builder continuousHarnessBuilder() { - return new RequestHandlerTestHarness.Builder() { - @Override - public RequestHandlerTestHarness build() { - RequestHandlerTestHarness harness = super.build(); - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getDuration(CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE)) - .thenReturn(TIMEOUT_FIRST_PAGE); - 
when(config.getDuration(CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES)) - .thenReturn(TIMEOUT_OTHER_PAGES); - when(config.getInt(CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES)).thenReturn(4); - return harness; - } - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java deleted file mode 100644 index 1e59559013f..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.verify; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.assertj.core.api.ThrowableAssert.ThrowingCallable; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DefaultContinuousAsyncResultSetTest { - - @Mock private ColumnDefinitions columnDefinitions; - @Mock private ExecutionInfo executionInfo; - @Mock private ContinuousCqlRequestHandler handler; - @Mock private CountingIterator rows; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void should_fail_to_fetch_next_page_if_last() { - // Given - given(executionInfo.getPagingState()).willReturn(null); - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, false, executionInfo, handler); - - // When - boolean hasMorePages = resultSet.hasMorePages(); - ThrowingCallable nextPage = resultSet::fetchNextPage; - - // Then - assertThat(hasMorePages).isFalse(); - assertThatThrownBy(nextPage) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Can't call fetchNextPage() on the last page"); - } - - @Test - public void should_invoke_handler_to_fetch_next_page() { - // Given - CompletableFuture mockResultFuture = new CompletableFuture<>(); - 
given(handler.fetchNextPage()).willReturn(mockResultFuture); - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, true, executionInfo, handler); - - // When - boolean hasMorePages = resultSet.hasMorePages(); - CompletionStage nextPageFuture = resultSet.fetchNextPage(); - - // Then - assertThat(hasMorePages).isTrue(); - verify(handler).fetchNextPage(); - assertThat(nextPageFuture).isEqualTo(mockResultFuture); - } - - @Test - public void should_invoke_handler_to_cancel() { - // Given - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, true, executionInfo, handler); - // When - resultSet.cancel(); - - // Then - verify(handler).cancel(); - } - - @Test - public void should_report_remaining_rows() { - // Given - given(rows.remaining()).willReturn(42); - DefaultContinuousAsyncResultSet resultSet = - new DefaultContinuousAsyncResultSet( - rows, columnDefinitions, 1, true, executionInfo, handler); - - // When - int remaining = resultSet.remaining(); - Iterable currentPage = resultSet.currentPage(); - - // Then - assertThat(remaining).isEqualTo(42); - assertThat(currentPage.iterator()).isSameAs(rows); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java deleted file mode 100644 index 2bfb4768e49..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import java.util.Arrays; -import java.util.Iterator; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.Mockito; - -public class DefaultContinuousResultSetTest { - - @Test - public void should_create_result_set_from_single_page() { - // Given - ContinuousAsyncResultSet page1 = mockPage(false, 0, 1, 2); - - // When - ResultSet resultSet = new DefaultContinuousResultSet(page1); - - // Then - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - 
assertThat(iterator.hasNext()).isFalse(); - } - - @Test - public void should_create_result_set_from_multiple_pages() { - // Given - ContinuousAsyncResultSet page1 = mockPage(true, 0, 1, 2); - ContinuousAsyncResultSet page2 = mockPage(true, 3, 4, 5); - ContinuousAsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - ResultSet resultSet = new DefaultContinuousResultSet(page1); - - // Then - assertThat(resultSet.iterator().hasNext()).isTrue(); - - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - assertThat(resultSet.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); - - assertNextRow(iterator, 3); - assertNextRow(iterator, 4); - assertNextRow(iterator, 5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(resultSet.getExecutionInfo()).isEqualTo(page3.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - - assertNextRow(iterator, 6); - assertNextRow(iterator, 7); - assertNextRow(iterator, 8); - } - - private static ContinuousAsyncResultSet mockPage(boolean nextPage, Integer... 
data) { - ContinuousAsyncResultSet page = Mockito.mock(ContinuousAsyncResultSet.class); - - ColumnDefinitions columnDefinitions = Mockito.mock(ColumnDefinitions.class); - Mockito.when(page.getColumnDefinitions()).thenReturn(columnDefinitions); - - ExecutionInfo executionInfo = Mockito.mock(ExecutionInfo.class); - Mockito.when(page.getExecutionInfo()).thenReturn(executionInfo); - - if (nextPage) { - Mockito.when(page.hasMorePages()).thenReturn(true); - Mockito.when(page.fetchNextPage()).thenReturn(Mockito.spy(new CompletableFuture<>())); - } else { - Mockito.when(page.hasMorePages()).thenReturn(false); - Mockito.when(page.fetchNextPage()).thenThrow(new IllegalStateException()); - } - - Iterator rows = Arrays.asList(data).iterator(); - CountingIterator iterator = - new CountingIterator(data.length) { - @Override - protected Row computeNext() { - return rows.hasNext() ? mockRow(rows.next()) : endOfData(); - } - }; - Mockito.when(page.currentPage()).thenReturn(() -> iterator); - Mockito.when(page.remaining()).thenAnswer(invocation -> iterator.remaining()); - - return page; - } - - private static Row mockRow(int index) { - Row row = Mockito.mock(Row.class); - Mockito.when(row.getInt(0)).thenReturn(index); - return row; - } - - private static void complete( - CompletionStage stage, ContinuousAsyncResultSet result) { - stage.toCompletableFuture().complete(result); - } - - private static void assertNextRow(Iterator iterator, int expectedValue) { - assertThat(iterator.hasNext()).isTrue(); - Row row0 = iterator.next(); - assertThat(row0.getInt(0)).isEqualTo(expectedValue); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java deleted file mode 100644 index 0bfb00695d3..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.continuous.reactive; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestHandlerTestBase; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import 
com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.util.List; -import org.junit.Test; - -public class ContinuousCqlRequestReactiveProcessorTest extends ContinuousCqlRequestHandlerTestBase { - - @Test - public void should_be_able_to_process_reactive_result_set() { - ContinuousCqlRequestReactiveProcessor processor = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); - assertThat( - processor.canProcess( - UNDEFINED_IDEMPOTENCE_STATEMENT, - ContinuousCqlRequestReactiveProcessor.CONTINUOUS_REACTIVE_RESULT_SET)) - .isTrue(); - } - - @Test - public void should_create_request_handler() { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(DSE_V1); - try (RequestHandlerTestHarness harness = builder.build()) { - ContinuousCqlRequestReactiveProcessor processor = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); - assertThat( - processor.process( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test")) - .isInstanceOf(DefaultContinuousReactiveResultSet.class); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_single_page_result(DseProtocolVersion version) { - try (RequestHandlerTestHarness harness = - continuousHarnessBuilder() - .withProtocolVersion(version) - .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) - .build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - ContinuousReactiveResultSet publisher = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - 
List rows = Flowable.fromPublisher(publisher).toList().blockingGet(); - - assertThat(rows).hasSize(1); - ReactiveRow row = rows.get(0); - assertThat(row.getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = row.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()).containsExactly(executionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(row.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(row.wasApplied()); - } - } - - @Test - @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_multi_page_result(DseProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - continuousHarnessBuilder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - ContinuousReactiveResultSet publisher = - new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - Flowable rowsPublisher = 
Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, true))); - - List rows = rowsPublisher.toList().blockingGet(); - assertThat(rows).hasSize(20); - - ReactiveRow first = rows.get(0); - ExecutionInfo firstExecutionInfo = first.getExecutionInfo(); - assertThat(firstExecutionInfo.getCoordinator()).isEqualTo(node1); - assertThat(firstExecutionInfo.getErrors()).isEmpty(); - assertThat(firstExecutionInfo.getIncomingPayload()).isEmpty(); - assertThat(firstExecutionInfo.getPagingState()).isNotNull(); - assertThat(firstExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(firstExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(firstExecutionInfo.getWarnings()).isEmpty(); - - ReactiveRow inSecondPage = rows.get(10); - ExecutionInfo secondExecutionInfo = inSecondPage.getExecutionInfo(); - assertThat(secondExecutionInfo.getCoordinator()).isEqualTo(node1); - assertThat(secondExecutionInfo.getErrors()).isEmpty(); - assertThat(secondExecutionInfo.getIncomingPayload()).isEmpty(); - assertThat(secondExecutionInfo.getPagingState()).isNull(); - assertThat(secondExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(secondExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(secondExecutionInfo.getWarnings()).isEmpty(); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .containsExactly(firstExecutionInfo, secondExecutionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(first.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - 
assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(first.wasApplied()); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java deleted file mode 100644 index a7a6bced9e8..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static com.datastax.dse.driver.DseTestFixtures.singleDseRow; -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.junit.Test; - -public class CqlRequestReactiveProcessorTest extends CqlRequestHandlerTestBase { - - @Test - public void should_be_able_to_process_reactive_result_set() { - CqlRequestReactiveProcessor processor = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); - assertThat( - processor.canProcess( - UNDEFINED_IDEMPOTENCE_STATEMENT, 
CqlRequestReactiveProcessor.REACTIVE_RESULT_SET)) - .isTrue(); - } - - @Test - public void should_create_request_handler() { - RequestHandlerTestHarness.Builder builder = - RequestHandlerTestHarness.builder().withProtocolVersion(DSE_V1); - try (RequestHandlerTestHarness harness = builder.build()) { - CqlRequestReactiveProcessor processor = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); - assertThat( - processor.process( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test")) - .isInstanceOf(DefaultReactiveResultSet.class); - } - } - - @Test - @UseDataProvider(value = "allDseAndOssProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_single_page_result(ProtocolVersion version) { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withProtocolVersion(version) - .withResponse(node1, defaultFrameOf(singleDseRow())) - .build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - ReactiveResultSet publisher = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - List rows = Flowable.fromPublisher(publisher).toList().blockingGet(); - - assertThat(rows).hasSize(1); - ReactiveRow row = rows.get(0); - assertThat(row.getString("message")).isEqualTo("hello, world"); - ExecutionInfo executionInfo = row.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - Flowable execInfosFlowable = - 
Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()).containsExactly(executionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(row.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(row.wasApplied()); - } - } - - @Test - @UseDataProvider(value = "allDseAndOssProtocolVersions", location = DseTestDataProviders.class) - public void should_complete_multi_page_result(ProtocolVersion version) { - RequestHandlerTestHarness.Builder builder = - RequestHandlerTestHarness.builder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (RequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - InternalDriverContext context = harness.getContext(); - - // The 2nd page is obtained by an "external" call to session.executeAsync(), - // so we need to mock that. 
- CompletableFuture page2Future = new CompletableFuture<>(); - when(session.executeAsync(any(Statement.class))).thenAnswer(invocation -> page2Future); - ExecutionInfo mockInfo = mock(ExecutionInfo.class); - - ReactiveResultSet publisher = - new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()) - .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); - - Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - // emulate arrival of page 1 - node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); - - // emulate arrival of page 2 following the call to session.executeAsync() - page2Future.complete( - Conversions.toResultSet( - DseTestFixtures.tenDseRows(2, true), - mockInfo, - harness.getSession(), - harness.getContext())); - - List rows = rowsPublisher.toList().blockingGet(); - assertThat(rows).hasSize(20); - - ReactiveRow first = rows.get(0); - ExecutionInfo firstExecutionInfo = first.getExecutionInfo(); - assertThat(firstExecutionInfo.getCoordinator()).isEqualTo(node1); - assertThat(firstExecutionInfo.getErrors()).isEmpty(); - assertThat(firstExecutionInfo.getIncomingPayload()).isEmpty(); - assertThat(firstExecutionInfo.getPagingState()).isNotNull(); - assertThat(firstExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(firstExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(firstExecutionInfo.getWarnings()).isEmpty(); - - ReactiveRow inSecondPage = rows.get(10); - ExecutionInfo secondExecutionInfo = inSecondPage.getExecutionInfo(); - assertThat(secondExecutionInfo).isSameAs(mockInfo); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .containsExactly(firstExecutionInfo, secondExecutionInfo); - - Flowable colDefsFlowable = - Flowable.fromPublisher(publisher.getColumnDefinitions()); - 
assertThat(colDefsFlowable.toList().blockingGet()) - .containsExactly(first.getColumnDefinitions()); - - Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); - assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(first.wasApplied()); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java deleted file mode 100644 index a9ff5222460..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import io.reactivex.Flowable; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.reactivestreams.Publisher; -import org.reactivestreams.tck.PublisherVerification; -import org.reactivestreams.tck.TestEnvironment; - -public class DefaultReactiveResultSetTckTest extends PublisherVerification { - - public DefaultReactiveResultSetTckTest() { - super(new TestEnvironment()); - } - - @Override - public Publisher createPublisher(long elements) { - // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE. - // Past 3 elements it never checks how many elements have been effectively produced, - // so we can safely cap at, say, 20. - int effective = (int) Math.min(elements, 20L); - return new DefaultReactiveResultSet(() -> createResults(effective)); - } - - @Override - public Publisher createFailedPublisher() { - DefaultReactiveResultSet publisher = new DefaultReactiveResultSet(() -> createResults(1)); - // Since our publisher does not support multiple - // subscriptions, we use that to create a failed publisher. 
- publisher.subscribe(new TestSubscriber<>()); - return publisher; - } - - private static CompletableFuture createResults(int elements) { - CompletableFuture previous = null; - if (elements > 0) { - // create pages of 5 elements each to exercise pagination - List pages = - Flowable.range(0, elements).buffer(5).map(List::size).toList().blockingGet(); - Collections.reverse(pages); - for (Integer size : pages) { - CompletableFuture future = new CompletableFuture<>(); - future.complete(new MockAsyncResultSet(size, previous)); - previous = future; - } - } else { - previous = new CompletableFuture<>(); - previous.complete(new MockAsyncResultSet(0, null)); - } - return previous; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java deleted file mode 100644 index 3783a2c6922..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.assertj.core.util.Lists; - -public class MockAsyncResultSet implements AsyncResultSet { - - private final List rows; - private final Iterator iterator; - private final CompletionStage nextPage; - private final ExecutionInfo executionInfo = mock(ExecutionInfo.class); - private final ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - private int remaining; - - public MockAsyncResultSet(int size, CompletionStage nextPage) { - this(IntStream.range(0, size).boxed().map(MockRow::new).collect(Collectors.toList()), nextPage); - } - - public MockAsyncResultSet(List rows, CompletionStage nextPage) { - this.rows = rows; - iterator = rows.iterator(); - remaining = rows.size(); - this.nextPage = nextPage; - } - - @Override - public Row one() { - Row next = iterator.next(); - remaining--; - return next; - } - - @Override - public int remaining() { - return remaining; - } - - @NonNull - @Override - public List currentPage() { - return Lists.newArrayList(rows); - } - - @Override - public boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - return nextPage; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public boolean 
wasApplied() { - return true; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java deleted file mode 100644 index 792bfb432f6..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; - -class MockRow implements Row { - - private int index; - - MockRow(int index) { - this.index = index; - } - - @Override - public int size() { - return 0; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return mock(CodecRegistry.class); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return DefaultProtocolVersion.V4; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return Collections.singletonList(0); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return 0; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return Collections.singletonList(0); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return 0; - } - - @NonNull - @Override - public DataType getType(int i) { - return DataTypes.INT; - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return DataTypes.INT; - } - - @NonNull - @Override - public 
DataType getType(@NonNull CqlIdentifier id) { - return DataTypes.INT; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return null; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) {} - - // equals and hashCode required for TCK tests that check that two subscribers - // receive the exact same set of items. - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof MockRow)) { - return false; - } - MockRow mockRow = (MockRow) o; - return index == mockRow.index; - } - - @Override - public int hashCode() { - return index; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java deleted file mode 100644 index 6a1a5d644e3..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.junit.Test; - -public class ReactiveResultSetSubscriptionTest { - - @Test - public void should_retrieve_entire_result_set() { - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - CompletableFuture future3 = new CompletableFuture<>(); - MockAsyncResultSet page1 = new MockAsyncResultSet(3, future2); - MockAsyncResultSet page2 = new MockAsyncResultSet(3, future3); - MockAsyncResultSet page3 = new MockAsyncResultSet(3, null); - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.complete(page1); - future2.complete(page2); - future3.complete(page3); - mainSubscriber.awaitTermination(); - List expected = new ArrayList<>(page1.currentPage()); - expected.addAll(page2.currentPage()); - expected.addAll(page3.currentPage()); - assertThat(mainSubscriber.getElements()).extracting("row").isEqualTo(expected); - assertThat(colDefsSubscriber.getElements()) - .hasSize(1) 
- .containsExactly(page1.getColumnDefinitions()); - assertThat(execInfosSubscriber.getElements()) - .hasSize(3) - .containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - assertThat(wasAppliedSubscriber.getElements()).hasSize(1).containsExactly(true); - } - - @Test - public void should_report_error_on_first_page() { - CompletableFuture future1 = new CompletableFuture<>(); - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.completeExceptionally(new UnavailableException(null, null, 0, 0)); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - assertThat(colDefsSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - assertThat(execInfosSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - assertThat(wasAppliedSubscriber.getError()) - .isNotNull() - .isInstanceOf(UnavailableException.class); - } - - @Test - public void should_report_synchronous_failure_on_first_page() { - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start( - () -> { - throw 
new IllegalStateException(); - }); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(IllegalStateException.class); - assertThat(colDefsSubscriber.getError()).isNotNull().isInstanceOf(IllegalStateException.class); - assertThat(execInfosSubscriber.getError()) - .isNotNull() - .isInstanceOf(IllegalStateException.class); - assertThat(wasAppliedSubscriber.getError()) - .isNotNull() - .isInstanceOf(IllegalStateException.class); - } - - @Test - public void should_report_error_on_intermediary_page() { - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - MockAsyncResultSet page1 = new MockAsyncResultSet(3, future2); - TestSubscriber mainSubscriber = new TestSubscriber<>(); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.complete(page1); - future2.completeExceptionally(new UnavailableException(null, null, 0, 0)); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getElements()).extracting("row").isEqualTo(page1.currentPage()); - assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - // colDefsSubscriber completed normally when page1 arrived - assertThat(colDefsSubscriber.getError()).isNull(); - assertThat(colDefsSubscriber.getElements()) - .hasSize(1) - .containsExactly(page1.getColumnDefinitions()); - // execInfosSubscriber completed with error, but should have emitted 1 item for page1 - assertThat(execInfosSubscriber.getElements()) - .hasSize(1) - .containsExactly(page1.getExecutionInfo()); - 
assertThat(execInfosSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class); - // colDefsSubscriber completed normally when page1 arrived - assertThat(wasAppliedSubscriber.getElements()).hasSize(1).containsExactly(true); - assertThat(wasAppliedSubscriber.getError()).isNull(); - } - - @Test - public void should_handle_empty_non_final_pages() { - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - CompletableFuture future3 = new CompletableFuture<>(); - MockAsyncResultSet page1 = new MockAsyncResultSet(10, future2); - MockAsyncResultSet page2 = new MockAsyncResultSet(0, future3); - MockAsyncResultSet page3 = new MockAsyncResultSet(10, null); - TestSubscriber mainSubscriber = new TestSubscriber<>(1); - TestSubscriber colDefsSubscriber = new TestSubscriber<>(); - TestSubscriber execInfosSubscriber = new TestSubscriber<>(); - TestSubscriber wasAppliedSubscriber = new TestSubscriber<>(); - ReactiveResultSetSubscription subscription = - new ReactiveResultSetSubscription<>( - mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber); - mainSubscriber.onSubscribe(subscription); - subscription.start(() -> future1); - future1.complete(page1); - future2.complete(page2); - // emulate backpressure - subscription.request(1); - future3.complete(page3); - subscription.request(Long.MAX_VALUE); - mainSubscriber.awaitTermination(); - assertThat(mainSubscriber.getError()).isNull(); - List expected = new ArrayList<>(page1.currentPage()); - expected.addAll(page3.currentPage()); - assertThat(mainSubscriber.getElements()).hasSize(20).extracting("row").isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java deleted file mode 100644 index 3bdd138beef..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import org.reactivestreams.Publisher; -import org.reactivestreams.tck.PublisherVerification; -import org.reactivestreams.tck.TestEnvironment; - -public class SimpleUnicastProcessorTckTest extends PublisherVerification { - - public SimpleUnicastProcessorTckTest() { - super(new TestEnvironment()); - } - - @Override - public Publisher createPublisher(long elements) { - // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE. - // Past 3 elements it never checks how many elements have been effectively produced, - // so we can safely cap at, say, 20. 
- int effective = (int) Math.min(elements, 20L); - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - for (int i = 0; i < effective; i++) { - processor.onNext(i); - } - processor.onComplete(); - return processor; - } - - @Override - public Publisher createFailedPublisher() { - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - // Since our publisher does not support multiple - // subscriptions, we use that to create a failed publisher. - processor.subscribe(new TestSubscriber<>()); - return processor; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java deleted file mode 100644 index 3ad2173946b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.Test; - -public class SimpleUnicastProcessorTest { - - /** Test for JAVA-2387. */ - @Test - public void should_propagate_upstream_signals_when_downstream_already_subscribed() { - // given - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - TestSubscriber subscriber = new TestSubscriber<>(); - // when - processor.subscribe(subscriber); // subscription happens before signals arrive - processor.onNext(1); - processor.onComplete(); - subscriber.awaitTermination(); - // then - assertThat(subscriber.getElements()).hasSize(1).containsExactly(1); - assertThat(subscriber.getError()).isNull(); - } - - @Test - public void should_delay_upstream_signals_until_downstream_is_subscribed() { - // given - SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); - TestSubscriber subscriber = new TestSubscriber<>(); - // when - processor.onNext(1); - processor.onComplete(); - processor.subscribe(subscriber); // subscription happens after signals arrive - subscriber.awaitTermination(); - // then - assertThat(subscriber.getElements()).hasSize(1).containsExactly(1); - assertThat(subscriber.getError()).isNull(); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java deleted file mode 100644 index 652155e5309..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.cql.reactive; - -import static org.assertj.core.api.Fail.fail; - -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -public class TestSubscriber implements Subscriber { - - private final List elements = new ArrayList<>(); - private final CountDownLatch latch = new CountDownLatch(1); - private final long demand; - private Subscription subscription; - private Throwable error; - - public TestSubscriber() { - this.demand = Long.MAX_VALUE; - } - - public TestSubscriber(long demand) { - this.demand = demand; - } - - @Override - public void onSubscribe(Subscription s) { - if (subscription != null) { - fail("already subscribed"); - } - subscription = s; - subscription.request(demand); - } - - @Override - public void onNext(T t) { - elements.add(t); - } - - @Override - public void onError(Throwable t) { - error = t; - latch.countDown(); - } - - @Override - public void onComplete() { - latch.countDown(); - } - - @Nullable - public Throwable getError() { - return error; - } - - @NonNull - public List 
getElements() { - return elements; - } - - public void awaitTermination() { - if (!Uninterruptibles.awaitUninterruptibly(latch, 1, TimeUnit.MINUTES)) { - fail("subscriber not terminated"); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java deleted file mode 100644 index 38dc84549c4..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.esri.core.geometry.ogc.OGCLineString; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import org.junit.Test; - -public class DefaultLineStringTest { - private final LineString lineString = - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - - private final String wkt = "LINESTRING (30 10, 10 30, 40 40)"; - - private final String json = - "{\"type\":\"LineString\",\"coordinates\":[[30.0,10.0],[10.0,30.0],[40.0,40.0]]}"; - - @Test - public void should_parse_valid_well_known_text() { - assertThat(LineString.fromWellKnownText(wkt)).isEqualTo(lineString); - } - - @Test - public void should_fail_to_parse_invalid_well_known_text() { - assertInvalidWkt("linestring()"); - assertInvalidWkt("linestring(30 10 20, 10 30 20)"); // 3d - assertInvalidWkt("linestring(0 0, 1 1, 0 1, 1 0)"); // crossing itself - assertInvalidWkt("superlinestring(30 10, 10 30, 40 40)"); - } - - @Test - public void should_convert_to_well_known_text() { - assertThat(lineString.toString()).isEqualTo(wkt); - } - - @Test - public void should_convert_to_well_known_binary() { - ByteBuffer actual = lineString.asWellKnownBinary(); - - ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - expected.position(0); - expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness - expected.putInt(2); // type - expected.putInt(3); // num lineStrings - expected.putDouble(30); // x1 - expected.putDouble(10); // y1 - expected.putDouble(10); // x2 - expected.putDouble(30); // y2 - expected.putDouble(40); // x3 - expected.putDouble(40); // y3 - expected.flip(); - - assertThat(actual).isEqualTo(expected); - } - - @Test - public void should_load_from_well_known_binary() { - ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - bb.position(0); - bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness - bb.putInt(2); // type - bb.putInt(3); // num lineStrings - bb.putDouble(30); // x1 - bb.putDouble(10); // y1 - bb.putDouble(10); // x2 - bb.putDouble(30); // y2 - bb.putDouble(40); // x3 - bb.putDouble(40); // y3 - bb.flip(); - - assertThat(LineString.fromWellKnownBinary(bb)).isEqualTo(lineString); - } - - @Test - public void should_parse_valid_geo_json() { - assertThat(LineString.fromGeoJson(json)).isEqualTo(lineString); - } - - @Test - public void should_convert_to_geo_json() { - assertThat(lineString.asGeoJson()).isEqualTo(json); - } - - @Test - public void should_convert_to_ogc_line_string() { - assertThat(((DefaultLineString) lineString).getOgcGeometry()).isInstanceOf(OGCLineString.class); - } - - @Test - public void should_produce_same_hashCode_for_equal_objects() { - LineString line1 = - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - LineString line2 = LineString.fromWellKnownText(wkt); - assertThat(line1).isEqualTo(line2); - assertThat(line1.hashCode()).isEqualTo(line2.hashCode()); - } - - @Test - public void should_expose_points() { - assertThat(lineString.getPoints()) - .containsOnly( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - assertThat(LineString.fromWellKnownText(wkt).getPoints()) - .containsOnly( - Point.fromCoordinates(30, 
10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - } - - @Test - public void should_encode_and_decode() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(lineString)).isEqualTo(lineString); - } - - @Test - public void should_contain_self() { - assertThat(lineString.contains(lineString)).isTrue(); - } - - @Test - public void should_contain_all_intersected_points_except_start_and_end() { - LineString s = - LineString.fromPoints( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 30), - Point.fromCoordinates(30, 30)); - assertThat(s.contains(Point.fromCoordinates(0, 0))).isFalse(); - assertThat(s.contains(Point.fromCoordinates(0, 15))).isTrue(); - assertThat(s.contains(Point.fromCoordinates(0, 30))).isTrue(); - assertThat(s.contains(Point.fromCoordinates(15, 30))).isTrue(); - assertThat(s.contains(Point.fromCoordinates(30, 30))).isFalse(); - } - - @Test - public void should_contain_substring() { - assertThat( - lineString.contains( - LineString.fromPoints( - Point.fromCoordinates(30, 10), Point.fromCoordinates(10, 30)))) - .isTrue(); - } - - @Test - public void should_not_contain_unrelated_string() { - assertThat( - lineString.contains( - LineString.fromPoints( - Point.fromCoordinates(10, 10), Point.fromCoordinates(30, 30)))) - .isFalse(); - } - - @Test - public void should_not_contain_polygon() { - LineString s = - LineString.fromPoints( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 30), - Point.fromCoordinates(30, 30), - Point.fromCoordinates(30, 0)); - LineString p = - LineString.fromPoints( - Point.fromCoordinates(10, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 20), - Point.fromCoordinates(20, 10)); - assertThat(s.contains(p)).isFalse(); - } - - @Test - public void should_accept_empty_shape() throws Exception { - DefaultLineString s = ((DefaultLineString) LineString.fromWellKnownText("LINESTRING EMPTY")); - assertThat(s.getOgcGeometry().isEmpty()).isTrue(); - } - - private 
void assertInvalidWkt(String s) { - try { - LineString.fromWellKnownText(s); - fail("Should have thrown InvalidTypeException"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java deleted file mode 100644 index 1e3a7366741..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.esri.core.geometry.ogc.OGCPoint; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import org.junit.Test; - -public class DefaultPointTest { - - private DefaultPoint point = new DefaultPoint(1.1, 2.2); - - private final String wkt = "POINT (1.1 2.2)"; - - private final String json = "{\"type\":\"Point\",\"coordinates\":[1.1,2.2]}"; - - @Test - public void should_parse_valid_well_known_text() { - assertThat(Point.fromWellKnownText(wkt)).isEqualTo(point); - } - - @Test - public void should_fail_to_parse_invalid_well_known_text() { - assertInvalidWkt("superpoint(1.1 2.2 3.3)"); - } - - @Test - public void should_convert_to_well_known_text() { - assertThat(point.toString()).isEqualTo(wkt); - } - - @Test - public void should_convert_to_well_knowm_binary() { - ByteBuffer actual = point.asWellKnownBinary(); - - ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - expected.position(0); - expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness - expected.putInt(1); // type - expected.putDouble(1.1); // x - expected.putDouble(2.2); // y - expected.flip(); - - assertThat(actual).isEqualTo(expected); - } - - @Test - public void should_load_from_well_known_binary() { - ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - bb.position(0); - bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness - bb.putInt(1); // type - bb.putDouble(1.1); // x - bb.putDouble(2.2); // y - bb.flip(); - - assertThat(Point.fromWellKnownBinary(bb)).isEqualTo(point); - } - - @Test - public void should_parse_valid_geo_json() { - assertThat(Point.fromGeoJson(json)).isEqualTo(point); - } - - @Test - public void should_convert_to_geo_json() { - assertThat(point.asGeoJson()).isEqualTo(json); - } - - @Test - public void should_convert_to_ogc_point() { - assertThat(point.getOgcGeometry()).isInstanceOf(OGCPoint.class); - } - - @Test - public void should_produce_same_hashCode_for_equal_objects() { - Point point1 = new DefaultPoint(10, 20); - Point point2 = Point.fromWellKnownText("POINT (10 20)"); - assertThat(point1).isEqualTo(point2); - assertThat(point1.hashCode()).isEqualTo(point2.hashCode()); - } - - @Test - public void should_encode_and_decode() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(point)).isEqualTo(point); - } - - @Test - public void should_contain_self() { - assertThat(point.contains(point)).isTrue(); - } - - @Test - public void should_not_contain_any_other_shape_than_self() { - DefaultPoint point2 = new DefaultPoint(1, 2); - DefaultPoint point3 = new DefaultPoint(1, 3); - assertThat(point.contains(point2)).isFalse(); - assertThat(point.contains(new DefaultLineString(point, point2))).isFalse(); - assertThat(point.contains(new DefaultPolygon(point, point2, point3))).isFalse(); - } - - @Test - public void should_accept_empty_shape() throws Exception { - DefaultPoint point = ((DefaultPoint) Point.fromWellKnownText("POINT EMPTY")); - assertThat(point.getOgcGeometry().isEmpty()).isTrue(); - } - - private void assertInvalidWkt(String s) { - try { - Point.fromWellKnownText(s); - fail("Should have thrown InvalidTypeException"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java 
b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java deleted file mode 100644 index d86e9cdc269..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.esri.core.geometry.ogc.OGCPolygon; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import org.junit.Test; - -public class DefaultPolygonTest { - - private Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - - private String wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"; - - private String json = - "{\"type\":\"Polygon\",\"coordinates\":[[[30.0,10.0],[10.0,20.0],[20.0,40.0],[40.0,40.0],[30.0,10.0]]]}"; - - @Test - public void should_parse_valid_well_known_text() { - assertThat(Polygon.fromWellKnownText(wkt)).isEqualTo(polygon); - } - - @Test - public void should_fail_to_parse_invalid_well_known_text() { - assertInvalidWkt("polygon(())"); // malformed - assertInvalidWkt("polygon((30 10 1, 40 40 1, 20 40 1, 10 20 1, 30 10 1))"); // 3d - assertInvalidWkt("polygon((0 0, 1 1, 0 1, 1 0, 0 0))"); // crosses itself - assertInvalidWkt("polygon123((30 10, 40 40, 20 40, 10 20, 30 10))"); // malformed - } - - @Test - public void should_convert_to_well_known_binary() { - ByteBuffer actual = polygon.asWellKnownBinary(); - - ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - expected.position(0); - expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness - expected.putInt(3); // type - expected.putInt(1); // num rings - expected.putInt(5); // num polygons (ring 1/1) - expected.putDouble(30); // x1 - expected.putDouble(10); // y1 - expected.putDouble(40); // x2 - expected.putDouble(40); // y2 - expected.putDouble(20); // x3 - expected.putDouble(40); // y3 - expected.putDouble(10); // x4 - expected.putDouble(20); // y4 - expected.putDouble(30); // x5 - expected.putDouble(10); // y5 - expected.flip(); - - assertThat(actual).isEqualTo(expected); - } - - @Test - public void should_load_from_well_known_binary() { - ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); - bb.position(0); - bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness - bb.putInt(3); // type - bb.putInt(1); // num rings - bb.putInt(5); // num polygons (ring 1/1) - bb.putDouble(30); // x1 - bb.putDouble(10); // y1 - bb.putDouble(40); // x2 - bb.putDouble(40); // y2 - bb.putDouble(20); // x3 - bb.putDouble(40); // y3 - bb.putDouble(10); // x4 - bb.putDouble(20); // y4 - bb.putDouble(30); // x5 - bb.putDouble(10); // y5 - bb.flip(); - - assertThat(Polygon.fromWellKnownBinary(bb)).isEqualTo(polygon); - } - - @Test - public void should_parse_valid_geo_json() { - assertThat(Polygon.fromGeoJson(json)).isEqualTo(polygon); - } - - @Test - public void should_convert_to_geo_json() { - assertThat(polygon.asGeoJson()).isEqualTo(json); - } - - @Test - public void should_convert_to_ogc_polygon() { - assertThat(((DefaultPolygon) polygon).getOgcGeometry()).isInstanceOf(OGCPolygon.class); - } - - @Test - public void should_produce_same_hashCode_for_equal_objects() { - Polygon polygon1 = - Polygon.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - Polygon polygon2 = Polygon.fromWellKnownText(wkt); - assertThat(polygon1).isEqualTo(polygon2); - 
assertThat(polygon1.hashCode()).isEqualTo(polygon2.hashCode()); - } - - @Test - public void should_build_with_constructor_without_checking_orientation() { - // By default, OGC requires outer rings to be clockwise and inner rings to be counterclockwise. - // We disable that in our constructors. - // This polygon has a single outer ring that is counterclockwise. - Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(5, 0), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(0, 0)); - assertThat(polygon.asWellKnownText()).isEqualTo("POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0))"); - } - - @Test - public void should_build_complex_polygon_with_builder() { - Polygon polygon = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)) - .addRing( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)) - .addRing( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)) - .build(); - assertThat(polygon.asWellKnownText()) - .isEqualTo( - "POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1), (3 1, 3 2, 4 2, 4 1, 3 1))"); - } - - @Test - public void should_expose_rings() { - assertThat(polygon.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - assertThat(polygon.getInteriorRings().isEmpty()).isTrue(); - - Polygon fromWkt = Polygon.fromWellKnownText(wkt); - assertThat(fromWkt.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); - assertThat(fromWkt.getInteriorRings().isEmpty()).isTrue(); - - Polygon complex = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - 
Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)) - .addRing( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)) - .addRing( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)) - .build(); - assertThat(complex.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)); - assertThat(complex.getInteriorRings()).hasSize(2); - assertThat(complex.getInteriorRings().get(0)) - .containsOnly( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)); - assertThat(complex.getInteriorRings().get(1)) - .containsOnly( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)); - - Polygon complexFromWkt = - Polygon.fromWellKnownText( - "POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1), (3 1, 3 2, 4 2, 4 1, 3 1))"); - assertThat(complexFromWkt.getExteriorRing()) - .containsOnly( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)); - assertThat(complexFromWkt.getInteriorRings()).hasSize(2); - assertThat(complexFromWkt.getInteriorRings().get(0)) - .containsOnly( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)); - assertThat(complexFromWkt.getInteriorRings().get(1)) - .containsOnly( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)); - } - - @Test - public void should_encode_and_decode() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(polygon)).isEqualTo(polygon); - } - - @Test - public void should_contain_self() { 
- assertThat(polygon.contains(polygon)).isTrue(); - } - - @Test - public void should_not_contain_point_or_linestring_on_exterior_ring() { - assertThat(polygon.contains(Point.fromCoordinates(30, 10))).isFalse(); - assertThat(polygon.contains(Point.fromCoordinates(30, 40))).isFalse(); - assertThat( - polygon.contains( - LineString.fromPoints( - Point.fromCoordinates(35, 40), Point.fromCoordinates(25, 40)))) - .isFalse(); - } - - @Test - public void should_contain_interior_shape() { - assertThat(polygon.contains(Point.fromCoordinates(20, 20))).isTrue(); - assertThat( - polygon.contains( - LineString.fromPoints( - Point.fromCoordinates(20, 20), Point.fromCoordinates(30, 20)))) - .isTrue(); - assertThat( - polygon.contains( - Polygon.fromPoints( - Point.fromCoordinates(20, 20), - Point.fromCoordinates(30, 20), - Point.fromCoordinates(20, 30)))) - .isTrue(); - } - - @Test - public void should_not_contain_exterior_shape() { - assertThat(polygon.contains(Point.fromCoordinates(10, 10))).isFalse(); - assertThat( - polygon.contains( - LineString.fromPoints( - Point.fromCoordinates(10, 10), Point.fromCoordinates(20, 20)))) - .isFalse(); - assertThat( - polygon.contains( - Polygon.fromPoints( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 10), - Point.fromCoordinates(10, 10)))) - .isFalse(); - } - - @Test - public void should_not_contain_shapes_in_interior_hole() { - Polygon complex = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(30, 0), - Point.fromCoordinates(30, 30), - Point.fromCoordinates(0, 30)) - .addRing( - Point.fromCoordinates(10, 10), - Point.fromCoordinates(20, 10), - Point.fromCoordinates(20, 20), - Point.fromCoordinates(10, 20)) - .build(); - assertThat(complex.contains(Point.fromCoordinates(15, 15))).isFalse(); - } - - @Test - public void should_accept_empty_shape() throws Exception { - Polygon polygon = Polygon.fromWellKnownText("POLYGON EMPTY"); - assertThat(polygon.getExteriorRing()).isEmpty(); - 
assertThat(((DefaultPolygon) polygon).getOgcGeometry().isEmpty()).isTrue(); - } - - private void assertInvalidWkt(String s) { - try { - Polygon.fromWellKnownText(s); - fail("Should have thrown InvalidTypeException"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java deleted file mode 100644 index ba158288891..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import org.junit.Test; - -public class DistanceTest { - - private final Point point = Point.fromCoordinates(1.1, 2.2); - private final Distance distance = new Distance(point, 7.0); - private final String wkt = "DISTANCE((1.1 2.2) 7.0)"; - - @Test - public void should_parse_valid_well_known_text() { - Distance fromWkt = Distance.fromWellKnownText(wkt); - assertThat(fromWkt.getRadius()).isEqualTo(7.0); - assertThat(fromWkt.getCenter()).isEqualTo(point); - assertThat(Distance.fromWellKnownText(wkt)).isEqualTo(distance); - // whitespace doesn't matter between distance and spec. - assertThat(Distance.fromWellKnownText("DISTANCE ((1.1 2.2) 7.0)")).isEqualTo(distance); - // case doesn't matter. 
- assertThat(Distance.fromWellKnownText("distance((1.1 2.2) 7.0)")).isEqualTo(distance); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_well_known_text() { - Distance.fromWellKnownText("dist((1.1 2.2) 3.3)"); - } - - @Test - public void should_convert_to_well_known_text() { - assertThat(distance.asWellKnownText()).isEqualTo(wkt); - } - - @Test - public void should_contain_point() { - assertThat(distance.contains(Point.fromCoordinates(2.0, 3.0))).isTrue(); - } - - @Test - public void should_not_contain_point() { - // y axis falls outside of distance - assertThat(distance.contains(Point.fromCoordinates(2.0, 9.3))).isFalse(); - } - - @Test - public void should_contain_linestring() { - assertThat( - distance.contains( - LineString.fromPoints( - Point.fromCoordinates(2.0, 3.0), - Point.fromCoordinates(3.1, 6.2), - Point.fromCoordinates(-1.0, -2.0)))) - .isTrue(); - } - - @Test - public void should_not_contain_linestring() { - // second point falls outside of distance at y axis. - assertThat( - distance.contains( - LineString.fromPoints( - Point.fromCoordinates(2.0, 3.0), - Point.fromCoordinates(3.1, 9.2), - Point.fromCoordinates(-1.0, -2.0)))) - .isFalse(); - } - - @Test - public void should_contain_polygon() { - Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 4), - Point.fromCoordinates(4, 4)); - assertThat(distance.contains(polygon)).isTrue(); - } - - @Test - public void should_not_contain_polygon() { - Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 4), - Point.fromCoordinates(10, 4)); - // final point falls outside of distance at x axis. 
- assertThat(distance.contains(polygon)).isFalse(); - } - - @Test(expected = UnsupportedOperationException.class) - public void should_fail_to_convert_to_ogc() { - distance.getOgcGeometry(); - } - - @Test(expected = UnsupportedOperationException.class) - public void should_fail_to_convert_to_wkb() { - distance.asWellKnownBinary(); - } - - @Test(expected = UnsupportedOperationException.class) - public void should_fail_to_convert_to_geo_json() { - distance.asGeoJson(); - } - - @Test - public void should_serialize_and_deserialize() throws Exception { - assertThat(SerializationUtils.serializeAndDeserialize(distance)).isEqualTo(distance); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java deleted file mode 100644 index 84bd1dab343..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; - -public class SerializationUtils { - - public static Object serializeAndDeserialize(Geometry geometry) - throws IOException, ClassNotFoundException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ObjectOutputStream out = new ObjectOutputStream(baos); - - out.writeObject(geometry); - - byte[] bytes = baos.toByteArray(); - if (!(geometry instanceof Distance)) { - byte[] wkb = Bytes.getArray(geometry.asWellKnownBinary()); - assertThat(bytes).containsSequence(wkb); - } - ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes)); - return in.readObject(); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java deleted file mode 100644 index c67be162181..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.singleGraphRow; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import 
com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -/** - * These tests are almost exact copies of {@link - * com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerSpeculativeExecutionTest}. 
- */ -@RunWith(DataProviderRunner.class) -public class ContinuousGraphRequestHandlerSpeculativeExecutionTest { - - @Mock DefaultNode node1; - @Mock DefaultNode node2; - @Mock DefaultNode node3; - - @Mock NodeMetricUpdater nodeMetricUpdater1; - @Mock NodeMetricUpdater nodeMetricUpdater2; - @Mock NodeMetricUpdater nodeMetricUpdater3; - - @Mock GraphSupportChecker graphSupportChecker; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(node1.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - when(node2.getMetricUpdater()).thenReturn(nodeMetricUpdater2); - when(node3.getMetricUpdater()).thenReturn(nodeMetricUpdater3); - when(nodeMetricUpdater1.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(nodeMetricUpdater2.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(nodeMetricUpdater3.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())) - .thenReturn(GraphProtocol.GRAPH_BINARY_1_0); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "nonIdempotentGraphConfig") - public void should_not_schedule_speculative_executions_if_not_idempotent( - boolean defaultIdempotence, GraphStatement statement) { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - - node1Behavior.verifyWrite(); - 
node1Behavior.setWriteSuccess(); - - // should not schedule any timeout - assertThat(harness.nextScheduledTimeout()).isNull(); - - verifyNoMoreInteractions(speculativeExecutionPolicy); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_schedule_speculative_executions( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - long secondExecutionDelay = 200L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(2))) - .thenReturn(secondExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(3))) - .thenReturn(-1L); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - 
verifyNoMoreInteractions(nodeMetricUpdater1); - speculativeExecution1.task().run(speculativeExecution1); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - CapturedTimeout speculativeExecution2 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution2.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(secondExecutionDelay); - verifyNoMoreInteractions(nodeMetricUpdater2); - speculativeExecution2.task().run(speculativeExecution2); - verify(nodeMetricUpdater2) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - - // No more scheduled tasks since the policy returns 0 on the third call. - assertThat(harness.nextScheduledTimeout()).isNull(); - - // Note that we don't need to complete any response, the test is just about checking that - // executions are started. 
- } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_not_start_execution_if_result_complete( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder() - .withGraphTimeout(Duration.ofSeconds(10)) - .withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - ContinuousGraphRequestHandler requestHandler = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker); - CompletionStage resultSetFuture = requestHandler.handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - // The first timeout scheduled should be the global timeout - CapturedTimeout globalTimeout = harness.nextScheduledTimeout(); - assertThat(globalTimeout.getDelay(TimeUnit.SECONDS)).isEqualTo(10); - - // Check that the first execution was scheduled but don't run it yet - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess( - defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); - 
assertThatStage(resultSetFuture).isSuccess(); - - // Pending speculative executions should have been cancelled. However we don't check - // firstExecutionTask directly because the request handler's onResponse can sometimes be - // invoked before operationComplete (this is very unlikely in practice, but happens in our - // Travis CI build). When that happens, the speculative execution is not recorded yet when - // cancelScheduledTasks runs. - - // The fact that we missed the speculative execution is not a problem; even if it starts, it - // will eventually find out that the result is already complete and cancel itself: - speculativeExecution1.task().run(speculativeExecution1); - node2Behavior.verifyNoWrite(); - - verify(nodeMetricUpdater1) - .updateTimer( - eq(DseNodeMetric.GRAPH_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_fail_if_no_nodes(boolean defaultIdempotence, GraphStatement statement) { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - // No configured behaviors => will yield an empty query plan - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - 
.handle(); - - assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_fail_if_no_more_nodes_and_initial_execution_is_last( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - harnessBuilder.withResponse( - node2, - defaultDseFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - // Run the next scheduled task to start the speculative execution. node2 will reply with a - // BOOTSTRAPPING error, causing a RETRY_NEXT; but the query plan is now empty so the - // speculative execution stops. - // next scheduled timeout should be the first speculative execution. Get it and run it. 
- CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with the same response, that triggers a RETRY_NEXT - node1Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // But again the query plan is empty so that should fail the request - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage 
resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with a BOOTSTRAPPING error that triggers a RETRY_NEXT - // but the query plan is empty so the initial execution stops - node1Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // Same thing with node2, so the speculative execution should reach the end of the query plan - // and fail the request - node2Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_retry_in_speculative_executions( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = 
harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1. The request will stay hanging for the rest of this - // test - - // next scheduled timeout should be the first speculative execution. Get it and run it. 
- CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // node2 replies with a response that triggers a RETRY_NEXT - node2Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - node3Behavior.setResponseSuccess( - defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); - - // The second execution should move to node3 and complete the request - assertThatStage(resultSetFuture).isSuccess(); - - // The request to node1 was still in flight, it should have been cancelled - node1Behavior.verifyCancellation(); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") - public void should_stop_retrying_other_executions_if_result_complete( - boolean defaultIdempotence, GraphStatement statement) throws Exception { - GraphRequestHandlerTestHarness.Builder harnessBuilder = - GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - CompletionStage resultSetFuture = - new 
ContinuousGraphRequestHandler( - statement, - harness.getSession(), - harness.getContext(), - "test", - module, - graphSupportChecker) - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess( - defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); - assertThatStage(resultSetFuture).isSuccess(); - - // node2 replies with a response that would trigger a RETRY_NEXT if the request was still - // running - node2Behavior.setResponseSuccess( - defaultDseFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // The speculative execution should not move to node3 because it is stopped - node3Behavior.verifyNoWrite(); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java deleted file mode 100644 index b374539f12e..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.tenGraphRows; -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import 
com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class ContinuousGraphRequestHandlerTest { - - @Mock DefaultDriverContext mockContext; - @Mock DefaultNode node; - @Mock NodeMetricUpdater nodeMetricUpdater1; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(node.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_return_paged_results(GraphProtocol graphProtocol) throws IOException { - String profileName = "test-graph"; - when(nodeMetricUpdater1.isEnabled(DseNodeMetric.GRAPH_MESSAGES, profileName)).thenReturn(true); - - GraphBinaryModule module = createGraphBinaryModule(mockContext); - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withGraphProtocolForTestConfig(graphProtocol); - PoolBehavior node1Behavior = builder.customBehavior(node); - - try (RequestHandlerTestHarness harness = 
builder.build()) { - - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName(profileName); - - ContinuousGraphRequestHandler handler = - new ContinuousGraphRequestHandler( - graphStatement, - harness.getSession(), - harness.getContext(), - "test", - module, - new GraphSupportChecker()); - - // send the initial request - CompletionStage page1Future = handler.handle(); - - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, module, 1, false))); - - assertThatStage(page1Future) - .isSuccess( - page1 -> { - assertThat(page1.hasMorePages()).isTrue(); - assertThat(page1.currentPage()).hasSize(10).allMatch(GraphNode::isVertex); - ExecutionInfo executionInfo = page1.getRequestExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - - AsyncGraphResultSet page1 = CompletableFutures.getCompleted(page1Future); - CompletionStage page2Future = page1.fetchNextPage(); - - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, module, 2, true))); - - assertThatStage(page2Future) - .isSuccess( - page2 -> { - assertThat(page2.hasMorePages()).isFalse(); - assertThat(page2.currentPage()).hasSize(10).allMatch(GraphNode::isVertex); - ExecutionInfo executionInfo = page2.getRequestExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - 
assertThat(executionInfo.getWarnings()).isEmpty(); - }); - - validateMetrics(profileName, harness); - } - } - - @Test - public void should_honor_default_timeout() throws Exception { - // given - GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext); - Duration defaultTimeout = Duration.ofSeconds(1); - - RequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout); - PoolBehavior node1Behavior = builder.customBehavior(node); - - try (RequestHandlerTestHarness harness = builder.build()) { - - DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile(); - when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true); - when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); - - GraphStatement graphStatement = ScriptGraphStatement.newInstance("mockQuery"); - - // when - ContinuousGraphRequestHandler handler = - new ContinuousGraphRequestHandler( - graphStatement, - harness.getSession(), - harness.getContext(), - "test", - binaryModule, - new GraphSupportChecker()); - - // send the initial request - CompletionStage page1Future = handler.handle(); - - // acknowledge the write, will set the global timeout - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - CapturedTimeout globalTimeout = harness.nextScheduledTimeout(); - assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(defaultTimeout.toNanos()); - - // will trigger the global timeout and complete it exceptionally - globalTimeout.task().run(globalTimeout); - assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally(); - - assertThatThrownBy(() -> page1Future.toCompletableFuture().get()) - .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Query timed out after " + defaultTimeout); - } - } - - @Test - public void should_honor_statement_timeout() throws 
Exception { - // given - GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext); - Duration defaultTimeout = Duration.ofSeconds(1); - Duration statementTimeout = Duration.ofSeconds(2); - - RequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout); - PoolBehavior node1Behavior = builder.customBehavior(node); - - try (RequestHandlerTestHarness harness = builder.build()) { - - DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile(); - when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true); - when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); - - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setTimeout(statementTimeout); - - // when - ContinuousGraphRequestHandler handler = - new ContinuousGraphRequestHandler( - graphStatement, - harness.getSession(), - harness.getContext(), - "test", - binaryModule, - new GraphSupportChecker()); - - // send the initial request - CompletionStage page1Future = handler.handle(); - - // acknowledge the write, will set the global timeout - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - CapturedTimeout globalTimeout = harness.nextScheduledTimeout(); - assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(statementTimeout.toNanos()); - - // will trigger the global timeout and complete it exceptionally - globalTimeout.task().run(globalTimeout); - assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally(); - - assertThatThrownBy(() -> page1Future.toCompletableFuture().get()) - .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Query timed out after " + statementTimeout); - } - } - - private void validateMetrics(String profileName, RequestHandlerTestHarness harness) { - // GRAPH_MESSAGES metrics update is invoked only for the 
first page - verify(nodeMetricUpdater1, times(1)) - .updateTimer( - eq(DseNodeMetric.GRAPH_MESSAGES), eq(profileName), anyLong(), eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - - verify(harness.getSession().getMetricUpdater()) - .updateTimer( - eq(DseSessionMetric.GRAPH_REQUESTS), eq(null), anyLong(), eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(harness.getSession().getMetricUpdater()); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java deleted file mode 100644 index 1814b12aa4e..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.AbstractMap.SimpleEntry; -import java.util.Collections; -import java.util.List; -import java.util.Map.Entry; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Strict.class) -@SuppressWarnings("deprecation") -public class GraphExecutionInfoConverterTest { - - @Mock GraphStatement request; - @Mock Node node; - - private List> errors; - private List warnings; - private ImmutableMap payload; - - @Before - public void setUp() { - errors = - Collections.singletonList( - new SimpleEntry<>(node, new ServerError(node, "this is a server error"))); - warnings = Collections.singletonList("this is a warning"); - payload = ImmutableMap.of("key", Bytes.fromHexString("0xcafebabe")); - } - - @Test - public void should_convert_to_graph_execution_info() { - - // given - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getRequest()).thenReturn(request); - when(executionInfo.getCoordinator()).thenReturn(node); - when(executionInfo.getSpeculativeExecutionCount()).thenReturn(42); - when(executionInfo.getSuccessfulExecutionIndex()).thenReturn(10); - when(executionInfo.getErrors()).thenReturn(errors); - 
when(executionInfo.getWarnings()).thenReturn(warnings); - when(executionInfo.getIncomingPayload()).thenReturn(payload); - - // when - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo = - GraphExecutionInfoConverter.convert(executionInfo); - - // then - assertThat(graphExecutionInfo.getStatement()).isSameAs(request); - assertThat(graphExecutionInfo.getCoordinator()).isSameAs(node); - assertThat(graphExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(42); - assertThat(graphExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(10); - assertThat(graphExecutionInfo.getErrors()).isEqualTo(errors); - assertThat(graphExecutionInfo.getWarnings()).isEqualTo(warnings); - assertThat(graphExecutionInfo.getIncomingPayload()).isEqualTo(payload); - } - - @Test - public void should_convert_from_graph_execution_info() { - - // given - com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo = - mock(com.datastax.dse.driver.api.core.graph.GraphExecutionInfo.class); - when(graphExecutionInfo.getStatement()).thenAnswer(args -> request); - when(graphExecutionInfo.getCoordinator()).thenReturn(node); - when(graphExecutionInfo.getSpeculativeExecutionCount()).thenReturn(42); - when(graphExecutionInfo.getSuccessfulExecutionIndex()).thenReturn(10); - when(graphExecutionInfo.getErrors()).thenReturn(errors); - when(graphExecutionInfo.getWarnings()).thenReturn(warnings); - when(graphExecutionInfo.getIncomingPayload()).thenReturn(payload); - - // when - ExecutionInfo executionInfo = GraphExecutionInfoConverter.convert(graphExecutionInfo); - - // then - assertThat(executionInfo.getRequest()).isSameAs(request); - assertThatThrownBy(executionInfo::getStatement).isInstanceOf(ClassCastException.class); - assertThat(executionInfo.getCoordinator()).isSameAs(node); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(42); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(10); - 
assertThat(executionInfo.getErrors()).isEqualTo(errors); - assertThat(executionInfo.getWarnings()).isEqualTo(warnings); - assertThat(executionInfo.getIncomingPayload()).isEqualTo(payload); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.isSchemaInAgreement()).isTrue(); - assertThat(executionInfo.getQueryTraceAsync()).isCompletedExceptionally(); - assertThatThrownBy(executionInfo::getQueryTrace) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Tracing was disabled for this request"); - assertThat(executionInfo.getResponseSizeInBytes()).isEqualTo(-1L); - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1L); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java deleted file mode 100644 index d7ded441e70..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; -import org.apache.tinkerpop.gremlin.process.traversal.Traverser; -import org.apache.tinkerpop.gremlin.process.traversal.step.util.EmptyPath; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import 
org.apache.tinkerpop.gremlin.structure.util.detached.DetachedEdge; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedProperty; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertexProperty; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class GraphNodeTest { - - private GraphBinaryModule graphBinaryModule; - - @Before - public void setup() { - DefaultDriverContext dseDriverContext = mock(DefaultDriverContext.class); - when(dseDriverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(dseDriverContext.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - - TypeSerializerRegistry registry = - GraphBinaryModule.createDseTypeSerializerRegistry(dseDriverContext); - graphBinaryModule = - new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); - } - - @Test - public void should_not_support_set_for_graphson_2_0() throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableSet.of("value"), GRAPHSON_2_0); - - // then - assertThat(graphNode.isSet()).isFalse(); - } - - @Test - public void should_throw_for_set_for_graphson_1_0() throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableSet.of("value"), GRAPHSON_1_0); - - // then - assertThat(graphNode.isSet()).isFalse(); - assertThatThrownBy(graphNode::asSet).isExactlyInstanceOf(UnsupportedOperationException.class); - } - - @Test - @UseDataProvider(value = "allGraphProtocols") - public void should_create_graph_node_for_list(GraphProtocol graphVersion) throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableList.of("value"), graphVersion); - - // then - assertThat(graphNode.isList()).isTrue(); - List result = graphNode.asList(); - 
assertThat(result).isEqualTo(ImmutableList.of("value")); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_create_graph_node_for_map(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableMap.of("value", 1234), graphProtocol); - - // then - assertThat(graphNode.isMap()).isTrue(); - Map result = graphNode.asMap(); - assertThat(result).isEqualTo(ImmutableMap.of("value", 1234)); - } - - @Test - @UseDataProvider("graphson1_0and2_0") - public void should_create_graph_node_for_map_for_non_string_key(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode graphNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - - // then - assertThat(graphNode.isMap()).isTrue(); - Map result = graphNode.asMap(); - assertThat(result).isEqualTo(ImmutableMap.of("12", 1234)); - } - - @Test - @UseDataProvider(value = "allGraphProtocols") - public void should_calculate_size_of_collection_types(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode mapNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode setNode = serdeAndCreateGraphNode(ImmutableSet.of(12, 1234), graphProtocol); - GraphNode listNode = serdeAndCreateGraphNode(ImmutableList.of(12, 1234, 99999), graphProtocol); - - // then - assertThat(mapNode.size()).isEqualTo(1); - assertThat(setNode.size()).isEqualTo(2); - assertThat(listNode.size()).isEqualTo(3); - } - - @Test - @UseDataProvider(value = "allGraphProtocols") - public void should_return_is_value_only_for_scalar_value(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode mapNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode setNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode listNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol); - GraphNode vertexNode = - serdeAndCreateGraphNode(new 
DetachedVertex("a", "l", null), graphProtocol); - GraphNode edgeNode = - serdeAndCreateGraphNode( - new DetachedEdge("a", "l", Collections.emptyMap(), "v1", "l1", "v2", "l2"), - graphProtocol); - GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), graphProtocol); - GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), graphProtocol); - GraphNode vertexPropertyNode = - serdeAndCreateGraphNode( - new DetachedVertexProperty<>("id", "l", "v", null, new DetachedVertex("a", "l", null)), - graphProtocol); - GraphNode scalarValueNode = serdeAndCreateGraphNode(true, graphProtocol); - - // then - assertThat(mapNode.isValue()).isFalse(); - assertThat(setNode.isValue()).isFalse(); - assertThat(listNode.isValue()).isFalse(); - assertThat(vertexNode.isValue()).isFalse(); - assertThat(edgeNode.isValue()).isFalse(); - assertThat(pathNode.isValue()).isFalse(); - assertThat(propertyNode.isValue()).isFalse(); - assertThat(vertexPropertyNode.isValue()).isFalse(); - assertThat(scalarValueNode.isValue()).isTrue(); - } - - @Test - @UseDataProvider("objectGraphNodeProtocols") - public void should_check_if_node_is_property_not_map(GraphProtocol graphProtocol) - throws IOException { - // when - GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), graphProtocol); - - // then - assertThat(propertyNode.isProperty()).isTrue(); - assertThat(propertyNode.isMap()).isFalse(); - assertThat(propertyNode.asProperty()).isNotNull(); - } - - @Test - public void should_check_if_node_is_property_or_map_for_1_0() throws IOException { - // when - GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), GRAPHSON_1_0); - - // then - assertThat(propertyNode.isProperty()).isTrue(); - assertThat(propertyNode.isMap()).isTrue(); - assertThat(propertyNode.asProperty()).isNotNull(); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_check_if_node_is_vertex_property(GraphProtocol graphProtocol) - 
throws IOException { - // when - GraphNode vertexPropertyNode = - serdeAndCreateGraphNode( - new DetachedVertexProperty<>("id", "l", "v", null, new DetachedVertex("a", "l", null)), - graphProtocol); - - // then - assertThat(vertexPropertyNode.isVertexProperty()).isTrue(); - assertThat(vertexPropertyNode.isVertexProperty()).isNotNull(); - } - - @Test - public void should_check_if_node_is_path_for_graphson_1_0() throws IOException { - // when - GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), GRAPHSON_1_0); - - // then - assertThat(pathNode.isPath()).isFalse(); - assertThatThrownBy(pathNode::asPath).isExactlyInstanceOf(UnsupportedOperationException.class); - } - - @Test - @UseDataProvider("objectGraphNodeProtocols") - public void should_check_if_node_is_path(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), graphProtocol); - - // then - assertThat(pathNode.isPath()).isTrue(); - assertThat(pathNode.asPath()).isNotNull(); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_check_if_node_is_vertex(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode vertexNode = - serdeAndCreateGraphNode(new DetachedVertex("a", "l", null), graphProtocol); - - // then - assertThat(vertexNode.isVertex()).isTrue(); - assertThat(vertexNode.asVertex()).isNotNull(); - } - - @Test - @UseDataProvider("allGraphProtocols") - public void should_check_if_node_is_edge(GraphProtocol graphProtocol) throws IOException { - // when - GraphNode edgeNode = - serdeAndCreateGraphNode( - new DetachedEdge("a", "l", Collections.emptyMap(), "v1", "l1", "v2", "l2"), - graphProtocol); - - // then - assertThat(edgeNode.isEdge()).isTrue(); - assertThat(edgeNode.asEdge()).isNotNull(); - } - - private GraphNode serdeAndCreateGraphNode(Object inputValue, GraphProtocol graphProtocol) - throws IOException { - if (graphProtocol.isGraphBinary()) { - Buffer tinkerBuf = 
graphBinaryModule.serialize(new DefaultRemoteTraverser<>(inputValue, 0L)); - ByteBuffer nioBuffer = tinkerBuf.nioBuffer(); - tinkerBuf.release(); - return new ObjectGraphNode( - GraphConversions.createGraphBinaryGraphNode( - ImmutableList.of(nioBuffer), graphBinaryModule) - .as(Traverser.class) - .get()); - } else { - return GraphSONUtils.createGraphNode( - ImmutableList.of(GraphSONUtils.serializeToByteBuffer(inputValue, graphProtocol)), - graphProtocol); - } - } - - @DataProvider - public static Object[][] allGraphProtocols() { - return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; - } - - @DataProvider - public static Object[][] graphson1_0and2_0() { - return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}}; - } - - @DataProvider - public static Object[][] objectGraphNodeProtocols() { - return new Object[][] {{GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java deleted file mode 100644 index 9f325003610..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java +++ /dev/null @@ -1,589 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; -import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.serialize; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.singleGraphRow; -import static com.datastax.oss.driver.api.core.type.codec.TypeCodecs.BIGINT; -import static com.datastax.oss.driver.api.core.type.codec.TypeCodecs.TEXT; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.matches; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import 
com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.protocol.internal.request.RawBytesQuery; -import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.cql.Conversions; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Query; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.time.LocalDateTime; -import java.time.ZoneOffset; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import 
org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class GraphRequestHandlerTest { - - private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("test-graph\\|\\d+"); - - @Mock DefaultNode node; - - @Mock protected NodeMetricUpdater nodeMetricUpdater1; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(node.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_query_message_from_script_statement(GraphProtocol graphProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - ScriptGraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery") - .setQueryParam("p1", 1L) - .setQueryParam("p2", Uuids.random()); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(Query.class); - Query q = ((Query) m); - assertThat(q.query).isEqualTo("mockQuery"); - assertThat(q.options.positionalValues) - .containsExactly(serialize(graphStatement.getQueryParams(), graphProtocol, module)); - assertThat(q.options.namedValues).isEmpty(); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_query_message_from_fluent_statement(GraphProtocol graphProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness 
harness = GraphRequestHandlerTestHarness.builder().build(); - GraphTraversal traversalTest = - DseGraph.g.V().has("person", "name", "marko").has("p1", 1L).has("p2", Uuids.random()); - GraphStatement graphStatement = FluentGraphStatement.newInstance(traversalTest); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - Map createdCustomPayload = - GraphConversions.createCustomPayload( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(RawBytesQuery.class); - testQueryRequestAndPayloadContents( - ((RawBytesQuery) m), - createdCustomPayload, - GraphConversions.bytecodeToSerialize(graphStatement), - graphProtocol, - module); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_query_message_from_batch_statement(GraphProtocol graphProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - @SuppressWarnings("rawtypes") - List traversalsTest = - ImmutableList.of( - // randomly testing some complex data types. 
Complete suite of data types test is in - // GraphDataTypesTest - DseGraph.g - .addV("person") - .property("p1", 2.3f) - .property("p2", LocalDateTime.now(ZoneOffset.UTC)), - DseGraph.g - .addV("software") - .property("p3", new BigInteger("123456789123456789123456789123456789")) - .property("p4", ImmutableList.of(Point.fromCoordinates(30.4, 25.63746284)))); - GraphStatement graphStatement = - BatchGraphStatement.builder().addTraversals(traversalsTest).build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - Map createdCustomPayload = - GraphConversions.createCustomPayload( - graphStatement, graphProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(RawBytesQuery.class); - testQueryRequestAndPayloadContents( - ((RawBytesQuery) m), - createdCustomPayload, - GraphConversions.bytecodeToSerialize(graphStatement), - graphProtocol, - module); - } - - private void testQueryRequestAndPayloadContents( - RawBytesQuery q, - Map customPayload, - Object traversalTest, - GraphProtocol graphProtocol, - GraphBinaryModule module) - throws IOException { - if (graphProtocol.isGraphBinary()) { - assertThat(q.query).isEqualTo(GraphConversions.EMPTY_STRING_QUERY); - assertThat(customPayload).containsKey(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY); - ByteBuffer encodedQuery = customPayload.get(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY); - assertThat(encodedQuery).isNotNull(); - assertThat(encodedQuery).isEqualTo(serialize(traversalTest, graphProtocol, module)); - } else { - assertThat(q.query).isEqualTo(serialize(traversalTest, graphProtocol, module).array()); - 
assertThat(customPayload).doesNotContainKey(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY); - } - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_set_correct_query_options_from_graph_statement(GraphProtocol subProtocol) - throws IOException { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setQueryParam("name", "value"); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - Query query = ((Query) m); - DseQueryOptions options = ((DseQueryOptions) query.options); - assertThat(options.consistency) - .isEqualTo( - DefaultConsistencyLevel.valueOf( - executionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .getProtocolCode()); - // set by the mock timestamp generator - assertThat(options.defaultTimestamp).isEqualTo(-9223372036854775808L); - assertThat(options.positionalValues) - .isEqualTo( - ImmutableList.of(serialize(ImmutableMap.of("name", "value"), subProtocol, module))); - - m = - GraphConversions.createMessageFromGraphStatement( - graphStatement.setTimestamp(2L), - subProtocol, - executionProfile, - harness.getContext(), - module); - query = ((Query) m); - options = ((DseQueryOptions) query.options); - assertThat(options.defaultTimestamp).isEqualTo(2L); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_payload_from_config_options(GraphProtocol subProtocol) { - // initialization - GraphRequestHandlerTestHarness harness = 
GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName("test-graph"); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Map requestPayload = - GraphConversions.createCustomPayload( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_NAME, null); - Mockito.verify(executionProfile).getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); - Mockito.verify(executionProfile).getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); - Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); - - assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)) - .isEqualTo(TEXT.encode("a", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_RESULTS_OPTION_KEY)) - .isEqualTo( - TEXT.encode(subProtocol.toInternalCode(), harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)) - .isEqualTo(TEXT.encode("mockGraph", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_LANG_OPTION_KEY)) - .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_TIMEOUT_OPTION_KEY)) - .isEqualTo(BIGINT.encode(2L, harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) - 
.isEqualTo(TEXT.encode("LOCAL_TWO", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) - .isEqualTo(TEXT.encode("LOCAL_THREE", harness.getContext().getProtocolVersion())); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_create_payload_from_statement_options(GraphProtocol subProtocol) { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.builder("mockQuery") - .setGraphName("mockGraph") - .setTraversalSource("a") - .setTimeout(Duration.ofMillis(2)) - .setReadConsistencyLevel(DefaultConsistencyLevel.TWO) - .setWriteConsistencyLevel(DefaultConsistencyLevel.THREE) - .setSystemQuery(false) - .build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Map requestPayload = - GraphConversions.createCustomPayload( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - Mockito.verify(executionProfile, never()) - .getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); - Mockito.verify(executionProfile, never()).getString(DseDriverOption.GRAPH_NAME, null); - Mockito.verify(executionProfile, never()) - .getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); - Mockito.verify(executionProfile, never()) - .getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); - Mockito.verify(executionProfile, never()) - .getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); - Mockito.verify(executionProfile, never()) - .getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); - - assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)) - .isEqualTo(TEXT.encode("a", 
harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_RESULTS_OPTION_KEY)) - .isEqualTo( - TEXT.encode(subProtocol.toInternalCode(), harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)) - .isEqualTo(TEXT.encode("mockGraph", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_LANG_OPTION_KEY)) - .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_TIMEOUT_OPTION_KEY)) - .isEqualTo(BIGINT.encode(2L, harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) - .isEqualTo(TEXT.encode("TWO", harness.getContext().getProtocolVersion())); - assertThat(requestPayload.get(GraphConversions.GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) - .isEqualTo(TEXT.encode("THREE", harness.getContext().getProtocolVersion())); - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_not_set_graph_name_on_system_queries(GraphProtocol subProtocol) { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setSystemQuery(true); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Map requestPayload = - GraphConversions.createCustomPayload( - graphStatement, subProtocol, executionProfile, harness.getContext(), module); - - // checks - assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)).isNull(); - assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)).isNull(); - } - - @Test - 
@UseDataProvider("supportedGraphProtocolsWithDseVersions") - public void should_return_results_for_statements(GraphProtocol graphProtocol, Version dseVersion) - throws IOException { - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder() - .withGraphProtocolForTestConfig(graphProtocol) - .withDseVersionInMetadata(dseVersion); - PoolBehavior node1Behavior = builder.customBehavior(node); - GraphRequestHandlerTestHarness harness = builder.build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // ideally we would be able to provide a function here to - // produce results instead of a static predefined response. - // Function to which we would pass the harness instance or a (mocked)DriverContext. - // Since that's not possible in the RequestHandlerTestHarness API at the moment, we - // have to use another DseDriverContext and GraphBinaryModule here, - // instead of reusing the one in the harness' DriverContext - node1Behavior.setResponseSuccess(defaultDseFrameOf(singleGraphRow(graphProtocol, module))); - - GraphSupportChecker graphSupportChecker = mock(GraphSupportChecker.class); - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - GraphRequestAsyncProcessor p = - Mockito.spy(new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker)); - when(p.getGraphBinaryModule()).thenReturn(module); - - GraphStatement graphStatement = - ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName("test-graph"); - GraphResultSet grs = - new GraphRequestSyncProcessor(p) - .process(graphStatement, harness.getSession(), harness.getContext(), "test-graph"); - - List nodes = grs.all(); - assertThat(nodes.size()).isEqualTo(1); - - GraphNode graphNode = nodes.get(0); - assertThat(graphNode.isVertex()).isTrue(); - - Vertex vRead = graphNode.asVertex(); - 
assertThat(vRead.label()).isEqualTo("person"); - assertThat(vRead.id()).isEqualTo(1); - if (!graphProtocol.isGraphBinary()) { - // GraphBinary does not encode properties regardless of whether they are present in the - // parent element or not :/ - assertThat(vRead.property("name").id()).isEqualTo(11); - assertThat(vRead.property("name").value()).isEqualTo("marko"); - } - } - - @DataProvider - public static Object[][] supportedGraphProtocolsWithDseVersions() { - return new Object[][] { - {GRAPHSON_1_0, Version.parse("6.7.0")}, - {GRAPHSON_1_0, Version.parse("6.8.0")}, - {GRAPHSON_2_0, Version.parse("6.7.0")}, - {GRAPHSON_2_0, Version.parse("6.8.0")}, - {GRAPH_BINARY_1_0, Version.parse("6.7.0")}, - {GRAPH_BINARY_1_0, Version.parse("6.8.0")}, - }; - } - - @Test - @UseDataProvider("dseVersionsWithDefaultGraphProtocol") - public void should_invoke_request_tracker_and_update_metrics( - GraphProtocol graphProtocol, Version dseVersion) throws IOException { - when(nodeMetricUpdater1.isEnabled( - DseNodeMetric.GRAPH_MESSAGES, DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(true); - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder() - .withGraphProtocolForTestConfig(graphProtocol) - .withDseVersionInMetadata(dseVersion); - PoolBehavior node1Behavior = builder.customBehavior(node); - GraphRequestHandlerTestHarness harness = builder.build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - GraphSupportChecker graphSupportChecker = mock(GraphSupportChecker.class); - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - GraphRequestAsyncProcessor p = - Mockito.spy(new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker)); - when(p.getGraphBinaryModule()).thenReturn(module); - - RequestTracker requestTracker = mock(RequestTracker.class); - 
when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - GraphStatement graphStatement = ScriptGraphStatement.newInstance("mockQuery"); - - node1Behavior.setResponseSuccess(defaultDseFrameOf(singleGraphRow(graphProtocol, module))); - - GraphResultSet grs = - new GraphRequestSyncProcessor( - new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker)) - .process(graphStatement, harness.getSession(), harness.getContext(), "test-graph"); - - List nodes = grs.all(); - assertThat(nodes.size()).isEqualTo(1); - - GraphNode graphNode = nodes.get(0); - assertThat(graphNode.isVertex()).isTrue(); - - Vertex actual = graphNode.asVertex(); - assertThat(actual.label()).isEqualTo("person"); - assertThat(actual.id()).isEqualTo(1); - if (!graphProtocol.isGraphBinary()) { - // GraphBinary does not encode properties regardless of whether they are present in the - // parent element or not :/ - assertThat(actual.property("name").id()).isEqualTo(11); - assertThat(actual.property("name").value()).isEqualTo("marko"); - } - - verify(requestTracker) - .onSuccess( - eq(graphStatement), - anyLong(), - any(DriverExecutionProfile.class), - eq(node), - matches(LOG_PREFIX_PER_REQUEST)); - verify(requestTracker) - .onNodeSuccess( - eq(graphStatement), - anyLong(), - any(DriverExecutionProfile.class), - eq(node), - matches(LOG_PREFIX_PER_REQUEST)); - verifyNoMoreInteractions(requestTracker); - - verify(nodeMetricUpdater1) - .isEnabled(DseNodeMetric.GRAPH_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .updateTimer( - eq(DseNodeMetric.GRAPH_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - - verify(harness.getSession().getMetricUpdater()) - .isEnabled(DseSessionMetric.GRAPH_REQUESTS, DriverExecutionProfile.DEFAULT_NAME); - verify(harness.getSession().getMetricUpdater()) - .updateTimer( - eq(DseSessionMetric.GRAPH_REQUESTS), - 
eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(harness.getSession().getMetricUpdater()); - } - - @Test - public void should_honor_statement_consistency_level() { - // initialization - GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); - ScriptGraphStatement graphStatement = - ScriptGraphStatement.builder("mockScript") - .setConsistencyLevel(DefaultConsistencyLevel.THREE) - .build(); - - GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); - - // when - DriverExecutionProfile executionProfile = - Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); - - Message m = - GraphConversions.createMessageFromGraphStatement( - graphStatement, GRAPH_BINARY_1_0, executionProfile, harness.getContext(), module); - - // checks - assertThat(m).isInstanceOf(Query.class); - Query q = ((Query) m); - assertThat(q.options.consistency).isEqualTo(DefaultConsistencyLevel.THREE.getProtocolCode()); - } - - @DataProvider - public static Object[][] dseVersionsWithDefaultGraphProtocol() { - // Default GraphSON sub protocol version differs based on DSE version, so test with a version - // less than DSE 6.8 as well as DSE 6.8. - return new Object[][] { - {GRAPHSON_2_0, Version.parse("6.7.0")}, - {GRAPH_BINARY_1_0, Version.parse("6.8.0")}, - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java deleted file mode 100644 index 7e46b09bd59..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestFixtures; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; -import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.protocol.internal.Frame; -import io.netty.channel.EventLoop; 
-import java.time.Duration; -import javax.annotation.Nullable; -import org.mockito.ArgumentMatchers; -import org.mockito.Mock; - -/** - * Provides the environment to test a request handler, where a query plan can be defined, and the - * behavior of each successive node simulated. - */ -public class GraphRequestHandlerTestHarness extends RequestHandlerTestHarness { - - @Mock DriverExecutionProfile testProfile; - - @Mock DriverExecutionProfile systemQueryExecutionProfile; - - @Mock DefaultDriverContext dseDriverContext; - - @Mock EventLoop eventLoop; - - protected GraphRequestHandlerTestHarness( - Builder builder, - @Nullable GraphProtocol graphProtocolForTestConfig, - Duration graphTimeout, - @Nullable Version dseVersionForTestMetadata) { - super(builder); - - // not mocked by RequestHandlerTestHarness, will be used when DseDriverOptions.GRAPH_TIMEOUT - // is not zero in the config - when(eventLoopGroup.next()).thenReturn(eventLoop); - - // default graph options as in the reference.conf file - when(defaultProfile.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("g"); - when(defaultProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(Boolean.FALSE); - when(defaultProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)).thenReturn(false); - when(defaultProfile.getString(DseDriverOption.GRAPH_NAME, null)).thenReturn("mockGraph"); - when(defaultProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(graphTimeout); - - when(testProfile.getName()).thenReturn("test-graph"); - when(testProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(Duration.ofMillis(2L)); - when(testProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - when(testProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).thenReturn(5000); - when(testProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - 
.thenReturn(DefaultConsistencyLevel.SERIAL.name()); - when(testProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)).thenReturn(false); - when(testProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); - when(testProfile.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("a"); - when(testProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(graphProtocolForTestConfig != null); - // only mock the config if graphProtocolForTestConfig is not null - if (graphProtocolForTestConfig != null) { - when(testProfile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL)) - .thenReturn(graphProtocolForTestConfig.toInternalCode()); - } - when(testProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)).thenReturn(false); - when(testProfile.getString(DseDriverOption.GRAPH_NAME, null)).thenReturn("mockGraph"); - when(testProfile.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_TWO"); - when(testProfile.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_THREE"); - when(config.getProfile("test-graph")).thenReturn(testProfile); - - when(systemQueryExecutionProfile.getName()).thenReturn("graph-system-query"); - when(systemQueryExecutionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(Duration.ZERO); - when(systemQueryExecutionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - when(systemQueryExecutionProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)) - .thenReturn(5000); - when(systemQueryExecutionProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.SERIAL.name()); - when(systemQueryExecutionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)) - .thenReturn(false); - when(systemQueryExecutionProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)) - .thenReturn(true); - 
when(systemQueryExecutionProfile.getName()).thenReturn("graph-system-query"); - when(systemQueryExecutionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) - .thenReturn(Duration.ofMillis(2)); - when(systemQueryExecutionProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)) - .thenReturn(true); - when(systemQueryExecutionProfile.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_TWO"); - when(systemQueryExecutionProfile.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null)) - .thenReturn("LOCAL_THREE"); - - when(config.getProfile("graph-system-query")).thenReturn(systemQueryExecutionProfile); - - // need to re-mock everything on the context because the RequestHandlerTestHarness returns a - // InternalDriverContext and not a DseDriverContext. Couldn't figure out a way with mockito - // to say "mock this object (this.dseDriverContext), and delegate every call to that - // other object (this.context), except _this_ call and _this_ and so on" - // Spy wouldn't work because the spied object has to be of the same type as the final object - when(dseDriverContext.getConfig()).thenReturn(config); - when(dseDriverContext.getNettyOptions()).thenReturn(nettyOptions); - when(dseDriverContext.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - when(dseDriverContext.getRetryPolicy(ArgumentMatchers.anyString())).thenReturn(retryPolicy); - when(dseDriverContext.getSpeculativeExecutionPolicy(ArgumentMatchers.anyString())) - .thenReturn(speculativeExecutionPolicy); - when(dseDriverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(dseDriverContext.getTimestampGenerator()).thenReturn(timestampGenerator); - when(dseDriverContext.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - when(dseDriverContext.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - when(dseDriverContext.getConsistencyLevelRegistry()) - .thenReturn(new 
DefaultConsistencyLevelRegistry()); - when(dseDriverContext.getWriteTypeRegistry()).thenReturn(new DefaultWriteTypeRegistry()); - when(dseDriverContext.getRequestThrottler()) - .thenReturn(new PassThroughRequestThrottler(dseDriverContext)); - when(dseDriverContext.getRequestTracker()).thenReturn(new NoopRequestTracker(dseDriverContext)); - // if DSE Version is specified for test metadata, then we need to mock that up on the context - if (dseVersionForTestMetadata != null) { - DseTestFixtures.mockNodesInMetadataWithVersions( - dseDriverContext, true, dseVersionForTestMetadata); - } - } - - @Override - public DefaultDriverContext getContext() { - return dseDriverContext; - } - - public static GraphRequestHandlerTestHarness.Builder builder() { - return new GraphRequestHandlerTestHarness.Builder(); - } - - public static class Builder extends RequestHandlerTestHarness.Builder { - - private GraphProtocol graphProtocolForTestConfig; - private Duration graphTimeout = Duration.ZERO; - private Version dseVersionForTestMetadata; - - public GraphRequestHandlerTestHarness.Builder withGraphProtocolForTestConfig( - GraphProtocol protocol) { - this.graphProtocolForTestConfig = protocol; - return this; - } - - public GraphRequestHandlerTestHarness.Builder withDseVersionInMetadata(Version dseVersion) { - this.dseVersionForTestMetadata = dseVersion; - return this; - } - - public GraphRequestHandlerTestHarness.Builder withGraphTimeout(Duration globalTimeout) { - this.graphTimeout = globalTimeout; - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withEmptyPool(Node node) { - super.withEmptyPool(node); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withWriteFailure(Node node, Throwable cause) { - super.withWriteFailure(node, cause); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withResponseFailure(Node node, Throwable cause) { - super.withResponseFailure(node, cause); - return this; - } - 
- @Override - public GraphRequestHandlerTestHarness.Builder withResponse(Node node, Frame response) { - super.withResponse(node, response); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withDefaultIdempotence( - boolean defaultIdempotence) { - super.withDefaultIdempotence(defaultIdempotence); - return this; - } - - @Override - public GraphRequestHandlerTestHarness.Builder withProtocolVersion( - ProtocolVersion protocolVersion) { - super.withProtocolVersion(protocolVersion); - return this; - } - - @Override - public GraphRequestHandlerTestHarness build() { - return new GraphRequestHandlerTestHarness( - this, graphProtocolForTestConfig, graphTimeout, dseVersionForTestMetadata); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java deleted file mode 100644 index aed248675ae..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public abstract class GraphResultSetTestBase { - - /** Mocks an async result set where column 0 has type INT, with rows with the provided data. */ - protected AsyncGraphResultSet mockPage(boolean nextPage, Integer... data) { - AsyncGraphResultSet page = mock(AsyncGraphResultSet.class); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(page.getRequestExecutionInfo()).thenReturn(executionInfo); - - if (nextPage) { - when(page.hasMorePages()).thenReturn(true); - when(page.fetchNextPage()).thenReturn(spy(new CompletableFuture<>())); - } else { - when(page.hasMorePages()).thenReturn(false); - when(page.fetchNextPage()).thenThrow(new IllegalStateException()); - } - - // Emulate DefaultAsyncResultSet's internals (this is a bit sketchy, maybe it would be better - // to use real DefaultAsyncResultSet instances) - Queue queue = Lists.newLinkedList(Arrays.asList(data)); - CountingIterator iterator = - new CountingIterator(queue.size()) { - @Override - protected GraphNode computeNext() { - Integer index = queue.poll(); - return (index == null) ? 
endOfData() : mockRow(index); - } - }; - when(page.currentPage()).thenReturn(() -> iterator); - when(page.remaining()).thenAnswer(invocation -> iterator.remaining()); - - return page; - } - - private GraphNode mockRow(int index) { - GraphNode row = mock(GraphNode.class); - when(row.asInt()).thenReturn(index); - return row; - } - - protected static void complete( - CompletionStage stage, AsyncGraphResultSet result) { - stage.toCompletableFuture().complete(result); - } - - protected void assertNextRow(Iterator iterator, int expectedValue) { - assertThat(iterator.hasNext()).isTrue(); - GraphNode row = iterator.next(); - assertThat(row.asInt()).isEqualTo(expectedValue); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java deleted file mode 100644 index fd5cffd2530..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import java.util.Iterator; -import org.junit.Test; - -public class GraphResultSetsTest extends GraphResultSetTestBase { - - @Test - public void should_create_result_set_from_single_page() { - // Given - AsyncGraphResultSet page1 = mockPage(false, 0, 1, 2); - - // When - GraphResultSet resultSet = GraphResultSets.toSync(page1); - - // Then - assertThat(resultSet.getRequestExecutionInfo()).isSameAs(page1.getRequestExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isFalse(); - } - - @Test - public void should_create_result_set_from_multiple_pages() { - // Given - AsyncGraphResultSet page1 = mockPage(true, 0, 1, 2); - AsyncGraphResultSet page2 = mockPage(true, 3, 4, 5); - AsyncGraphResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - GraphResultSet resultSet = GraphResultSets.toSync(page1); - - // Then - assertThat(resultSet.iterator().hasNext()).isTrue(); - - assertThat(resultSet.getRequestExecutionInfo()).isSameAs(page1.getRequestExecutionInfo()); - assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos()) - .containsExactly(page1.getRequestExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - assertThat(resultSet.getRequestExecutionInfo()).isEqualTo(page2.getRequestExecutionInfo()); - 
assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos()) - .containsExactly(page1.getRequestExecutionInfo(), page2.getRequestExecutionInfo()); - - assertNextRow(iterator, 3); - assertNextRow(iterator, 4); - assertNextRow(iterator, 5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(resultSet.getRequestExecutionInfo()).isEqualTo(page3.getRequestExecutionInfo()); - assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos()) - .containsExactly( - page1.getRequestExecutionInfo(), - page2.getRequestExecutionInfo(), - page3.getRequestExecutionInfo()); - - assertNextRow(iterator, 6); - assertNextRow(iterator, 7); - assertNextRow(iterator, 8); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java deleted file mode 100644 index 4799437e617..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphStatementBuilderBase; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.Test; - -public class GraphStatementBuilderBaseTest { - - private static class MockGraphStatementBuilder - extends GraphStatementBuilderBase { - - @NonNull - @Override - public FluentGraphStatement build() { - FluentGraphStatement rv = mock(FluentGraphStatement.class); - when(rv.getTimestamp()).thenReturn(this.timestamp); - return rv; - } - } - - @Test - public void should_use_timestamp_if_set() { - - MockGraphStatementBuilder builder = new MockGraphStatementBuilder(); - builder.setTimestamp(1); - assertThat(builder.build().getTimestamp()).isEqualTo(1); - } - - @Test - public void should_use_correct_default_timestamp_if_not_set() { - - MockGraphStatementBuilder builder = new MockGraphStatementBuilder(); - assertThat(builder.build().getTimestamp()).isEqualTo(Statement.NO_DEFAULT_TIMESTAMP); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java deleted file mode 100644 index ec31bd4b12d..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static com.datastax.dse.driver.DseTestFixtures.mockNodesInMetadataWithVersions; -import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.AUTO; -import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.DISABLED; -import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.ENABLED; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.PagingEnabledOptions; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.DseProtocolFeature; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import 
com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class GraphSupportCheckerTest { - - @UseDataProvider("graphPagingEnabledAndDseVersions") - @Test - public void should_check_if_paging_is_supported( - boolean protocolWithPagingSupport, - PagingEnabledOptions statementGraphPagingEnabled, - PagingEnabledOptions contextGraphPagingEnabled, - List nodeDseVersions, - boolean expected) { - // given - GraphStatement graphStatement = mock(GraphStatement.class); - InternalDriverContext context = protocolWithPagingSupport(protocolWithPagingSupport); - statementGraphPagingEnabled(graphStatement, statementGraphPagingEnabled); - contextGraphPagingEnabled(context, contextGraphPagingEnabled); - addNodeWithDseVersion(context, nodeDseVersions); - - // when - boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context); - - // then - assertThat(pagingEnabled).isEqualTo(expected); - } - - @Test - public void should_not_support_paging_when_statement_profile_not_present() { - // given - GraphStatement graphStatement = mock(GraphStatement.class); - InternalDriverContext context = protocolWithPagingSupport(true); - contextGraphPagingEnabled(context, DISABLED); - 
addNodeWithDseVersion(context, Collections.singletonList(Version.parse("6.8.0"))); - - // when - boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context); - - // then - assertThat(pagingEnabled).isEqualTo(false); - } - - @Test - public void - should_support_paging_when_statement_profile_not_present_but_context_profile_has_paging_enabled() { - // given - GraphStatement graphStatement = mock(GraphStatement.class); - InternalDriverContext context = protocolWithPagingSupport(true); - contextGraphPagingEnabled(context, ENABLED); - addNodeWithDseVersion(context, Collections.singletonList(Version.parse("6.8.0"))); - - // when - boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context); - - // then - assertThat(pagingEnabled).isEqualTo(true); - } - - @DataProvider() - public static Object[][] graphPagingEnabledAndDseVersions() { - List listWithGraphPagingNode = Collections.singletonList(Version.parse("6.8.0")); - List listWithoutGraphPagingNode = Collections.singletonList(Version.parse("6.7.0")); - List listWithNull = Collections.singletonList(null); - List listWithTwoNodesOneNotSupporting = - Arrays.asList(Version.parse("6.7.0"), Version.parse("6.8.0")); - - return new Object[][] { - {false, ENABLED, ENABLED, listWithGraphPagingNode, true}, - {true, ENABLED, ENABLED, listWithoutGraphPagingNode, true}, - {true, ENABLED, DISABLED, listWithGraphPagingNode, true}, - {true, ENABLED, ENABLED, listWithGraphPagingNode, true}, - {true, ENABLED, ENABLED, listWithNull, true}, - {true, ENABLED, ENABLED, listWithTwoNodesOneNotSupporting, true}, - {true, DISABLED, ENABLED, listWithGraphPagingNode, false}, - {true, DISABLED, AUTO, listWithGraphPagingNode, false}, - {true, DISABLED, DISABLED, listWithGraphPagingNode, false}, - {true, AUTO, AUTO, listWithGraphPagingNode, true}, - {true, AUTO, DISABLED, listWithGraphPagingNode, true}, - {false, AUTO, AUTO, listWithGraphPagingNode, false}, - {true, AUTO, AUTO, 
listWithTwoNodesOneNotSupporting, false}, - {true, AUTO, AUTO, listWithNull, false}, - }; - } - - private void addNodeWithDseVersion(InternalDriverContext context, List dseVersions) { - MetadataManager manager = mock(MetadataManager.class); - when(context.getMetadataManager()).thenReturn(manager); - Metadata metadata = mock(Metadata.class); - when(manager.getMetadata()).thenReturn(metadata); - Map nodes = new HashMap<>(); - for (Version v : dseVersions) { - Node node = mock(Node.class); - Map extras = new HashMap<>(); - extras.put(DseNodeProperties.DSE_VERSION, v); - when(node.getExtras()).thenReturn(extras); - nodes.put(UUID.randomUUID(), node); - } - when(metadata.getNodes()).thenReturn(nodes); - } - - private void contextGraphPagingEnabled( - InternalDriverContext context, PagingEnabledOptions option) { - DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); - when(driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)) - .thenReturn(option.name()); - DriverConfig config = mock(DriverConfig.class); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(driverExecutionProfile); - } - - private InternalDriverContext protocolWithPagingSupport(boolean pagingSupport) { - InternalDriverContext context = mock(InternalDriverContext.class); - when(context.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - ProtocolVersionRegistry protocolVersionRegistry = mock(ProtocolVersionRegistry.class); - when(protocolVersionRegistry.supports( - DseProtocolVersion.DSE_V2, DseProtocolFeature.CONTINUOUS_PAGING)) - .thenReturn(pagingSupport); - when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - return context; - } - - private void statementGraphPagingEnabled( - GraphStatement graphStatement, PagingEnabledOptions option) { - DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); - 
when(driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)) - .thenReturn(option.name()); - when(graphStatement.getExecutionProfile()).thenReturn(driverExecutionProfile); - } - - @Test - @UseDataProvider("dseVersionsAndGraphProtocols") - public void should_determine_default_graph_protocol_from_dse_version( - Version[] dseVersions, GraphProtocol expectedProtocol) { - // mock up the metadata for the context - // using 'true' here will treat null test Versions as no DSE_VERSION info in the metadata - DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersions); - GraphProtocol graphProtocol = new GraphSupportChecker().getDefaultGraphProtocol(context); - assertThat(graphProtocol).isEqualTo(expectedProtocol); - } - - @Test - @UseDataProvider("dseVersionsAndGraphProtocols") - public void should_determine_default_graph_protocol_from_dse_version_with_null_versions( - Version[] dseVersions, GraphProtocol expectedProtocol) { - // mock up the metadata for the context - // using 'false' here will treat null test Versions as explicit NULL info for DSE_VERSION - DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), false, dseVersions); - GraphProtocol graphProtocol = new GraphSupportChecker().getDefaultGraphProtocol(context); - assertThat(graphProtocol).isEqualTo(expectedProtocol); - } - - @DataProvider - public static Object[][] dseVersionsAndGraphProtocols() { - return new Object[][] { - {new Version[] {Version.parse("5.0.3")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.0.1")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.8.0")}, GraphProtocol.GRAPH_BINARY_1_0}, - {new Version[] {Version.parse("7.0.0")}, GraphProtocol.GRAPH_BINARY_1_0}, - {new Version[] {Version.parse("5.0.3"), Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] 
{Version.parse("6.7.4"), Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {Version.parse("6.8.0"), Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, - { - new Version[] {Version.parse("6.8.0"), Version.parse("7.0.0")}, - GraphProtocol.GRAPH_BINARY_1_0 - }, - {new Version[] {Version.parse("6.7.4"), Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, - { - new Version[] {Version.parse("6.8.0"), Version.parse("6.8.0")}, - GraphProtocol.GRAPH_BINARY_1_0 - }, - {null, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {null}, GraphProtocol.GRAPHSON_2_0}, - {new Version[] {null, Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, - { - new Version[] {Version.parse("6.8.0"), Version.parse("7.0.0"), null}, - GraphProtocol.GRAPHSON_2_0 - }, - }; - } - - @Test - @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") - public void should_pickup_graph_protocol_from_statement(GraphProtocol graphProtocol) { - GraphStatement graphStatement = mock(GraphStatement.class); - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - when(graphStatement.getSubProtocol()).thenReturn(graphProtocol.toInternalCode()); - - GraphProtocol inferredProtocol = - new GraphSupportChecker() - .inferGraphProtocol( - graphStatement, executionProfile, mock(InternalDriverContext.class)); - - assertThat(inferredProtocol).isEqualTo(graphProtocol); - verifyZeroInteractions(executionProfile); - } - - @Test - @UseDataProvider("graphProtocolStringsAndDseVersions") - public void should_pickup_graph_protocol_and_parse_from_string_config( - String stringConfig, Version dseVersion) { - GraphStatement graphStatement = mock(GraphStatement.class); - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - when(executionProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(Boolean.TRUE); - when(executionProfile.getString(eq(DseDriverOption.GRAPH_SUB_PROTOCOL))) - .thenReturn(stringConfig); - - 
DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersion); - GraphProtocol inferredProtocol = - new GraphSupportChecker().inferGraphProtocol(graphStatement, executionProfile, context); - assertThat(inferredProtocol.toInternalCode()).isEqualTo(stringConfig); - } - - @DataProvider - public static Object[][] graphProtocolStringsAndDseVersions() { - // putting manual strings here to be sure to be notified if a value in - // GraphProtocol ever changes - return new Object[][] { - {"graphson-1.0", Version.parse("6.7.0")}, - {"graphson-1.0", Version.parse("6.8.0")}, - {"graphson-2.0", Version.parse("6.7.0")}, - {"graphson-2.0", Version.parse("6.8.0")}, - {"graph-binary-1.0", Version.parse("6.7.0")}, - {"graph-binary-1.0", Version.parse("6.8.0")}, - }; - } - - @Test - @UseDataProvider("dseVersions6") - public void should_use_correct_default_protocol_when_parsing(Version dseVersion) { - GraphStatement graphStatement = mock(GraphStatement.class); - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - DefaultDriverContext context = - mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersion); - GraphProtocol inferredProtocol = - new GraphSupportChecker().inferGraphProtocol(graphStatement, executionProfile, context); - // For DSE 6.8 and newer, the default should be GraphSON binary - // for DSE older than 6.8, the default should be GraphSON2 - assertThat(inferredProtocol) - .isEqualTo( - (dseVersion.compareTo(Version.parse("6.8.0")) < 0) - ? 
GraphProtocol.GRAPHSON_2_0 - : GraphProtocol.GRAPH_BINARY_1_0); - } - - @DataProvider - public static Object[][] dseVersions6() { - return new Object[][] {{Version.parse("6.7.0")}, {Version.parse("6.8.0")}}; - } - - @Test - public void should_fail_if_graph_protocol_used_is_invalid() { - assertThatThrownBy(() -> GraphProtocol.fromString("invalid")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Graph protocol used [\"invalid\"] unknown. Possible values are: [ \"graphson-1.0\", \"graphson-2.0\", \"graph-binary-1.0\"]"); - } - - @Test - public void should_fail_if_graph_protocol_used_is_graphson_3() { - assertThatThrownBy(() -> GraphProtocol.fromString("graphson-3.0")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Graph protocol used [\"graphson-3.0\"] unknown. Possible values are: [ \"graphson-1.0\", \"graphson-2.0\", \"graph-binary-1.0\"]"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java deleted file mode 100644 index f58fc54d8c7..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.apache.tinkerpop.gremlin.structure.T; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex; -import 
org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertexProperty; -import org.assertj.core.api.InstanceOfAssertFactories; - -public class GraphTestUtils { - - public static ByteBuffer serialize( - Object value, GraphProtocol graphProtocol, GraphBinaryModule graphBinaryModule) - throws IOException { - - Buffer tinkerBuf = graphBinaryModule.serialize(value); - ByteBuffer nioBuffer = tinkerBuf.nioBuffer(); - tinkerBuf.release(); - return graphProtocol.isGraphBinary() - ? nioBuffer - : GraphSONUtils.serializeToByteBuffer(value, graphProtocol); - } - - public static Frame defaultDseFrameOf(Message responseMessage) { - return Frame.forResponse( - DseProtocolVersion.DSE_V2.getCode(), - 0, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - responseMessage); - } - - public static Message singleGraphRow(GraphProtocol graphProtocol, GraphBinaryModule module) - throws IOException { - Vertex value = - DetachedVertex.build() - .setId(1) - .setLabel("person") - .addProperty( - DetachedVertexProperty.build() - .setId(11) - .setLabel("name") - .setValue("marko") - .create()) - .create(); - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "gremlin", - 0, - graphProtocol.isGraphBinary() - ? RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB) - : RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null, - 1, - true); - Queue> data = new ArrayDeque<>(); - data.add( - ImmutableList.of( - serialize( - graphProtocol.isGraphBinary() - // GraphBinary returns results directly inside a Traverser - ? 
new DefaultRemoteTraverser<>(value, 1) - : ImmutableMap.of("result", value), - graphProtocol, - module))); - return new DefaultRows(metadata, data); - } - - // Returns 10 rows, each with a vertex - public static Rows tenGraphRows( - GraphProtocol graphProtocol, GraphBinaryModule module, int page, boolean last) - throws IOException { - DseRowsMetadata metadata = - new DseRowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "gremlin", - 0, - graphProtocol.isGraphBinary() - ? RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB) - : RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null, - page, - last); - Queue> data = new ArrayDeque<>(); - int start = (page - 1) * 10; - for (int i = start; i < start + 10; i++) { - Vertex v = - DetachedVertex.build() - .setId("vertex" + i) - .setLabel("person") - .addProperty( - DetachedVertexProperty.build() - .setId("property" + i) - .setLabel("name") - .setValue("user" + i) - .create()) - .create(); - data.add( - ImmutableList.of( - serialize( - graphProtocol.isGraphBinary() - // GraphBinary returns results directly inside a Traverser - ? new DefaultRemoteTraverser<>(v, 1) - : ImmutableMap.of("result", v), - graphProtocol, - module))); - } - return new DefaultRows(metadata, data); - } - - public static GraphBinaryModule createGraphBinaryModule(DefaultDriverContext context) { - TypeSerializerRegistry registry = GraphBinaryModule.createDseTypeSerializerRegistry(context); - return new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); - } - - public static void assertThatContainsProperties( - Map properties, Object... 
propsToMatch) { - for (int i = 0; i < propsToMatch.length; i += 2) { - assertThat(properties).containsEntry(propsToMatch[i], propsToMatch[i + 1]); - } - } - - public static void assertThatContainsLabel( - Map properties, Direction direction, String label) { - assertThat(properties) - .hasEntrySatisfying( - direction, - value -> - assertThat(value) - .asInstanceOf(InstanceOfAssertFactories.MAP) - .containsEntry(T.label, label)); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java deleted file mode 100644 index e36f7e97e5a..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.binary; - -import static com.datastax.oss.driver.api.core.type.DataTypes.BIGINT; -import static com.datastax.oss.driver.api.core.type.DataTypes.DOUBLE; -import static com.datastax.oss.driver.api.core.type.DataTypes.DURATION; -import static com.datastax.oss.driver.api.core.type.DataTypes.FLOAT; -import static com.datastax.oss.driver.api.core.type.DataTypes.INT; -import static com.datastax.oss.driver.api.core.type.DataTypes.TEXT; -import static com.datastax.oss.driver.api.core.type.DataTypes.listOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.mapOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.setOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.tupleOf; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.dse.driver.internal.core.data.geometry.Distance; -import com.datastax.dse.driver.internal.core.graph.EditDistance; -import com.datastax.dse.driver.internal.core.graph.GraphConversions; -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.dse.driver.internal.core.graph.GraphSONUtils; -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.type.DataTypes; -import 
com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.IOException; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneOffset; -import java.util.List; -import java.util.Set; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; -import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; -import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class GraphDataTypesTest { - - private GraphBinaryModule graphBinaryModule; - - @Mock private DefaultDriverContext context; - - private static final MutableCodecRegistry CODEC_REGISTRY = - new DefaultCodecRegistry("testDseRegistry"); - - static { - CODEC_REGISTRY.register(DseTypeCodecs.POINT, DseTypeCodecs.LINE_STRING, 
DseTypeCodecs.POLYGON); - } - - private static Object[][] graphsonOneDataTypes = - new Object[][] { - {"~’~^ää#123#ö"}, - {(byte) 34}, - {BigDecimal.TEN}, - {BigInteger.TEN}, - {Boolean.TRUE}, - {false}, - {23}, - {23L}, - {23.0d}, - {23f}, - {(short) 23}, - {LocalDate.now(ZoneOffset.UTC)}, - {LocalTime.now(ZoneOffset.UTC)}, - {java.util.UUID.randomUUID()}, - {Instant.now()}, - }; - - private static Object[][] graphsonTwoDataTypes = - new Object[][] { - {ImmutableList.of(1L, 2L, 3L)}, - {ImmutableSet.of(1L, 2L, 3L)}, - {ImmutableMap.of("a", 1, "b", 2)}, - {Point.fromCoordinates(3.3, 4.4)}, - { - LineString.fromPoints( - Point.fromCoordinates(1, 1), Point.fromCoordinates(2, 2), Point.fromCoordinates(3, 3)) - }, - { - Polygon.fromPoints( - Point.fromCoordinates(3, 4), Point.fromCoordinates(5, 4), Point.fromCoordinates(6, 6)) - }, - }; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(context.getCodecRegistry()).thenReturn(CODEC_REGISTRY); - when(context.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); - - TypeSerializerRegistry registry = GraphBinaryModule.createDseTypeSerializerRegistry(context); - graphBinaryModule = - new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); - } - - @DataProvider - public static Object[][] graphsonOneDataProvider() { - - return graphsonOneDataTypes; - } - - @DataProvider - public static Object[][] graphsonTwoDataProvider() { - - return TestDataProviders.concat(graphsonOneDataTypes, graphsonTwoDataTypes); - } - - @DataProvider - public static Object[][] binaryDataProvider() throws UnknownHostException { - - Object[][] binaryDataTypes = - new Object[][] { - {InetAddress.getLocalHost()}, - {ImmutableList.of(ImmutableList.of(1L, 3L), ImmutableList.of(2L, 4L))}, - {ImmutableSet.of(ImmutableSet.of(1, 2, 3))}, - {ImmutableMap.of(ImmutableMap.of("a", 1), ImmutableMap.of(2, "b"))}, - {tupleOf(INT, TEXT, FLOAT).newValue(1, "2", 3.41f)}, - { - tupleOf(INT, TEXT, 
tupleOf(TEXT, DURATION)) - .newValue( - 1, "2", tupleOf(TEXT, DURATION).newValue("a", CqlDuration.newInstance(2, 1, 0))) - }, - { - tupleOf( - listOf(INT), - setOf(FLOAT), - DataTypes.mapOf(TEXT, BIGINT), - listOf(listOf(DOUBLE)), - setOf(setOf(FLOAT)), - listOf(tupleOf(INT, TEXT))) - .newValue( - ImmutableList.of(4, 8, 22, 34, 37, 59), - ImmutableSet.of(28f, 44f, 59f), - ImmutableMap.of("big10", 2345L), - ImmutableList.of( - ImmutableList.of(11.1d, 33.3d), ImmutableList.of(22.2d, 44.4d)), - ImmutableSet.of(ImmutableSet.of(55.5f)), - ImmutableList.of(tupleOf(INT, TEXT).newValue(3, "three"))) - }, - { - new UserDefinedTypeBuilder("ks", "udt1") - .withField("a", INT) - .withField("b", TEXT) - .build() - .newValue(1, "two") - }, - {new Distance(Point.fromCoordinates(3.4, 17.0), 2.5)}, - {new EditDistance("xyz", 3)}, - {DseGraph.g.V().has("name", "marko").asAdmin().getBytecode()}, - { - GraphConversions.bytecodeToSerialize( - BatchGraphStatement.builder() - .addTraversal(DseGraph.g.addV("person").property("name", "1")) - .addTraversal(DseGraph.g.addV("person").property("name", "1")) - .build()) - }, - }; - return TestDataProviders.concat(graphsonTwoDataProvider(), binaryDataTypes); - } - - @Test - @UseDataProvider("binaryDataProvider") - public void dataTypesTest(Object value) throws IOException { - verifySerDeBinary(value); - } - - @Test - @UseDataProvider("graphsonOneDataProvider") - public void dataTypesTestGraphsonOne(Object value) throws IOException { - verifySerDeGraphson(value, GraphProtocol.GRAPHSON_1_0); - } - - @Test - @UseDataProvider("graphsonTwoDataProvider") - public void dataTypesTestGraphsonTwo(Object value) throws IOException { - verifySerDeGraphson(value, GraphProtocol.GRAPHSON_2_0); - } - - @Test - public void complexUdtTests() throws IOException { - UserDefinedType type1 = - new UserDefinedTypeBuilder("ks", "udt1").withField("a", INT).withField("b", TEXT).build(); - verifySerDeBinary(type1.newValue(1, "2")); - - TupleType secondNested = 
tupleOf(BIGINT, listOf(BIGINT)); - TupleType firstNested = tupleOf(TEXT, secondNested); - - UserDefinedType type2 = - new UserDefinedTypeBuilder("ks", "udt2") - .withField("a", INT) - .withField("b", TEXT) - .withField("c", type1) - .withField("mylist", listOf(BIGINT)) - .withField("mytuple_withlist", firstNested) - .build(); - - verifySerDeBinary( - type2.newValue( - 1, - "2", - type1.newValue(3, "4"), - ImmutableList.of(5L), - firstNested.newValue("6", secondNested.newValue(7L, ImmutableList.of(8L))))); - - UserDefinedType type3 = - new UserDefinedTypeBuilder("ks", "udt3") - .withField("a", listOf(INT)) - .withField("b", setOf(FLOAT)) - .withField("c", mapOf(TEXT, BIGINT)) - .withField("d", listOf(listOf(DOUBLE))) - .withField("e", setOf(setOf(FLOAT))) - .withField("f", listOf(tupleOf(INT, TEXT))) - .build(); - - verifySerDeBinary( - type3.newValue( - ImmutableList.of(1), - ImmutableSet.of(2.1f), - ImmutableMap.of("3", 4L), - ImmutableList.of(ImmutableList.of(5.1d, 6.1d), ImmutableList.of(7.1d)), - ImmutableSet.of(ImmutableSet.of(8.1f), ImmutableSet.of(9.1f)), - ImmutableList.of(tupleOf(INT, TEXT).newValue(10, "11")))); - } - - @Test - public void complexTypesAndGeoTests() throws IOException { - - TupleType tuple = tupleOf(DseDataTypes.POINT, DseDataTypes.LINE_STRING, DseDataTypes.POLYGON); - tuple.attach(context); - - verifySerDeBinary( - tuple.newValue( - Point.fromCoordinates(3.3, 4.4), - LineString.fromPoints( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(3, 3)), - Polygon.fromPoints( - Point.fromCoordinates(3, 4), - Point.fromCoordinates(5, 4), - Point.fromCoordinates(6, 6)))); - - UserDefinedType udt = - new UserDefinedTypeBuilder("ks", "udt1") - .withField("a", DseDataTypes.POINT) - .withField("b", DseDataTypes.LINE_STRING) - .withField("c", DseDataTypes.POLYGON) - .build(); - udt.attach(context); - - verifySerDeBinary( - udt.newValue( - Point.fromCoordinates(3.3, 4.4), - LineString.fromPoints( - 
Point.fromCoordinates(1, 1), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(3, 3)), - Polygon.fromPoints( - Point.fromCoordinates(3, 4), - Point.fromCoordinates(5, 4), - Point.fromCoordinates(6, 6)))); - } - - private void verifySerDeBinary(Object input) throws IOException { - Buffer result = graphBinaryModule.serialize(input); - Object deserialized = graphBinaryModule.deserialize(result); - result.release(); - assertThat(deserialized).isEqualTo(input); - } - - private void verifySerDeGraphson(Object input, GraphProtocol protocol) throws IOException { - ByteBuffer buffer = GraphSONUtils.serializeToByteBuffer(input, protocol); - Object deserialized = deserializeGraphson(buffer, protocol, input.getClass()); - - Object expected = (input instanceof Set) ? ImmutableList.copyOf((Set) input) : input; - assertThat(deserialized).isEqualTo(expected); - } - - private Object deserializeGraphson( - ByteBuffer buffer, GraphProtocol protocol, Class expectedClass) throws IOException { - List data = ImmutableList.of(buffer); - GraphNode node = GraphSONUtils.createGraphNode(data, protocol); - return node.as(expectedClass); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java deleted file mode 100644 index 324c4ff4672..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.graph.reactive; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.tenGraphRows; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.DseTestDataProviders; -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; -import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; -import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestHandlerTestBase; -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; -import com.datastax.dse.driver.internal.core.graph.GraphRequestHandlerTestHarness; -import com.datastax.dse.driver.internal.core.graph.GraphSupportChecker; -import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import 
com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.PoolBehavior; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.io.IOException; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -public class ReactiveGraphRequestProcessorTest extends ContinuousCqlRequestHandlerTestBase { - - private GraphRequestAsyncProcessor asyncProcessor; - private GraphSupportChecker graphSupportChecker; - - @Before - public void setUp() { - DefaultDriverContext context = mock(DefaultDriverContext.class); - graphSupportChecker = mock(GraphSupportChecker.class); - asyncProcessor = Mockito.spy(new GraphRequestAsyncProcessor(context, graphSupportChecker)); - } - - @Test - public void should_be_able_to_process_graph_reactive_result_set() { - ReactiveGraphRequestProcessor processor = new ReactiveGraphRequestProcessor(asyncProcessor); - assertThat( - processor.canProcess( - ScriptGraphStatement.newInstance("g.V()"), - ReactiveGraphRequestProcessor.REACTIVE_GRAPH_RESULT_SET)) - .isTrue(); - } - - @Test - public void should_create_reactive_result_set() { - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withProtocolVersion(DSE_V1); - try (GraphRequestHandlerTestHarness harness = builder.build()) { - ReactiveGraphRequestProcessor processor = new ReactiveGraphRequestProcessor(asyncProcessor); - GraphStatement graphStatement = ScriptGraphStatement.newInstance("g.V()"); - assertThat( - processor.process(graphStatement, harness.getSession(), harness.getContext(), "test")) - .isInstanceOf(DefaultReactiveGraphResultSet.class); - } - } - - @Test - @UseDataProvider( - value = "allDseProtocolVersionsAndSupportedGraphProtocols", - location = DseTestDataProviders.class) - public void should_complete_single_page_result( - 
DseProtocolVersion version, GraphProtocol graphProtocol) throws IOException { - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (GraphRequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - DefaultDriverContext context = harness.getContext(); - GraphStatement graphStatement = ScriptGraphStatement.newInstance("g.V()"); - - GraphBinaryModule graphBinaryModule = createGraphBinaryModule(context); - when(asyncProcessor.getGraphBinaryModule()).thenReturn(graphBinaryModule); - - ReactiveGraphResultSet publisher = - new ReactiveGraphRequestProcessor(asyncProcessor) - .process(graphStatement, session, context, "test"); - - Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - // emulate single page - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 1, true))); - - List rows = rowsPublisher.toList().blockingGet(); - - assertThat(rows).hasSize(10); - checkResultSet(rows); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .hasSize(1) - .containsExactly(rows.get(0).getExecutionInfo()); - } - } - - @Test - @UseDataProvider( - value = "allDseProtocolVersionsAndSupportedGraphProtocols", - location = DseTestDataProviders.class) - public void should_complete_multi_page_result( - DseProtocolVersion version, GraphProtocol graphProtocol) throws IOException { - when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(true); - when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol); - - 
GraphRequestHandlerTestHarness.Builder builder = - GraphRequestHandlerTestHarness.builder().withProtocolVersion(version); - PoolBehavior node1Behavior = builder.customBehavior(node1); - try (GraphRequestHandlerTestHarness harness = builder.build()) { - - DefaultSession session = harness.getSession(); - DefaultDriverContext context = harness.getContext(); - GraphStatement graphStatement = ScriptGraphStatement.newInstance("g.V()"); - - GraphBinaryModule graphBinaryModule = createGraphBinaryModule(context); - when(asyncProcessor.getGraphBinaryModule()).thenReturn(graphBinaryModule); - - ReactiveGraphResultSet publisher = - new ReactiveGraphRequestProcessor(asyncProcessor) - .process(graphStatement, session, context, "test"); - - Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); - rowsPublisher.subscribe(); - - // emulate page 1 - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 1, false))); - // emulate page 2 - node1Behavior.setResponseSuccess( - defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 2, true))); - - List rows = rowsPublisher.toList().blockingGet(); - assertThat(rows).hasSize(20); - checkResultSet(rows); - - Flowable execInfosFlowable = - Flowable.fromPublisher(publisher.getExecutionInfos()); - assertThat(execInfosFlowable.toList().blockingGet()) - .hasSize(2) - .containsExactly(rows.get(0).getExecutionInfo(), rows.get(10).getExecutionInfo()); - } - } - - private void checkResultSet(List rows) { - for (ReactiveGraphNode row : rows) { - assertThat(row.isVertex()).isTrue(); - ExecutionInfo executionInfo = row.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - 
assertThat(executionInfo.getWarnings()).isEmpty(); - } - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java deleted file mode 100644 index 0d05f129520..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.graph.schema.refresh; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseGraphTableMetadata; -import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseEdgeMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseVertexMetadata; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.Map; -import org.junit.Before; -import 
org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class GraphSchemaRefreshTest { - - private static final DefaultDseTableMetadata OLD_TABLE = - newTable( - CqlIdentifier.fromInternal("ks_with_engine"), - CqlIdentifier.fromInternal("tbl"), - null, - null); - private static final DefaultDseKeyspaceMetadata OLD_KS1 = newKeyspace("ks1", null); - private static final DefaultDseKeyspaceMetadata KS_WITH_ENGINE = - newKeyspace( - CqlIdentifier.fromInternal("ks_with_engine"), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), OLD_TABLE)); - - @Mock private InternalDriverContext context; - @Mock private ChannelFactory channelFactory; - private DefaultMetadata oldMetadata; - - @Before - public void setup() { - when(context.getChannelFactory()).thenReturn(channelFactory); - oldMetadata = - DefaultMetadata.EMPTY.withSchema( - ImmutableMap.of(OLD_KS1.getName(), OLD_KS1, KS_WITH_ENGINE.getName(), KS_WITH_ENGINE), - false, - context); - } - - @Test - public void should_detect_created_keyspace_without_graph_engine() { - DefaultDseKeyspaceMetadata ks2 = newKeyspace("ks2", null); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of( - OLD_KS1.getName(), - OLD_KS1, - KS_WITH_ENGINE.getName(), - KS_WITH_ENGINE, - ks2.getName(), - ks2)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(3); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); - } - - @Test - public void should_detect_created_keyspace_with_graph_engine() { - DefaultDseKeyspaceMetadata ks2 = newKeyspace("ks2", "Core"); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of( - OLD_KS1.getName(), - OLD_KS1, - KS_WITH_ENGINE.getName(), - KS_WITH_ENGINE, - ks2.getName(), - ks2)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - 
assertThat(result.newMetadata.getKeyspaces()).hasSize(3); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); - } - - @Test - public void should_detect_top_level_graph_engine_update_in_keyspace() { - // Change only one top-level option (graph_engine) - DefaultDseKeyspaceMetadata newKs1 = newKeyspace("ks1", "Core"); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), KS_WITH_ENGINE, OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.updated(OLD_KS1, newKs1)); - } - - @Test - public void should_detect_adding_and_renaming_and_removing_vertex_label() { - DefaultDseTableMetadata newTable = - newTable( - KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - new DefaultDseVertexMetadata(CqlIdentifier.fromInternal("someLabel")), - null); - DefaultDseKeyspaceMetadata ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat(result.newMetadata.getKeyspaces().get(KS_WITH_ENGINE.getName())).isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex()) - .isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex() - .get() - .getLabelName() - .asInternal()) - 
.isEqualTo("someLabel"); - - // now rename the vertex label - newTable = - newTable( - KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - new DefaultDseVertexMetadata(CqlIdentifier.fromInternal("someNewLabel")), - null); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex() - .get() - .getLabelName() - .asInternal()) - .isEqualTo("someNewLabel"); - - // now remove the vertex label from the table - DefaultMetadata metadataWithVertexLabel = result.newMetadata; - DefaultDseTableMetadata tableWithRemovedLabel = - newTable(KS_WITH_ENGINE.getName(), CqlIdentifier.fromInternal("tbl"), null, null); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), tableWithRemovedLabel)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(metadataWithVertexLabel, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events) - .containsExactly(TableChangeEvent.updated(newTable, tableWithRemovedLabel)); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex() - .isPresent()) - .isFalse(); - } - - @Test - public void should_detect_adding_and_renaming_and_removing_edge_label() { - DefaultDseTableMetadata newTable = - newTable( - 
KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - null, - newEdgeMetadata( - CqlIdentifier.fromInternal("created"), - CqlIdentifier.fromInternal("person"), - CqlIdentifier.fromInternal("software"))); - DefaultDseKeyspaceMetadata ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - SchemaRefresh refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat(result.newMetadata.getKeyspaces().get(KS_WITH_ENGINE.getName())).isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getVertex()) - .isNotNull(); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getEdge() - .get() - .getLabelName() - .asInternal()) - .isEqualTo("created"); - - // now rename the edge label - newTable = - newTable( - KS_WITH_ENGINE.getName(), - CqlIdentifier.fromInternal("tbl"), - null, - newEdgeMetadata( - CqlIdentifier.fromInternal("CHANGED"), - CqlIdentifier.fromInternal("person"), - CqlIdentifier.fromInternal("software"))); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); - assertThat( - ((DseGraphTableMetadata) - result - 
.newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getEdge() - .get() - .getLabelName() - .asInternal()) - .isEqualTo("CHANGED"); - - // now remove the edge label from the table - DefaultMetadata metadataWithEdgeLabel = result.newMetadata; - DefaultDseTableMetadata tableWithRemovedLabel = - newTable(KS_WITH_ENGINE.getName(), CqlIdentifier.fromInternal("tbl"), null, null); - ks = - newKeyspace( - KS_WITH_ENGINE.getName(), - "Core", - ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), tableWithRemovedLabel)); - refresh = - new SchemaRefresh( - ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); - result = refresh.compute(metadataWithEdgeLabel, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events) - .containsExactly(TableChangeEvent.updated(newTable, tableWithRemovedLabel)); - assertThat( - ((DseGraphTableMetadata) - result - .newMetadata - .getKeyspaces() - .get(KS_WITH_ENGINE.getName()) - .getTable("tbl") - .get()) - .getEdge() - .isPresent()) - .isFalse(); - } - - private static DefaultDseKeyspaceMetadata newKeyspace(String name, String graphEngine) { - return new DefaultDseKeyspaceMetadata( - CqlIdentifier.fromInternal(name), - false, - false, - graphEngine, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private static DefaultDseKeyspaceMetadata newKeyspace( - CqlIdentifier name, String graphEngine, @NonNull Map tables) { - return new DefaultDseKeyspaceMetadata( - name, - false, - false, - graphEngine, - Collections.emptyMap(), - Collections.emptyMap(), - tables, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } - - private static DefaultDseTableMetadata newTable( - @NonNull CqlIdentifier keyspace, - @NonNull CqlIdentifier name, - @Nullable DseVertexMetadata vertex, - @Nullable DseEdgeMetadata 
edge) { - ImmutableList cols = - ImmutableList.of( - new DefaultColumnMetadata( - keyspace, - CqlIdentifier.fromInternal("parent"), - CqlIdentifier.fromInternal("id"), - DataTypes.INT, - false)); - return new DefaultDseTableMetadata( - keyspace, - name, - null, - false, - false, - cols, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - vertex, - edge); - } - - private static DefaultDseEdgeMetadata newEdgeMetadata( - @NonNull CqlIdentifier labelName, - @NonNull CqlIdentifier fromTable, - @NonNull CqlIdentifier toTable) { - return new DefaultDseEdgeMetadata( - labelName, - fromTable, - fromTable, - Collections.emptyList(), - Collections.emptyList(), - toTable, - toTable, - Collections.emptyList(), - Collections.emptyList()); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java deleted file mode 100644 index 85af9b5691b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class AddressFormatterTest { - - @Test - @UseDataProvider("addressesProvider") - public void should_format_addresses(Object address, String expected) { - // when - String result = AddressFormatter.nullSafeToString(address); - - // then - assertThat(result).isEqualTo(expected); - } - - @DataProvider - public static Object[][] addressesProvider() throws UnknownHostException { - return new Object[][] { - {new InetSocketAddress(8888), "0.0.0.0:8888"}, - {new InetSocketAddress("127.0.0.1", 8888), "127.0.0.1:8888"}, - {InetSocketAddress.createUnresolved("127.0.0.2", 8080), "127.0.0.2:8080"}, - {InetAddress.getByName("127.0.0.1"), "127.0.0.1"}, - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java deleted file mode 100644 index d5466b23dbc..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collections; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class ConfigAntiPatternsFinderTest { - - private static final ImmutableMap SSL_ANTI_PATTERN = - ImmutableMap.of( - "sslWithoutCertValidation", - "Client-to-node encryption is enabled but server certificate validation is disabled"); - - @Test - @UseDataProvider("sslConfigProvider") - public void should_find_ssl_anti_pattern( - boolean sslEngineFactoryClassDefined, - boolean hostnameValidation, - Map expected) { - // given - InternalDriverContext context = - mockDefaultProfile(sslEngineFactoryClassDefined, hostnameValidation); - - // when - 
Map antiPatterns = new ConfigAntiPatternsFinder().findAntiPatterns(context); - - // then - assertThat(antiPatterns).isEqualTo(expected); - } - - private InternalDriverContext mockDefaultProfile( - boolean sslEngineFactoryClassDefined, boolean hostnameValidation) { - InternalDriverContext context = mock(InternalDriverContext.class); - DriverConfig driverConfig = mock(DriverConfig.class); - when(context.getConfig()).thenReturn(driverConfig); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - when(profile.isDefined(SSL_ENGINE_FACTORY_CLASS)).thenReturn(sslEngineFactoryClassDefined); - when(profile.getBoolean(SSL_HOSTNAME_VALIDATION, false)).thenReturn(hostnameValidation); - when(driverConfig.getDefaultProfile()).thenReturn(profile); - return context; - } - - @DataProvider - public static Object[][] sslConfigProvider() { - return new Object[][] { - {true, true, Collections.emptyMap()}, - {true, false, SSL_ANTI_PATTERN}, - {false, false, Collections.emptyMap()}, - {false, true, Collections.emptyMap()} - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java deleted file mode 100644 index dde6db6059e..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collection; -import java.util.Set; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DataCentersFinderTest { - - @Test - @UseDataProvider("hostProvider") - public void should_detect_data_centers( - int numberOfRemoteHosts, - String dc1, - NodeDistance h1Distance, - String dc2, - NodeDistance h2Distance, - Set expected) { - // given - DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); - when(executionProfile.getInt(CONNECTION_POOL_REMOTE_SIZE)).thenReturn(numberOfRemoteHosts); - Collection nodes = mockNodes(dc1, h1Distance, dc2, h2Distance); - - // when - Set dataCenters = new DataCentersFinder().getDataCenters(nodes, 
executionProfile); - - // then - assertThat(dataCenters).isEqualTo(Sets.newHashSet(expected)); - } - - @DataProvider - public static Object[][] hostProvider() { - return new Object[][] { - {1, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc1", "dc2")}, - {1, "dc1", NodeDistance.LOCAL, "dc1", NodeDistance.REMOTE, Sets.newHashSet("dc1")}, - {0, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc1")}, - {0, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet()}, - {1, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc2")}, - {1, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.IGNORED, Sets.newHashSet("dc1")}, - {0, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet()}, - {0, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.IGNORED, Sets.newHashSet("dc1")}, - }; - } - - private Collection mockNodes( - String dc1, NodeDistance h1Distance, String dc2, NodeDistance h2Distance) { - Node n1 = mock(Node.class); - when(n1.getDatacenter()).thenReturn(dc1); - when(n1.getDistance()).thenReturn(h1Distance); - - Node n2 = mock(Node.class); - when(n2.getDatacenter()).thenReturn(dc2); - when(n2.getDistance()).thenReturn(h2Distance); - - return ImmutableSet.of(n1, n2); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java deleted file mode 100644 index de0f3a9d60b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.api.core.config.DseDriverOption.GRAPH_TRAVERSAL_SOURCE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.AUTH_PROVIDER_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.HEARTBEAT_INTERVAL; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_COMPRESSION; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.RECONNECTION_BASE_DELAY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_CONSISTENCY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; -import static 
com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_MAX; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import java.time.Duration; - -class ExecutionProfileMockUtil { - static final String DEFAULT_LOCAL_DC = "local-dc"; - static final int SPECEX_MAX_DEFAULT = 100; - static final int SPECEX_DELAY_DEFAULT = 20; - - static DriverExecutionProfile mockDefaultExecutionProfile() { - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - - when(profile.getDuration(REQUEST_TIMEOUT)).thenReturn(Duration.ofMillis(100)); - when(profile.getString(LOAD_BALANCING_POLICY_CLASS)).thenReturn("LoadBalancingPolicyImpl"); - when(profile.isDefined(LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS)).thenReturn(true); - when(profile.isDefined(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(true); - when(profile.getString(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(DEFAULT_LOCAL_DC); - when(profile.isDefined(SPECULATIVE_EXECUTION_MAX)).thenReturn(true); - when(profile.getInt(SPECULATIVE_EXECUTION_MAX)).thenReturn(SPECEX_MAX_DEFAULT); - when(profile.isDefined(SPECULATIVE_EXECUTION_DELAY)).thenReturn(true); - when(profile.getInt(SPECULATIVE_EXECUTION_DELAY)).thenReturn(SPECEX_DELAY_DEFAULT); - when(profile.getString(SPECULATIVE_EXECUTION_POLICY_CLASS)) - .thenReturn("SpeculativeExecutionImpl"); - when(profile.getString(REQUEST_CONSISTENCY)).thenReturn("LOCAL_ONE"); - when(profile.getString(REQUEST_SERIAL_CONSISTENCY)).thenReturn("SERIAL"); - 
when(profile.getInt(CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - when(profile.getInt(CONNECTION_POOL_REMOTE_SIZE)).thenReturn(1); - when(profile.getString(eq(PROTOCOL_COMPRESSION), any())).thenReturn("none"); - when(profile.getDuration(HEARTBEAT_INTERVAL)).thenReturn(Duration.ofMillis(100)); - when(profile.getDuration(RECONNECTION_BASE_DELAY)).thenReturn(Duration.ofMillis(100)); - when(profile.isDefined(SSL_ENGINE_FACTORY_CLASS)).thenReturn(true); - when(profile.getString(eq(AUTH_PROVIDER_CLASS), any())).thenReturn("AuthProviderImpl"); - when(profile.getString(GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("src-graph"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultRequestTimeoutExecutionProfile() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getDuration(REQUEST_TIMEOUT)).thenReturn(Duration.ofMillis(50)); - return profile; - } - - static DriverExecutionProfile mockNonDefaultLoadBalancingExecutionProfile() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(LOAD_BALANCING_POLICY_CLASS)).thenReturn("NonDefaultLoadBalancing"); - return profile; - } - - static DriverExecutionProfile mockUndefinedLocalDcExecutionProfile() { - DriverExecutionProfile profile = mockNonDefaultLoadBalancingExecutionProfile(); - when(profile.isDefined(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(false); - return profile; - } - - static DriverExecutionProfile mockNonDefaultSpeculativeExecutionInfo() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(SPECULATIVE_EXECUTION_POLICY_CLASS)) - .thenReturn("NonDefaultSpecexPolicy"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultConsistency() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(REQUEST_CONSISTENCY)).thenReturn("ALL"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultSerialConsistency() { - DriverExecutionProfile 
profile = mockDefaultExecutionProfile(); - when(profile.getString(REQUEST_SERIAL_CONSISTENCY)).thenReturn("ONE"); - return profile; - } - - static DriverExecutionProfile mockNonDefaultGraphOptions() { - DriverExecutionProfile profile = mockDefaultExecutionProfile(); - when(profile.getString(GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("non-default-graph"); - return profile; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java deleted file mode 100644 index fc92ab20521..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.DEFAULT_LOCAL_DC; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.SPECEX_DELAY_DEFAULT; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.SPECEX_MAX_DEFAULT; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockDefaultExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultConsistency; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultGraphOptions; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultLoadBalancingExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultRequestTimeoutExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultSerialConsistency; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultSpeculativeExecutionInfo; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockUndefinedLocalDcExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_LOAD_BALANCING_PACKAGE; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_SPECULATIVE_EXECUTION_PACKAGE; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; -import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; -import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; -import 
com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; - -@RunWith(DataProviderRunner.class) -public class ExecutionProfilesInfoFinderTest { - - @Test - public void should_include_info_about_default_profile() { - // given - DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile(); - Map profiles = - ImmutableMap.of("default", defaultExecutionProfile); - - InternalDriverContext context = - mockDriverContextWithProfiles(defaultExecutionProfile, profiles); - - // when - Map executionProfilesInfo = - new ExecutionProfilesInfoFinder().getExecutionProfilesInfo(context); - - // then - assertThat(executionProfilesInfo) - .isEqualTo( - ImmutableMap.of( - "default", - new SpecificExecutionProfile( - 100, - new LoadBalancingInfo( - "LoadBalancingPolicyImpl", - ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - new SpeculativeExecutionInfo( - "SpeculativeExecutionImpl", - ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - "LOCAL_ONE", - "SERIAL", - ImmutableMap.of("source", "src-graph")))); - } - - @Test - @UseDataProvider("executionProfileProvider") - public void should_include_info_about_default_profile_and_only_difference_for_specific_profile( - DriverExecutionProfile nonDefaultExecutionProfile, SpecificExecutionProfile 
expected) { - // given - - DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile(); - Map profiles = - ImmutableMap.of( - "default", defaultExecutionProfile, "non-default", nonDefaultExecutionProfile); - InternalDriverContext context = - mockDriverContextWithProfiles(defaultExecutionProfile, profiles); - // when - Map executionProfilesInfo = - new ExecutionProfilesInfoFinder().getExecutionProfilesInfo(context); - - // then - assertThat(executionProfilesInfo) - .isEqualTo( - ImmutableMap.of( - "default", - new SpecificExecutionProfile( - 100, - new LoadBalancingInfo( - "LoadBalancingPolicyImpl", - ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - new SpeculativeExecutionInfo( - "SpeculativeExecutionImpl", - ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - "LOCAL_ONE", - "SERIAL", - ImmutableMap.of("source", "src-graph")), - "non-default", - expected)); - } - - @DataProvider - public static Object[][] executionProfileProvider() { - return new Object[][] { - { - mockNonDefaultRequestTimeoutExecutionProfile(), - new SpecificExecutionProfile(50, null, null, null, null, null) - }, - { - mockNonDefaultLoadBalancingExecutionProfile(), - new SpecificExecutionProfile( - null, - new LoadBalancingInfo( - "NonDefaultLoadBalancing", - ImmutableMap.of("localDataCenter", DEFAULT_LOCAL_DC, "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - null, - null, - null, - null) - }, - { - mockUndefinedLocalDcExecutionProfile(), - new SpecificExecutionProfile( - null, - new LoadBalancingInfo( - "NonDefaultLoadBalancing", - ImmutableMap.of("filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - null, - null, - null, - null) - }, - { - mockNonDefaultSpeculativeExecutionInfo(), - new SpecificExecutionProfile( - null, - null, - new SpeculativeExecutionInfo( - "NonDefaultSpecexPolicy", - ImmutableMap.of( - "maxSpeculativeExecutions", 
SPECEX_MAX_DEFAULT, "delay", SPECEX_DELAY_DEFAULT), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - null, - null, - null) - }, - { - mockNonDefaultConsistency(), - new SpecificExecutionProfile(null, null, null, "ALL", null, null) - }, - { - mockNonDefaultSerialConsistency(), - new SpecificExecutionProfile(null, null, null, null, "ONE", null) - }, - { - mockNonDefaultGraphOptions(), - new SpecificExecutionProfile( - null, null, null, null, null, ImmutableMap.of("source", "non-default-graph")) - }, - { - mockDefaultExecutionProfile(), - new SpecificExecutionProfile(null, null, null, null, null, null) - } - }; - } - - @Test - public void should_not_include_null_fields_in_json() throws JsonProcessingException { - // given - SpecificExecutionProfile specificExecutionProfile = - new SpecificExecutionProfile(50, null, null, "ONE", null, ImmutableMap.of("a", "b")); - - // when - String result = new ObjectMapper().writeValueAsString(specificExecutionProfile); - - // then - assertThat(result) - .isEqualTo("{\"readTimeout\":50,\"consistency\":\"ONE\",\"graphOptions\":{\"a\":\"b\"}}"); - } - - @Test - public void should_include_empty_execution_profile_if_has_all_nulls() - throws JsonProcessingException { - // given - Map executionProfiles = - ImmutableMap.of("p", new SpecificExecutionProfile(null, null, null, null, null, null)); - - // when - String result = new ObjectMapper().writeValueAsString(executionProfiles); - - // then - assertThat(result).isEqualTo("{\"p\":{}}"); - } - - private InternalDriverContext mockDriverContextWithProfiles( - DriverExecutionProfile defaultExecutionProfile, - Map profiles) { - InternalDriverContext context = mock(InternalDriverContext.class); - DriverConfig driverConfig = mock(DriverConfig.class); - Mockito.>when(driverConfig.getProfiles()) - .thenReturn(profiles); - when(driverConfig.getDefaultProfile()).thenReturn(defaultExecutionProfile); - when(context.getConfig()).thenReturn(driverConfig); - return context; - } -} diff --git 
a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java deleted file mode 100644 index 74869893b72..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockDefaultExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultRequestTimeoutExecutionProfile; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_AUTH_PROVIDER_PACKAGE; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_LOAD_BALANCING_PACKAGE; -import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_SPECULATIVE_EXECUTION_PACKAGE; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType; -import com.datastax.dse.driver.internal.core.insights.schema.Insight; -import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata; -import com.datastax.dse.driver.internal.core.insights.schema.InsightType; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.CPUS; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.OS; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; -import 
com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData; -import com.datastax.dse.driver.internal.core.insights.schema.InsightsStatusData; -import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; -import com.datastax.dse.driver.internal.core.insights.schema.PoolSizeByHostDistance; -import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; -import com.datastax.dse.driver.internal.core.insights.schema.SSL; -import com.datastax.dse.driver.internal.core.insights.schema.SessionStateForNode; -import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; -import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.PoolManager; -import com.datastax.oss.driver.shaded.guava.common.base.Suppliers; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import 
com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.channel.DefaultEventLoop; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mockito; - -@RunWith(DataProviderRunner.class) -public class InsightsClientTest { - private static final StackTraceElement[] EMPTY_STACK_TRACE = {}; - private static final Map EMPTY_OBJECT_MAP = Collections.emptyMap(); - private static final Supplier MOCK_TIME_SUPPLIER = Suppliers.ofInstance(1L); - private static final InsightsConfiguration INSIGHTS_CONFIGURATION = - new InsightsConfiguration(true, 300000L, new DefaultEventLoop()); - - @Test - public void should_construct_json_event_startup_message() throws IOException { - // given - DefaultDriverContext context = mockDefaultDriverContext(); - PlatformInfoFinder platformInfoFinder = mock(PlatformInfoFinder.class); - OS os = new OS("linux", "1.2", "x64"); - CPUS cpus = new CPUS(8, "intel i7"); - Map javaDeps = - ImmutableMap.of("version", new RuntimeAndCompileTimeVersions("1.8.0", "1.8.0", false)); - Map> runtimeInfo = - ImmutableMap.of("java", javaDeps); - InsightsPlatformInfo insightsPlatformInfo = new InsightsPlatformInfo(os, cpus, runtimeInfo); - when(platformInfoFinder.getInsightsPlatformInfo()).thenReturn(insightsPlatformInfo); - - 
ConfigAntiPatternsFinder configAntiPatternsFinder = mock(ConfigAntiPatternsFinder.class); - when(configAntiPatternsFinder.findAntiPatterns(any(DefaultDriverContext.class))) - .thenReturn( - ImmutableMap.of( - "contactPointsMultipleDCs", - "Contact points contain hosts from multiple data centers")); - - DataCentersFinder dataCentersFinder = mock(DataCentersFinder.class); - when(dataCentersFinder.getDataCenters(any(DefaultDriverContext.class))) - .thenReturn(Sets.newHashSet("dc1", "dc2")); - ReconnectionPolicyInfoFinder reconnectionPolicyInfoFinder = - mock(ReconnectionPolicyInfoFinder.class); - when(reconnectionPolicyInfoFinder.getReconnectionPolicyInfo(any(), any())) - .thenReturn( - new ReconnectionPolicyInfo( - "reconnection-policy-a", ImmutableMap.of("opt-a", 1), "com.datastax.dse")); - - InsightsClient insightsClient = - new InsightsClient( - context, - MOCK_TIME_SUPPLIER, - INSIGHTS_CONFIGURATION, - platformInfoFinder, - reconnectionPolicyInfoFinder, - new ExecutionProfilesInfoFinder(), - configAntiPatternsFinder, - dataCentersFinder, - EMPTY_STACK_TRACE); - - // when - String startupMessage = insightsClient.createStartupMessage(); - Insight insight = - new ObjectMapper() - .readValue(startupMessage, new TypeReference>() {}); - - // then - assertThat(insight.getMetadata()) - .isEqualTo( - new InsightMetadata( - "driver.startup", - 1L, - ImmutableMap.of("language", "java"), - InsightType.EVENT, - "v1")); - - InsightsStartupData insightData = insight.getInsightData(); - assertThat(insightData.getClientId()).isEqualTo("client-id"); - assertThat(insightData.getSessionId()).isNotNull(); - assertThat(insightData.getDriverName()).isEqualTo("DataStax Enterprise Java Driver"); - assertThat(insightData.getDriverVersion()).isNotEmpty(); - assertThat(insightData.getApplicationName()).isEqualTo("app-name"); - assertThat(insightData.getApplicationVersion()).isEqualTo("1.0.0"); - assertThat(insightData.isApplicationNameWasGenerated()).isEqualTo(false); - 
assertThat(insightData.getContactPoints()) - .isEqualTo(ImmutableMap.of("localhost", Collections.singletonList("127.0.0.1:9999"))); - - assertThat(insightData.getInitialControlConnection()).isEqualTo("127.0.0.1:10"); - assertThat(insightData.getLocalAddress()).isEqualTo("127.0.0.1"); - assertThat(insightData.getHostName()).isNotEmpty(); - assertThat(insightData.getProtocolVersion()).isEqualTo(DSE_V2.getCode()); - assertThat(insightData.getExecutionProfiles()) - .isEqualTo( - ImmutableMap.of( - "default", - new SpecificExecutionProfile( - 100, - new LoadBalancingInfo( - "LoadBalancingPolicyImpl", - ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true), - DEFAULT_LOAD_BALANCING_PACKAGE), - new SpeculativeExecutionInfo( - "SpeculativeExecutionImpl", - ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20), - DEFAULT_SPECULATIVE_EXECUTION_PACKAGE), - "LOCAL_ONE", - "SERIAL", - ImmutableMap.of("source", "src-graph")), - "non-default", - new SpecificExecutionProfile(50, null, null, null, null, null))); - assertThat(insightData.getPoolSizeByHostDistance()) - .isEqualTo(new PoolSizeByHostDistance(2, 1, 0)); - assertThat(insightData.getHeartbeatInterval()).isEqualTo(100); - assertThat(insightData.getCompression()).isEqualTo("none"); - assertThat(insightData.getReconnectionPolicy()) - .isEqualTo( - new ReconnectionPolicyInfo( - "reconnection-policy-a", ImmutableMap.of("opt-a", 1), "com.datastax.dse")); - assertThat(insightData.getSsl()).isEqualTo(new SSL(true, false)); - assertThat(insightData.getAuthProvider()) - .isEqualTo(new AuthProviderType("AuthProviderImpl", DEFAULT_AUTH_PROVIDER_PACKAGE)); - assertThat(insightData.getOtherOptions()).isEqualTo(EMPTY_OBJECT_MAP); - assertThat(insightData.getPlatformInfo()).isEqualTo(insightsPlatformInfo); - assertThat(insightData.getConfigAntiPatterns()) - .isEqualTo( - ImmutableMap.of( - "contactPointsMultipleDCs", - "Contact points contain hosts from multiple data centers")); - 
assertThat(insightData.getPeriodicStatusInterval()).isEqualTo(300); - assertThat(insightData.getDataCenters()).isEqualTo(Sets.newHashSet("dc1", "dc2")); - } - - @Test - public void should_group_contact_points_by_host_name() { - // given - Set contactPoints = - ImmutableSet.of( - InetSocketAddress.createUnresolved("127.0.0.1", 8080), - InetSocketAddress.createUnresolved("127.0.0.1", 8081), - InetSocketAddress.createUnresolved("127.0.0.2", 8081)); - - Map> expected = - ImmutableMap.of( - "127.0.0.1", - ImmutableList.of("127.0.0.1:8080", "127.0.0.1:8081"), - "127.0.0.2", - ImmutableList.of("127.0.0.2:8081")); - - // when - Map> resolvedContactPoints = - InsightsClient.getResolvedContactPoints(contactPoints); - - // then - assertThat(resolvedContactPoints).isEqualTo(expected); - } - - @Test - public void should_construct_json_event_status_message() throws IOException { - // given - InsightsClient insightsClient = - new InsightsClient( - mockDefaultDriverContext(), - MOCK_TIME_SUPPLIER, - INSIGHTS_CONFIGURATION, - null, - null, - null, - null, - null, - EMPTY_STACK_TRACE); - - // when - String statusMessage = insightsClient.createStatusMessage(); - - // then - Insight insight = - new ObjectMapper() - .readValue(statusMessage, new TypeReference>() {}); - assertThat(insight.getMetadata()) - .isEqualTo( - new InsightMetadata( - "driver.status", 1L, ImmutableMap.of("language", "java"), InsightType.EVENT, "v1")); - InsightsStatusData insightData = insight.getInsightData(); - assertThat(insightData.getClientId()).isEqualTo("client-id"); - assertThat(insightData.getSessionId()).isNotNull(); - assertThat(insightData.getControlConnection()).isEqualTo("127.0.0.1:10"); - assertThat(insightData.getConnectedNodes()) - .isEqualTo( - ImmutableMap.of( - "127.0.0.1:10", new SessionStateForNode(1, 10), - "127.0.0.1:20", new SessionStateForNode(2, 20))); - } - - @Test - public void should_schedule_task_with_initial_delay() { - // given - final AtomicInteger counter = new AtomicInteger(); 
- Runnable runnable = counter::incrementAndGet; - - // when - InsightsClient.scheduleInsightsTask(100L, Executors.newScheduledThreadPool(1), runnable); - - // then - await().atMost(1, SECONDS).until(() -> counter.get() >= 1); - } - - @Test - @UseDataProvider(value = "stackTraceProvider") - public void should_get_caller_of_create_cluster(StackTraceElement[] stackTrace, String expected) { - // when - String result = InsightsClient.getClusterCreateCaller(stackTrace); - - // then - assertThat(result).isEqualTo(expected); - } - - @Test - @SuppressWarnings("ResultOfMethodCallIgnored") - public void should_execute_should_send_event_check_only_once() - throws UnknownHostException, InterruptedException { - // given - InsightsConfiguration insightsConfiguration = mock(InsightsConfiguration.class); - when(insightsConfiguration.isMonitorReportingEnabled()).thenReturn(true); - when(insightsConfiguration.getStatusEventDelayMillis()).thenReturn(10L); - when(insightsConfiguration.getExecutor()).thenReturn(new DefaultEventLoop()); - - InsightsClient insightsClient = - new InsightsClient( - mockDefaultDriverContext(), - MOCK_TIME_SUPPLIER, - insightsConfiguration, - null, - null, - null, - null, - null, - EMPTY_STACK_TRACE); - - // when - insightsClient.scheduleStatusMessageSend(); - // emulate periodic calls to sendStatusMessage - insightsClient.sendStatusMessage(); - insightsClient.sendStatusMessage(); - insightsClient.sendStatusMessage(); - - // then - verify(insightsConfiguration, times(1)).isMonitorReportingEnabled(); - } - - @DataProvider - public static Object[][] stackTraceProvider() { - StackTraceElement[] onlyInitCall = - new StackTraceElement[] { - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 94), - }; - - StackTraceElement[] stackTraceElementsWithoutInitCall = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new 
StackTraceElement( - "com.datastax.driver.core.InsightsClient", - "getClusterCreateCaller", - "InsightsClient.java", - 302) - }; - StackTraceElement[] stackTraceWithOneInitCall = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - }; - StackTraceElement[] stackTraceWithOneInitCallAndCaller = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 1) - }; - - StackTraceElement[] stackTraceWithTwoInitCallsAndCaller = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildDefaultSessionAsync", - "SessionBuilder.java", - 300), - new StackTraceElement( - "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 1) - }; - StackTraceElement[] stackTraceWithChainOfInitCalls = - new StackTraceElement[] { - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildDefaultSessionAsync", - "SessionBuilder.java", - 332), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildAsync", - "SessionBuilder.java", - 291), - new StackTraceElement( - 
"com.datastax.oss.driver.api.core.session.SessionBuilder", - "build", - "SessionBuilder.java", - 306) - }; - StackTraceElement[] stackTraceWithChainOfInitCallsAndCaller = - new StackTraceElement[] { - new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), - new StackTraceElement( - "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", - "", - "DefaultDriverContext.java", - 243), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildContext", - "SessionBuilder.java", - 687), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildDefaultSessionAsync", - "SessionBuilder.java", - 332), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "buildAsync", - "SessionBuilder.java", - 291), - new StackTraceElement( - "com.datastax.oss.driver.api.core.session.SessionBuilder", - "build", - "SessionBuilder.java", - 306), - new StackTraceElement( - "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 8) - }; - - return new Object[][] { - {new StackTraceElement[] {}, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceElementsWithoutInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceWithOneInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {onlyInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceWithOneInitCallAndCaller, "com.example.ActualCallerNameApp"}, - {stackTraceWithTwoInitCallsAndCaller, "com.example.ActualCallerNameApp"}, - {stackTraceWithChainOfInitCalls, InsightsClient.DEFAULT_JAVA_APPLICATION}, - {stackTraceWithChainOfInitCallsAndCaller, "com.example.ActualCallerNameApp"} - }; - } - - private DefaultDriverContext mockDefaultDriverContext() throws UnknownHostException { - DefaultDriverContext context = mock(DefaultDriverContext.class); - mockConnectionPools(context); - MetadataManager manager = mock(MetadataManager.class); - 
when(context.getMetadataManager()).thenReturn(manager); - Metadata metadata = mock(Metadata.class); - when(manager.getMetadata()).thenReturn(metadata); - Node node = mock(Node.class); - when(node.getExtras()) - .thenReturn( - ImmutableMap.of( - DseNodeProperties.DSE_VERSION, Objects.requireNonNull(Version.parse("6.0.5")))); - when(metadata.getNodes()).thenReturn(ImmutableMap.of(UUID.randomUUID(), node)); - DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile(); - DriverExecutionProfile nonDefaultExecutionProfile = - mockNonDefaultRequestTimeoutExecutionProfile(); - - Map startupOptions = new HashMap<>(); - startupOptions.put(StartupOptionsBuilder.CLIENT_ID_KEY, "client-id"); - startupOptions.put(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "1.0.0"); - startupOptions.put(StartupOptionsBuilder.APPLICATION_NAME_KEY, "app-name"); - startupOptions.put(StartupOptionsBuilder.DRIVER_VERSION_KEY, "2.x"); - startupOptions.put(StartupOptionsBuilder.DRIVER_NAME_KEY, "DataStax Enterprise Java Driver"); - - when(context.getStartupOptions()).thenReturn(startupOptions); - when(context.getProtocolVersion()).thenReturn(DSE_V2); - DefaultNode contactPoint = mock(DefaultNode.class); - EndPoint contactEndPoint = mock(EndPoint.class); - when(contactEndPoint.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 9999)); - when(contactPoint.getEndPoint()).thenReturn(contactEndPoint); - when(manager.getContactPoints()).thenReturn(ImmutableSet.of(contactPoint)); - - DriverConfig driverConfig = mock(DriverConfig.class); - when(context.getConfig()).thenReturn(driverConfig); - Map profiles = - ImmutableMap.of( - "default", defaultExecutionProfile, "non-default", nonDefaultExecutionProfile); - Mockito.>when(driverConfig.getProfiles()) - .thenReturn(profiles); - when(driverConfig.getDefaultProfile()).thenReturn(defaultExecutionProfile); - - ControlConnection controlConnection = mock(ControlConnection.class); - DriverChannel channel = mock(DriverChannel.class); - 
EndPoint controlConnectionEndpoint = mock(EndPoint.class); - when(controlConnectionEndpoint.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 10)); - - when(channel.getEndPoint()).thenReturn(controlConnectionEndpoint); - when(channel.localAddress()).thenReturn(new InetSocketAddress("127.0.0.1", 10)); - when(controlConnection.channel()).thenReturn(channel); - when(context.getControlConnection()).thenReturn(controlConnection); - return context; - } - - private void mockConnectionPools(DefaultDriverContext driverContext) { - Node node1 = mock(Node.class); - EndPoint endPoint1 = mock(EndPoint.class); - when(endPoint1.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 10)); - when(node1.getEndPoint()).thenReturn(endPoint1); - when(node1.getOpenConnections()).thenReturn(1); - ChannelPool channelPool1 = mock(ChannelPool.class); - when(channelPool1.getInFlight()).thenReturn(10); - - Node node2 = mock(Node.class); - EndPoint endPoint2 = mock(EndPoint.class); - when(endPoint2.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 20)); - when(node2.getEndPoint()).thenReturn(endPoint2); - when(node2.getOpenConnections()).thenReturn(2); - ChannelPool channelPool2 = mock(ChannelPool.class); - when(channelPool2.getInFlight()).thenReturn(20); - - Map channelPools = ImmutableMap.of(node1, channelPool1, node2, channelPool2); - PoolManager poolManager = mock(PoolManager.class); - when(poolManager.getPools()).thenReturn(channelPools); - when(driverContext.getPoolManager()).thenReturn(poolManager); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java deleted file mode 100644 index 9edd4494bdd..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collection; -import java.util.Collections; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class InsightsSupportVerifierTest { - - @Test - @UseDataProvider(value = "dseHostsProvider") - public void should_detect_DSE_versions_that_supports_insights( - Collection hosts, boolean expected) { - // when - boolean result = InsightsSupportVerifier.supportsInsights(hosts); - - // then - assertThat(result).isEqualTo(expected); - } - - @DataProvider - public static Object[][] 
dseHostsProvider() { - Node dse605 = mock(Node.class); - when(dse605.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.5"))); - Node dse604 = mock(Node.class); - when(dse604.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.4"))); - Node dse600 = mock(Node.class); - when(dse600.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.0"))); - Node dse5113 = mock(Node.class); - when(dse5113.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("5.1.13"))); - Node dse500 = mock(Node.class); - when(dse500.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("5.0.0"))); - Node nodeWithoutExtras = mock(Node.class); - when(nodeWithoutExtras.getExtras()).thenReturn(Collections.emptyMap()); - - return new Object[][] { - {ImmutableList.of(dse605), true}, - {ImmutableList.of(dse604), false}, - {ImmutableList.of(dse600), false}, - {ImmutableList.of(dse5113), true}, - {ImmutableList.of(dse500), false}, - {ImmutableList.of(dse5113, dse605), true}, - {ImmutableList.of(dse5113, dse600), false}, - {ImmutableList.of(dse500, dse600), false}, - {ImmutableList.of(), false}, - {ImmutableList.of(nodeWithoutExtras), false} - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java deleted file mode 100644 index 336f19184d3..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PackageUtilTest { - - private static final String DEFAULT_PACKAGE = "default.package"; - - @Test - public void should_find_package_name_for_class() { - // given - TestClass testClass = new TestClass(); - - // then - String namespace = PackageUtil.getNamespace(testClass.getClass()); - - // then - assertThat(namespace).isEqualTo("com.datastax.dse.driver.internal.core.insights"); - } - - @Test - @UseDataProvider("packagesProvider") - public void should_get_full_package_or_return_default(String fullClassSetting, String expected) { - // when - String result = PackageUtil.getFullPackageOrDefault(fullClassSetting, DEFAULT_PACKAGE); - - // then - assertThat(result).isEqualTo(expected); - } - - @Test - @UseDataProvider("classesProvider") - public void should_get_class_name_from_full_class_setting( - String fullClassSetting, String expected) { - // when - String result = PackageUtil.getClassName(fullClassSetting); - - // then - assertThat(result).isEqualTo(expected); - } - - @DataProvider 
- public static Object[][] packagesProvider() { - return new Object[][] { - {"com.P", "com"}, - {"ClassName", DEFAULT_PACKAGE}, - {"", DEFAULT_PACKAGE}, - {"com.p.a.2.x.12.Class", "com.p.a.2.x.12"}, - }; - } - - @DataProvider - public static Object[][] classesProvider() { - return new Object[][] { - {"com.P", "P"}, - {"ClassName", "ClassName"}, - {"", ""}, - {"com.p.a.2.x.12.Class", "Class"}, - }; - } - - private static class TestClass {} -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java deleted file mode 100644 index 2a098363d46..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static com.datastax.dse.driver.internal.core.insights.PlatformInfoFinder.UNVERIFIED_RUNTIME_VERSION; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; -import java.io.InputStream; -import java.net.URL; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.Map; -import org.junit.Test; - -public class PlatformInfoFinderTest { - - private URL nullUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return null; - } - - private URL nettyUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return this.getClass().getResource("/insights/pom.properties"); - } - - private URL malformedUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return this.getClass().getResource("/insights/malformed-pom.properties"); - } - - private URL nonExistingUrlProvider(PlatformInfoFinder.DependencyFromFile d) { - return this.getClass().getResource("/insights/non-existing.pom"); - } - - @Test - public void should_find_dependencies_from_file() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/test-dependencies.txt"); - Map expected = new HashMap<>(); - expected.put( - "io.netty:netty-transport-native-epoll", - withUnverifiedRuntimeVersionOptional("4.0.56.Final")); - expected.put("org.slf4j:slf4j-api", withUnverifiedRuntimeVersion("1.7.25")); - expected.put("org.ow2.asm:asm", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("com.esri.geometry:esri-geometry-api", withUnverifiedRuntimeVersion("1.2.1")); - expected.put("io.netty:netty-transport", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("com.github.jnr:jnr-x86asm", withUnverifiedRuntimeVersion("1.0.2")); - expected.put("org.ow2.asm:asm-analysis", withUnverifiedRuntimeVersion("5.0.3")); - 
expected.put("com.github.jnr:jnr-constants", withUnverifiedRuntimeVersion("0.9.9")); - expected.put("io.netty:netty-common", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("com.google.guava:guava", withUnverifiedRuntimeVersion("19.0")); - expected.put("org.xerial.snappy:snappy-java", withUnverifiedRuntimeVersionOptional("1.1.2.6")); - expected.put("io.dropwizard.metrics:metrics-core", withUnverifiedRuntimeVersion("3.2.2")); - expected.put("org.ow2.asm:asm-tree", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("com.github.jnr:jnr-posix", withUnverifiedRuntimeVersion("3.0.44")); - expected.put("org.codehaus.jackson:jackson-core-asl", withUnverifiedRuntimeVersion("1.9.12")); - expected.put( - "com.fasterxml.jackson.core:jackson-databind", withUnverifiedRuntimeVersion("2.7.9.3")); - expected.put("io.netty:netty-codec", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put( - "com.fasterxml.jackson.core:jackson-annotations", withUnverifiedRuntimeVersion("2.8.11")); - expected.put("com.fasterxml.jackson.core:jackson-core", withUnverifiedRuntimeVersion("2.8.11")); - expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("at.yawk.lz4:lz4-java", withUnverifiedRuntimeVersionOptional("1.10.1")); - expected.put("org.hdrhistogram:HdrHistogram", withUnverifiedRuntimeVersionOptional("2.1.10")); - expected.put("com.github.jnr:jffi", withUnverifiedRuntimeVersion("1.2.16")); - expected.put("io.netty:netty-buffer", withUnverifiedRuntimeVersion("4.0.56.Final")); - expected.put("org.ow2.asm:asm-commons", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("org.json:json", withUnverifiedRuntimeVersion("20090211")); - expected.put("org.ow2.asm:asm-util", withUnverifiedRuntimeVersion("5.0.3")); - expected.put("com.github.jnr:jnr-ffi", withUnverifiedRuntimeVersion("2.1.7")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - 
assertThat(stringStringMap).hasSize(28); - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_find_dependencies_from_file_without_duplicate() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/duplicate-dependencies.txt"); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).hasSize(1); - } - - @Test - public void should_keep_order_of_dependencies() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/ordered-dependencies.txt"); - Map expected = new LinkedHashMap<>(); - expected.put("b-org.com:art1", withUnverifiedRuntimeVersion("1.0")); - expected.put("a-org.com:art1", withUnverifiedRuntimeVersion("2.0")); - expected.put("c-org.com:art1", withUnverifiedRuntimeVersion("3.0")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - Iterator iterator = expected.keySet().iterator(); - assertThat(iterator.next()).isEqualTo("b-org.com:art1"); - assertThat(iterator.next()).isEqualTo("a-org.com:art1"); - assertThat(iterator.next()).isEqualTo("c-org.com:art1"); - } - - @Test - public void should_add_information_about_java_platform() { - // given - Map> runtimeDependencies = new HashMap<>(); - - // when - new PlatformInfoFinder(this::nullUrlProvider).addJavaVersion(runtimeDependencies); - - // then - Map javaDependencies = runtimeDependencies.get("java"); - assertThat(javaDependencies.size()).isEqualTo(3); - } - - @Test - public void should_load_runtime_version_from_pom_properties_URL() { - // given - InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); - Map expected = new LinkedHashMap<>(); - expected.put( - "io.netty:netty-handler", - new 
RuntimeAndCompileTimeVersions("4.0.56.Final", "4.0.0.Final", false)); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nettyUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_load_runtime_version_of_optional_dependency_from_pom_properties_URL() { - // given - InputStream inputStream = - this.getClass().getResourceAsStream("/insights/netty-dependency-optional.txt"); - Map expected = new LinkedHashMap<>(); - expected.put( - "io.netty:netty-handler", - new RuntimeAndCompileTimeVersions("4.0.56.Final", "4.0.0.Final", true)); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nettyUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_not_load_runtime_dependency_from_malformed_pom_properties() { - // given - InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); - Map expected = new LinkedHashMap<>(); - expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.0.Final")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::malformedUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - @Test - public void should_not_load_runtime_dependency_from_non_existing_pom_properties() { - // given - InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); - Map expected = new LinkedHashMap<>(); - expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.0.Final")); - - // when - Map stringStringMap = - new PlatformInfoFinder(this::nonExistingUrlProvider).fetchDependenciesFromFile(inputStream); - - // then - assertThat(stringStringMap).isEqualTo(expected); - } - - private RuntimeAndCompileTimeVersions withUnverifiedRuntimeVersion(String compileVersion) { - return 
new RuntimeAndCompileTimeVersions(UNVERIFIED_RUNTIME_VERSION, compileVersion, false); - } - - private RuntimeAndCompileTimeVersions withUnverifiedRuntimeVersionOptional( - String compileVersion) { - return new RuntimeAndCompileTimeVersions(UNVERIFIED_RUNTIME_VERSION, compileVersion, true); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java deleted file mode 100644 index a076ca38b1c..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.insights; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy; -import java.time.Duration; -import org.assertj.core.data.MapEntry; -import org.junit.Test; - -public class ReconnectionPolicyInfoFinderTest { - - @Test - public void should_find_an_info_about_constant_reconnection_policy() { - // given - DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); - when(driverExecutionProfile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY)) - .thenReturn(Duration.ofMillis(100)); - ReconnectionPolicy constantReconnectionPolicy = mock(ConstantReconnectionPolicy.class); - - // when - ReconnectionPolicyInfo reconnectionPolicyInfo = - new ReconnectionPolicyInfoFinder() - .getReconnectionPolicyInfo(constantReconnectionPolicy, driverExecutionProfile); - - // then - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("delayMs", 100L)); - assertThat(reconnectionPolicyInfo.getType()).contains("ConstantReconnectionPolicy"); - } - - @Test - public void should_find_an_info_about_exponential_reconnection_policy() { - ExponentialReconnectionPolicy exponentialReconnectionPolicy = - mock(ExponentialReconnectionPolicy.class); - when(exponentialReconnectionPolicy.getBaseDelayMs()).thenReturn(100L); - when(exponentialReconnectionPolicy.getMaxAttempts()).thenReturn(10L); - 
when(exponentialReconnectionPolicy.getMaxDelayMs()).thenReturn(200L); - - // when - ReconnectionPolicyInfo reconnectionPolicyInfo = - new ReconnectionPolicyInfoFinder() - .getReconnectionPolicyInfo(exponentialReconnectionPolicy, null); - - // then - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("baseDelayMs", 100L)); - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("maxAttempts", 10L)); - assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("maxDelayMs", 200L)); - assertThat(reconnectionPolicyInfo.getType()).contains("ExponentialReconnectionPolicy"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java deleted file mode 100644 index 3ef89c78714..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.protocol; - -import static com.datastax.dse.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.dse.driver.Assertions; -import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodecTest; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.function.Supplier; -import org.apache.tinkerpop.gremlin.structure.io.Buffer; -import org.junit.Test; -import org.junit.runner.RunWith; - -/** - * Note: like {@link ByteBufPrimitiveCodecTest} we don't test trivial methods that simply delegate - * to the underlying Buffer, nor default implementations inherited from {@link - * com.datastax.oss.protocol.internal.PrimitiveCodec}. 
- */ -@RunWith(DataProviderRunner.class) -public class TinkerpopBufferPrimitiveCodecTest { - - private static final DseNettyBufferFactory factory = new DseNettyBufferFactory(); - private final TinkerpopBufferPrimitiveCodec codec = new TinkerpopBufferPrimitiveCodec(factory); - - @Test - public void should_concatenate() { - Buffer left = factory.withBytes(0xca, 0xfe); - Buffer right = factory.withBytes(0xba, 0xbe); - assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); - } - - @Test - public void should_read_inet_v4() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("127.0.0.1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_read_inet_v6() { - Buffer lengthAndAddress = factory.heap(17); - lengthAndAddress.writeByte(16); - lengthAndAddress.writeLong(0); - lengthAndAddress.writeLong(1); - Buffer source = - codec.concat( - lengthAndAddress, - // port (as an int) - factory.withBytes(0x00, 0x00, 0x23, 0x52)); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_fail_to_read_inet_if_length_invalid() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - assertThatThrownBy(() -> codec.readInet(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_inetaddr_v4() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01); - InetAddress inetAddr = codec.readInetAddr(source); - 
assertThat(inetAddr.getHostAddress()).isEqualTo("127.0.0.1"); - } - - @Test - public void should_read_inetaddr_v6() { - Buffer source = factory.heap(17); - source.writeByte(16); - source.writeLong(0); - source.writeLong(1); - InetAddress inetAddr = codec.readInetAddr(source); - assertThat(inetAddr.getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - } - - @Test - public void should_fail_to_read_inetaddr_if_length_invalid() { - Buffer source = - factory.withBytes( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01); - assertThatThrownBy(() -> codec.readInetAddr(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_bytes() { - Buffer source = - factory.withBytes( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - ByteBuffer bytes = codec.readBytes(source); - assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); - } - - @Test - public void should_read_null_bytes() { - Buffer source = factory.withBytes(0xFF, 0xFF, 0xFF, 0xFF); // -1 (as an int) - assertThat(codec.readBytes(source)).isNull(); - } - - @Test - public void should_read_short_bytes() { - Buffer source = - factory.withBytes( - // length (as an unsigned short) - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - assertThat(Bytes.toHexString(codec.readShortBytes(source))).isEqualTo("0xcafebabe"); - } - - @DataProvider - public static Object[][] bufferTypes() { - return new Object[][] { - {(Supplier) factory::heap}, - {(Supplier) factory::io}, - {(Supplier) factory::direct} - }; - } - - @Test - @UseDataProvider("bufferTypes") - public void should_read_string(Supplier supplier) { - Buffer source = - factory.withBytes( - supplier, - // length (as an unsigned short) - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readString(source)).isEqualTo("hello"); - } - - @Test - public void 
should_fail_to_read_string_if_not_enough_characters() { - Buffer source = factory.heap(); - source.writeShort(4); - - assertThatThrownBy(() -> codec.readString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_read_long_string() { - Buffer source = - factory.withBytes( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readLongString(source)).isEqualTo("hello"); - } - - @Test - public void should_fail_to_read_long_string_if_not_enough_characters() { - Buffer source = factory.heap(4, 4); - source.writeInt(4); - - assertThatThrownBy(() -> codec.readLongString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_write_inet_v4() throws Exception { - Buffer dest = factory.heap(1 + 4 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - .containsExactly( - "0x04" // size as a byte - + "7f000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inet_v6() throws Exception { - Buffer dest = factory.heap(1 + 16 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("::1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inetaddr_v4() throws Exception { - Buffer dest = factory.heap(1 + 4); - InetAddress inetAddr = InetAddress.getByName("127.0.0.1"); - codec.writeInetAddr(inetAddr, dest); - assertThat(dest) - .containsExactly( - "0x04" // size as a byte - + "7f000001" // address - ); - } - - @Test - public void should_write_inetaddr_v6() 
throws Exception { - Buffer dest = factory.heap(1 + 16); - InetAddress inetAddr = InetAddress.getByName("::1"); - codec.writeInetAddr(inetAddr, dest); - Assertions.assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - ); - } - - @Test - public void should_write_string() { - Buffer dest = factory.heap(); - codec.writeString("hello", dest); - assertThat(dest) - .containsExactly( - "0x0005" // size as an unsigned short - + "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_long_string() { - Buffer dest = factory.heap(9); - codec.writeLongString("hello", dest); - assertThat(dest) - .containsExactly( - "0x00000005" - + // size as an int - "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_bytes() { - Buffer dest = factory.heap(8); - codec.writeBytes(Bytes.fromHexString("0xcafebabe"), dest); - assertThat(dest) - .containsExactly( - "0x00000004" - + // size as an int - "cafebabe"); - } - - @Test - public void should_write_short_bytes() { - Buffer dest = factory.heap(6); - codec.writeShortBytes(new byte[] {(byte) 0xca, (byte) 0xfe, (byte) 0xba, (byte) 0xbe}, dest); - assertThat(dest) - .containsExactly( - "0x0004" - + // size as an unsigned short - "cafebabe"); - } - - @Test - public void should_write_null_bytes() { - Buffer dest = factory.heap(4); - codec.writeBytes((ByteBuffer) null, dest); - assertThat(dest).containsExactly("0xFFFFFFFF"); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java deleted file mode 100644 index 9e4d019660c..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.Geometry; - -public abstract class GeometryCodecTest> { - - private C codec; - - protected GeometryCodecTest(C codec) { - this.codec = codec; - } - - public void should_format(G input, String expected) { - assertThat(codec.format(input)).isEqualTo(expected); - } - - public void should_parse(String input, G expected) { - assertThat(codec.parse(input)).isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java deleted file mode 100644 index ba71026ac2c..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class LineStringCodecTest extends GeometryCodecTest { - - private static DefaultLineString lineString = - new DefaultLineString( - new DefaultPoint(30, 10), new DefaultPoint(10, 30), new DefaultPoint(40, 40)); - - public LineStringCodecTest() { - super(new LineStringCodec()); - } - - @DataProvider - public static Object[][] serde() { - return new Object[][] {{null, null}, {lineString, lineString}}; - } - - @DataProvider - public static Object[][] format() { - return new Object[][] {{null, "NULL"}, {lineString, "'LINESTRING (30 10, 10 30, 40 40)'"}}; - } - - @DataProvider - public static Object[][] parse() { - return new Object[][] { - {null, null}, - {"", null}, - {" ", null}, - {"NULL", null}, - {" NULL ", null}, - {"'LINESTRING (30 10, 10 30, 40 40)'", lineString}, - {" ' LineString (30 10, 10 30, 40 40 ) ' ", 
lineString} - }; - } - - @Test - @UseDataProvider("format") - @Override - public void should_format(LineString input, String expected) { - super.should_format(input, expected); - } - - @Test - @UseDataProvider("parse") - @Override - public void should_parse(String input, LineString expected) { - super.should_parse(input, expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java deleted file mode 100644 index 7948f4d758a..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PointCodecTest extends GeometryCodecTest { - - public PointCodecTest() { - super(new PointCodec()); - } - - @DataProvider - public static Object[][] serde() { - return new Object[][] { - {null, null}, - {Point.fromCoordinates(1, 2), Point.fromCoordinates(1, 2)}, - {Point.fromCoordinates(-1.1, -2.2), Point.fromCoordinates(-1.1, -2.2)} - }; - } - - @DataProvider - public static Object[][] format() { - return new Object[][] { - {null, "NULL"}, - {Point.fromCoordinates(1, 2), "'POINT (1 2)'"}, - {Point.fromCoordinates(-1.1, -2.2), "'POINT (-1.1 -2.2)'"} - }; - } - - @DataProvider - public static Object[][] parse() { - return new Object[][] { - {null, null}, - {"", null}, - {" ", null}, - {"NULL", null}, - {" NULL ", null}, - {"'POINT ( 1 2 )'", Point.fromCoordinates(1, 2)}, - {"'POINT ( 1.0 2.0 )'", Point.fromCoordinates(1, 2)}, - {"' point ( -1.1 -2.2 )'", Point.fromCoordinates(-1.1, -2.2)}, - {" ' Point ( -1.1 -2.2 ) ' ", Point.fromCoordinates(-1.1, -2.2)} - }; - } - - @Test - @UseDataProvider("format") - @Override - public void should_format(Point input, String expected) { - super.should_format(input, expected); - } - - @Test - @UseDataProvider("parse") - @Override - public void should_parse(String input, Point expected) { - super.should_parse(input, expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java deleted file mode 100644 index 290dabe7519..00000000000 --- 
a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.type.codec.geometry; - -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; -import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PolygonCodecTest extends GeometryCodecTest { - private static Polygon polygon = - new DefaultPolygon( - new DefaultPoint(30, 10), - new DefaultPoint(10, 20), - new DefaultPoint(20, 40), - new DefaultPoint(40, 40)); - - public PolygonCodecTest() { - super(new PolygonCodec()); - } - - @DataProvider - public static Object[][] serde() { - return new Object[][] {{null, null}, {polygon, polygon}}; - } - - @DataProvider - public static Object[][] format() { - return new 
Object[][] { - {null, "NULL"}, {polygon, "'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'"} - }; - } - - @DataProvider - public static Object[][] parse() { - return new Object[][] { - {null, null}, - {"", null}, - {" ", null}, - {"NULL", null}, - {" NULL ", null}, - {"'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'", polygon}, - {" ' Polygon ( ( 30 10, 40 40, 20 40, 10 20, 30 10 ) ) ' ", polygon} - }; - } - - @Test - @UseDataProvider("format") - @Override - public void should_format(Polygon input, String expected) { - super.should_format(input, expected); - } - - @Test - @UseDataProvider("parse") - @Override - public void should_parse(String input, Polygon expected) { - super.should_parse(input, expected); - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java deleted file mode 100644 index b9b618b8dd3..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.core.type.codec.time; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.time.DateRange; -import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.text.ParseException; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DateRangeCodecTest { - - @Test - @UseDataProvider("dateRanges") - public void should_encode_and_decode(DateRange dateRange) { - TypeCodec codec = DseTypeCodecs.DATE_RANGE; - DateRange decoded = - codec.decode(codec.encode(dateRange, ProtocolVersion.DEFAULT), ProtocolVersion.DEFAULT); - assertThat(decoded).isEqualTo(dateRange); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_encode_unknown_date_range_type() { - DseTypeCodecs.DATE_RANGE.decode(ByteBuffer.wrap(new byte[] {127}), ProtocolVersion.DEFAULT); - } - - @Test - @UseDataProvider("dateRangeStrings") - public void should_format_and_parse(String dateRangeString) { - TypeCodec codec = DseTypeCodecs.DATE_RANGE; - String formatted = codec.format(codec.parse(dateRangeString)); - assertThat(formatted).isEqualTo(MoreObjects.firstNonNull(dateRangeString, "NULL")); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_string() { - DseTypeCodecs.DATE_RANGE.parse("foo"); - } - - @DataProvider - public static Object[][] dateRanges() throws ParseException { - return new Object[][] { - {null}, - {DateRange.parse("[2011-01 TO 2015]")}, - 
{DateRange.parse("[2010-01-02 TO 2015-05-05T13]")}, - {DateRange.parse("[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]")}, - {DateRange.parse("[2010-01-01T15 TO 2016-02]")}, - {DateRange.parse("[1500 TO 1501]")}, - {DateRange.parse("[0001-01-01 TO 0001-01-01]")}, - {DateRange.parse("[0001-01-01 TO 0001-01-02]")}, - {DateRange.parse("[0000-01-01 TO 0000-01-01]")}, - {DateRange.parse("[0000-01-01 TO 0000-01-02]")}, - {DateRange.parse("[-0001-01-01 TO -0001-01-01]")}, - {DateRange.parse("[-0001-01-01 TO -0001-01-02]")}, - {DateRange.parse("[* TO 2014-12-01]")}, - {DateRange.parse("[1999 TO *]")}, - {DateRange.parse("[* TO *]")}, - {DateRange.parse("-0009")}, - {DateRange.parse("2000-11")}, - {DateRange.parse("*")} - }; - } - - @DataProvider - public static Object[][] dateRangeStrings() { - return new Object[][] { - {null}, - {"NULL"}, - {"'[2011-01 TO 2015]'"}, - {"'[2010-01-02 TO 2015-05-05T13]'"}, - {"'[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]'"}, - {"'[2010-01-01T15 TO 2016-02]'"}, - {"'[1500 TO 1501]'"}, - {"'[0001-01-01 TO 0001-01-01]'"}, - {"'[0001-01-01 TO 0001-01-02]'"}, - {"'[0000-01-01 TO 0000-01-01]'"}, - {"'[0000-01-01 TO 0000-01-02]'"}, - {"'[-0001-01-01 TO -0001-01-01]'"}, - {"'[-0001-01-01 TO -0001-01-02]'"}, - {"'[* TO 2014-12-01]'"}, - {"'[1999 TO *]'"}, - {"'[* TO *]'"}, - {"'-0009'"}, - {"'2000-11'"}, - {"'*'"} - }; - } -} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java deleted file mode 100644 index 5cf8a67f84b..00000000000 --- a/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.core.util.concurrent; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; - -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class BoundedConcurrentQueueTest { - - @Test - public void should_dequeue_null_when_empty() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(4); - assertThat(queue.peek()).isNull(); - assertThat(queue.poll()).isNull(); - } - - @Test - public void should_enqueue_and_dequeue_while_not_full() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(4); - - assertThatStage(queue.offer(1)).isSuccess(e -> assertThat(e).isEqualTo(1)); - assertThat(queue.peek()).isEqualTo(1); - assertThat(queue.poll()).isEqualTo(1); - - assertThatStage(queue.offer(2)).isSuccess(e -> assertThat(e).isEqualTo(2)); - assertThatStage(queue.offer(3)).isSuccess(e -> assertThat(e).isEqualTo(3)); - assertThatStage(queue.offer(4)).isSuccess(e -> assertThat(e).isEqualTo(4)); - - assertThat(queue.peek()).isEqualTo(2); - assertThat(queue.poll()).isEqualTo(2); - assertThat(queue.peek()).isEqualTo(3); - assertThat(queue.poll()).isEqualTo(3); - assertThat(queue.peek()).isEqualTo(4); - assertThat(queue.poll()).isEqualTo(4); 
- assertThat(queue.poll()).isNull(); - } - - @Test - public void should_delay_insertion_when_full_until_space_available() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(4); - - assertThatStage(queue.offer(1)).isSuccess(e -> assertThat(e).isEqualTo(1)); - assertThatStage(queue.offer(2)).isSuccess(e -> assertThat(e).isEqualTo(2)); - assertThatStage(queue.offer(3)).isSuccess(e -> assertThat(e).isEqualTo(3)); - assertThatStage(queue.offer(4)).isSuccess(e -> assertThat(e).isEqualTo(4)); - - CompletionStage enqueue5 = queue.offer(5); - assertThat(enqueue5).isNotDone(); - - assertThat(queue.poll()).isEqualTo(1); - assertThatStage(enqueue5).isSuccess(e -> assertThat(e).isEqualTo(5)); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_insert_when_other_insert_already_pending() { - BoundedConcurrentQueue queue = new BoundedConcurrentQueue<>(1); - assertThatStage(queue.offer(1)).isSuccess(); - assertThatStage(queue.offer(2)).isNotDone(); - queue.offer(3); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/Assertions.java b/core/src/test/java/com/datastax/oss/driver/Assertions.java deleted file mode 100644 index 8478053e6d8..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/Assertions.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.VersionAssert; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.internal.core.CompletionStageAssert; -import com.datastax.oss.driver.internal.core.DriverConfigAssert; -import com.datastax.oss.driver.internal.core.NettyFutureAssert; -import com.datastax.oss.driver.internal.core.metadata.token.TokenRangeAssert; -import io.netty.buffer.ByteBuf; -import io.netty.util.concurrent.Future; -import java.util.concurrent.CompletionStage; - -public class Assertions extends org.assertj.core.api.Assertions { - public static ByteBufAssert assertThat(ByteBuf actual) { - return new ByteBufAssert(actual); - } - - public static DriverConfigAssert assertThat(DriverConfig actual) { - return new DriverConfigAssert(actual); - } - - public static NettyFutureAssert assertThat(Future actual) { - return new NettyFutureAssert<>(actual); - } - - /** - * Use a different name because this clashes with AssertJ's built-in one. Our implementation is a - * bit more flexible for checking completion values and errors. 
- */ - public static CompletionStageAssert assertThatStage(CompletionStage actual) { - return new CompletionStageAssert<>(actual); - } - - public static VersionAssert assertThat(Version actual) { - return new VersionAssert(actual); - } - - public static TokenRangeAssert assertThat(TokenRange actual) { - return new TokenRangeAssert(actual); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java b/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java deleted file mode 100644 index 4cd9c3ed358..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import org.assertj.core.api.AbstractAssert; - -public class ByteBufAssert extends AbstractAssert { - public ByteBufAssert(ByteBuf actual) { - super(actual, ByteBufAssert.class); - } - - public ByteBufAssert containsExactly(String hexString) { - ByteBuf copy = actual.duplicate(); - byte[] expectedBytes = Bytes.fromHexString(hexString).array(); - byte[] actualBytes = new byte[expectedBytes.length]; - copy.readBytes(actualBytes); - assertThat(actualBytes).containsExactly(expectedBytes); - // And nothing more - assertThat(copy.isReadable()).isFalse(); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java b/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java deleted file mode 100644 index 085134b28f2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver; - -import static org.assertj.core.api.Assertions.fail; - -import org.junit.runner.Description; -import org.junit.runner.notification.RunListener; - -/** - * Common parent of all driver tests, to store common configuration and perform sanity checks. - * - * @see "maven-surefire-plugin configuration in pom.xml" - */ -public class DriverRunListener extends RunListener { - - @Override - public void testFinished(Description description) throws Exception { - // If a test interrupted the main thread silently, this can make later tests fail. Instead, we - // fail the test and clear the interrupt status. - // Note: Thread.interrupted() also clears the flag, which is what we want. - if (Thread.interrupted()) { - fail(description.getMethodName() + " interrupted the main thread"); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java b/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java deleted file mode 100644 index a0448c4b769..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import java.util.Arrays; -import java.util.Locale; - -public class TestDataProviders { - - public static Object[][] fromList(Object... l) { - Object[][] result = new Object[l.length][]; - for (int i = 0; i < l.length; i++) { - result[i] = new Object[1]; - result[i][0] = l[i]; - } - return result; - } - - public static Object[][] concat(Object[][] left, Object[][] right) { - Object[][] result = Arrays.copyOf(left, left.length + right.length); - System.arraycopy(right, 0, result, left.length, right.length); - return result; - } - - // example: [ [a,b], [c,d] ], [ [1], [2] ], [ [true], [false] ] - // => [ [a,b,1,true], [a,b,1,false], [a,b,2,true], [a,b,2,false], ... ] - public static Object[][] combine(Object[][]... providers) { - int numberOfProviders = providers.length; // (ex: 3) - - // ex: 2 * 2 * 2 combinations - int numberOfCombinations = 1; - for (Object[][] provider : providers) { - numberOfCombinations *= provider.length; - } - - Object[][] result = new Object[numberOfCombinations][]; - // The current index in each provider (ex: [1,0,1] => [c,d,1,false]) - int[] indices = new int[numberOfProviders]; - - for (int c = 0; c < numberOfCombinations; c++) { - int combinationLength = 0; - for (int p = 0; p < numberOfProviders; p++) { - combinationLength += providers[p][indices[p]].length; - } - Object[] combination = new Object[combinationLength]; - int destPos = 0; - for (int p = 0; p < numberOfProviders; p++) { - Object[] src = providers[p][indices[p]]; - System.arraycopy(src, 0, combination, destPos, src.length); - destPos += src.length; - } - result[c] = combination; - - // Update indices: try to increment from the right, if it overflows reset and move left - for (int p = providers.length - 1; p >= 0; p--) { - if (indices[p] < providers[p].length - 1) { - // ex: [0,0,0], p = 2 => [0,0,1] - indices[p] += 1; - break; - } else { - // ex: [0,0,1], p = 2 => [0,0,0], 
loop to increment to [0,1,0] - indices[p] = 0; - } - } - } - return result; - } - - @DataProvider - public static Object[][] booleans() { - return fromList(true, false); - } - - /** An arbitrary set of locales to use when testing locale-sensitive operations. */ - @DataProvider - public static Object[][] locales() { - return new Object[][] { - new Object[] {Locale.US}, - // non-latin alphabets - new Object[] {Locale.CHINA}, - new Object[] {Locale.JAPAN}, - new Object[] {Locale.KOREA}, - new Object[] {new Locale("gr") /* greek */}, - new Object[] {new Locale("ar") /* arabic */}, - // latin-based alphabets with extended character sets - new Object[] {new Locale("vi") /* vietnamese */}, - // JAVA-2883: Turkish is the most problematic locale as String.toLowerCase("TITLE") - // wouldn't return "title" but rather "tıtle", where 'ı' is the 'LATIN SMALL LETTER - // DOTLESS I' character specific to the Turkish language. - new Object[] {new Locale("tr") /* turkish*/}, - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java deleted file mode 100644 index 4cd4c0fcd74..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static com.datastax.oss.driver.api.core.ConsistencyLevel.QUORUM; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.data.MapEntry.entry; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class AllNodesFailedExceptionTest { - - @Mock(name = "node1") - private Node node1; - - @Mock(name = "node2") - private Node node2; - - @SuppressWarnings("deprecation") - @Test - public void should_create_instance_from_map_of_first_errors() { - // given - UnavailableException e1 = new UnavailableException(node1, QUORUM, 2, 1); - ReadTimeoutException e2 = new ReadTimeoutException(node2, QUORUM, 2, 1, false); - Map errors = ImmutableMap.of(node1, e1, node2, e2); - // when - AllNodesFailedException e = AllNodesFailedException.fromErrors(errors); - // then - assertThat(e) - .hasMessage( - "All 2 node(s) tried for the query failed " - + "(showing first 2 nodes, use getAllErrors() for more): " - + "node1: [%s], node2: [%s]", - 
e1, e2); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node1, list -> assertThat(list).containsExactly(e1)); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node2, list -> assertThat(list).containsExactly(e2)); - assertThat(e.getErrors()).containsEntry(node1, e1); - assertThat(e.getErrors()).containsEntry(node2, e2); - assertThat(e).hasSuppressedException(e1).hasSuppressedException(e2); - } - - @SuppressWarnings("deprecation") - @Test - public void should_create_instance_from_list_of_all_errors() { - // given - UnavailableException e1a = new UnavailableException(node1, QUORUM, 2, 1); - ReadTimeoutException e1b = new ReadTimeoutException(node1, QUORUM, 2, 1, false); - ReadTimeoutException e2a = new ReadTimeoutException(node2, QUORUM, 2, 1, false); - List> errors = - ImmutableList.of(entry(node1, e1a), entry(node1, e1b), entry(node2, e2a)); - // when - AllNodesFailedException e = AllNodesFailedException.fromErrors(errors); - // then - assertThat(e) - .hasMessage( - "All 2 node(s) tried for the query failed " - + "(showing first 2 nodes, use getAllErrors() for more): " - + "node1: [%s, %s], node2: [%s]", - e1a, e1b, e2a); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node1, list -> assertThat(list).containsExactly(e1a, e1b)); - assertThat(e.getAllErrors()) - .hasEntrySatisfying(node2, list -> assertThat(list).containsExactly(e2a)); - assertThat(e.getErrors()).containsEntry(node1, e1a); - assertThat(e.getErrors()).containsEntry(node2, e2a); - assertThat(e) - .hasSuppressedException(e1a) - .hasSuppressedException(e1b) - .hasSuppressedException(e2a); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java deleted file mode 100644 index 5c7203b8f8d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CqlIdentifierTest { - - @Test - public void should_build_from_internal() { - assertThat(CqlIdentifier.fromInternal("foo").asInternal()).isEqualTo("foo"); - assertThat(CqlIdentifier.fromInternal("Foo").asInternal()).isEqualTo("Foo"); - assertThat(CqlIdentifier.fromInternal("foo bar").asInternal()).isEqualTo("foo bar"); - assertThat(CqlIdentifier.fromInternal("foo\"bar").asInternal()).isEqualTo("foo\"bar"); - assertThat(CqlIdentifier.fromInternal("create").asInternal()).isEqualTo("create"); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_build_from_valid_cql(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlIdentifier.fromCql("foo").asInternal()).isEqualTo("foo"); - 
assertThat(CqlIdentifier.fromCql("Foo").asInternal()).isEqualTo("foo"); - assertThat(CqlIdentifier.fromCql("\"Foo\"").asInternal()).isEqualTo("Foo"); - assertThat(CqlIdentifier.fromCql("\"foo bar\"").asInternal()).isEqualTo("foo bar"); - assertThat(CqlIdentifier.fromCql("\"foo\"\"bar\"").asInternal()).isEqualTo("foo\"bar"); - assertThat(CqlIdentifier.fromCql("\"create\"").asInternal()).isEqualTo("create"); - // JAVA-2883: this would fail under turkish locale if it was used internally - assertThat(CqlIdentifier.fromCql("TITLE").asInternal()).isEqualTo("title"); - } finally { - Locale.setDefault(def); - } - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_from_valid_cql_if_special_characters() { - CqlIdentifier.fromCql("foo bar"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_build_from_valid_cql_if_reserved_keyword() { - CqlIdentifier.fromCql("Create"); - } - - @Test - public void should_format_as_cql() { - assertThat(CqlIdentifier.fromInternal("foo").asCql(false)).isEqualTo("\"foo\""); - assertThat(CqlIdentifier.fromInternal("Foo").asCql(false)).isEqualTo("\"Foo\""); - assertThat(CqlIdentifier.fromInternal("foo bar").asCql(false)).isEqualTo("\"foo bar\""); - assertThat(CqlIdentifier.fromInternal("foo\"bar").asCql(false)).isEqualTo("\"foo\"\"bar\""); - assertThat(CqlIdentifier.fromInternal("create").asCql(false)).isEqualTo("\"create\""); - } - - @Test - public void should_format_as_pretty_cql() { - assertThat(CqlIdentifier.fromInternal("foo").asCql(true)).isEqualTo("foo"); - assertThat(CqlIdentifier.fromInternal("Foo").asCql(true)).isEqualTo("\"Foo\""); - assertThat(CqlIdentifier.fromInternal("foo bar").asCql(true)).isEqualTo("\"foo bar\""); - assertThat(CqlIdentifier.fromInternal("foo\"bar").asCql(true)).isEqualTo("\"foo\"\"bar\""); - assertThat(CqlIdentifier.fromInternal("create").asCql(true)).isEqualTo("\"create\""); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java b/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java deleted file mode 100644 index 61beb5cea51..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.assertj.core.api.AbstractComparableAssert; - -public class VersionAssert extends AbstractComparableAssert { - - public VersionAssert(Version actual) { - super(actual, VersionAssert.class); - } - - public VersionAssert hasMajorMinorPatch(int major, int minor, int patch) { - assertThat(actual.getMajor()).isEqualTo(major); - assertThat(actual.getMinor()).isEqualTo(minor); - assertThat(actual.getPatch()).isEqualTo(patch); - return this; - } - - public VersionAssert hasDsePatch(int dsePatch) { - assertThat(actual.getDSEPatch()).isEqualTo(dsePatch); - return this; - } - - public VersionAssert hasPreReleaseLabels(String... 
labels) { - assertThat(actual.getPreReleaseLabels()).containsExactly(labels); - return this; - } - - public VersionAssert hasNoPreReleaseLabels() { - assertThat(actual.getPreReleaseLabels()).isNull(); - return this; - } - - public VersionAssert hasBuildLabel(String label) { - assertThat(actual.getBuildLabel()).isEqualTo(label); - return this; - } - - public VersionAssert hasNextStable(String version) { - assertThat(actual.nextStable()).isEqualTo(Version.parse(version)); - return this; - } - - @Override - public VersionAssert hasToString(String string) { - assertThat(actual.toString()).isEqualTo(string); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java deleted file mode 100644 index bce30816f9c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class VersionTest { - - @Test - public void should_parse_release_version() { - assertThat(Version.parse("1.2.19")) - .hasMajorMinorPatch(1, 2, 19) - .hasDsePatch(-1) - .hasNoPreReleaseLabels() - .hasBuildLabel(null) - .hasNextStable("1.2.19") - .hasToString("1.2.19"); - } - - @Test - public void should_parse_release_without_patch() { - assertThat(Version.parse("1.2")).hasMajorMinorPatch(1, 2, 0); - } - - @Test - public void should_parse_pre_release_version() { - assertThat(Version.parse("1.2.0-beta1-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 0) - .hasDsePatch(-1) - .hasPreReleaseLabels("beta1", "SNAPSHOT") - .hasBuildLabel(null) - .hasToString("1.2.0-beta1-SNAPSHOT") - .hasNextStable("1.2.0"); - } - - @Test - public void should_allow_tilde_as_first_pre_release_delimiter() { - assertThat(Version.parse("1.2.0~beta1-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 0) - .hasDsePatch(-1) - .hasPreReleaseLabels("beta1", "SNAPSHOT") - .hasBuildLabel(null) - .hasToString("1.2.0-beta1-SNAPSHOT") - .hasNextStable("1.2.0"); - } - - @Test - public void should_parse_dse_patch() { - assertThat(Version.parse("1.2.19.2-SNAPSHOT")) - .hasMajorMinorPatch(1, 2, 19) - .hasDsePatch(2) - .hasToString("1.2.19.2-SNAPSHOT") - .hasNextStable("1.2.19.2"); - } - - @Test - public void should_order_versions() { - // by component - assertOrder("1.2.0", "2.0.0", -1); - assertOrder("2.0.0", "2.1.0", -1); - assertOrder("2.0.1", "2.0.2", -1); - assertOrder("2.0.1.1", "2.0.1.2", -1); - - // shortened vs. longer version - assertOrder("2.0", "2.0.0", 0); - assertOrder("2.0", "2.0.1", -1); - - // any DSE version is higher than no DSE version - assertOrder("2.0.0", "2.0.0.0", -1); - assertOrder("2.0.0", "2.0.0.1", -1); - - // pre-release vs. 
release - assertOrder("2.0.0-beta1", "2.0.0", -1); - assertOrder("2.0.0-SNAPSHOT", "2.0.0", -1); - assertOrder("2.0.0-beta1-SNAPSHOT", "2.0.0", -1); - - // pre-release vs. pre-release - assertOrder("2.0.0-a-b-c", "2.0.0-a-b-d", -1); - assertOrder("2.0.0-a-b-c", "2.0.0-a-b-c-d", -1); - - // build number ignored - assertOrder("2.0.0+build01", "2.0.0+build02", 0); - } - - private void assertOrder(String version1, String version2, int expected) { - assertThat(Version.parse(version1).compareTo(Version.parse(version2))).isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java deleted file mode 100644 index 44d2acfbb2e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.Credentials; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class ProgrammaticPlainTextAuthProviderTest { - - @Mock private EndPoint endpoint; - - @Test - public void should_return_correct_credentials_without_authorization_id() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass"); - // when - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); - } - - @Test - public void should_return_correct_credentials_with_authorization_id() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass", "proxy"); - // when - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo("proxy".toCharArray()); - } - - @Test - public void should_change_username() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass"); - // when - provider.setUsername("user2"); - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user2".toCharArray()); - 
assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); - } - - @Test - public void should_change_password() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass"); - // when - provider.setPassword("pass2"); - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass2".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); - } - - @Test - public void should_change_authorization_id() { - // given - ProgrammaticPlainTextAuthProvider provider = - new ProgrammaticPlainTextAuthProvider("user", "pass", "proxy"); - // when - provider.setAuthorizationId("proxy2"); - Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); - // then - assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); - assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); - assertThat(credentials.getAuthorizationId()).isEqualTo("proxy2".toCharArray()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java deleted file mode 100644 index ec0410ed868..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.SerializationHelper; -import java.time.Duration; -import java.util.function.Consumer; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class OptionsMapTest { - @Mock private Consumer mockListener; - - @Test - public void should_serialize_and_deserialize() { - // Given - OptionsMap initial = OptionsMap.driverDefaults(); - Duration slowTimeout = Duration.ofSeconds(30); - initial.put("slow", TypedDriverOption.REQUEST_TIMEOUT, slowTimeout); - initial.addChangeListener(mockListener); - - // When - OptionsMap deserialized = SerializationHelper.serializeAndDeserialize(initial); - - // Then - assertThat(deserialized.get(TypedDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofSeconds(2)); - assertThat(deserialized.get("slow", TypedDriverOption.REQUEST_TIMEOUT)).isEqualTo(slowTimeout); - // Listeners are transient - assertThat(deserialized.removeChangeListener(mockListener)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java deleted file mode 100644 index eee4000a459..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.config; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.HashSet; -import java.util.Set; -import org.junit.Test; - -public class TypedDriverOptionTest { - - /** - * Checks that every built-in {@link DriverOption} has an equivalent constant in {@link - * TypedDriverOption}. - */ - @Test - public void should_have_equivalents_for_all_builtin_untyped_options() { - Set optionsThatHaveATypedEquivalent = new HashSet<>(); - for (TypedDriverOption typedOption : TypedDriverOption.builtInValues()) { - optionsThatHaveATypedEquivalent.add(typedOption.getRawOption()); - } - - // These options are only used internally to compare policy configurations across profiles. - // Users never use them directly, so they don't need typed equivalents. 
- Set exclusions = - ImmutableSet.of( - DefaultDriverOption.LOAD_BALANCING_POLICY, - DefaultDriverOption.RETRY_POLICY, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY); - - for (DriverOption option : - ImmutableSet.builder() - .add(DefaultDriverOption.values()) - .add(DseDriverOption.values()) - .build()) { - if (!exclusions.contains(option)) { - assertThat(optionsThatHaveATypedEquivalent) - .as( - "Couldn't find a typed equivalent for %s.%s. " - + "You need to either add a constant in %s, or an exclusion in this test.", - option.getClass().getSimpleName(), option, TypedDriverOption.class.getSimpleName()) - .contains(option); - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java deleted file mode 100644 index 9904b1e27d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import java.nio.ByteBuffer; -import org.junit.Test; - -public class StatementBuilderTest { - - private static class MockSimpleStatementBuilder - extends StatementBuilder { - - public MockSimpleStatementBuilder() { - super(); - } - - public MockSimpleStatementBuilder(SimpleStatement template) { - super(template); - } - - @Override - public SimpleStatement build() { - - SimpleStatement rv = mock(SimpleStatement.class); - when(rv.isTracing()).thenReturn(this.tracing); - when(rv.getRoutingKey()).thenReturn(this.routingKey); - return rv; - } - } - - @Test - public void should_handle_set_tracing_without_args() { - - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); - assertThat(builder.build().isTracing()).isFalse(); - builder.setTracing(); - assertThat(builder.build().isTracing()).isTrue(); - } - - @Test - public void should_handle_set_tracing_with_args() { - - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); - assertThat(builder.build().isTracing()).isFalse(); - builder.setTracing(true); - assertThat(builder.build().isTracing()).isTrue(); - builder.setTracing(false); - assertThat(builder.build().isTracing()).isFalse(); - } - - @Test - public void should_override_set_tracing_in_template() { - - SimpleStatement template = SimpleStatement.builder("select * from system.peers").build(); - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(template); - assertThat(builder.build().isTracing()).isFalse(); - builder.setTracing(true); - assertThat(builder.build().isTracing()).isTrue(); - - template = SimpleStatement.builder("select * from system.peers").setTracing().build(); - builder = new MockSimpleStatementBuilder(template); - 
assertThat(builder.build().isTracing()).isTrue(); - builder.setTracing(false); - assertThat(builder.build().isTracing()).isFalse(); - } - - @Test - public void should_match_set_routing_key_vararg() { - - ByteBuffer buff1 = ByteBuffer.wrap("the quick brown fox".getBytes(Charsets.UTF_8)); - ByteBuffer buff2 = ByteBuffer.wrap("jumped over the lazy dog".getBytes(Charsets.UTF_8)); - - Statement expectedStmt = - SimpleStatement.builder("select * from system.peers").build().setRoutingKey(buff1, buff2); - - MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); - Statement builderStmt = builder.setRoutingKey(buff1, buff2).build(); - assertThat(expectedStmt.getRoutingKey()).isEqualTo(builderStmt.getRoutingKey()); - - /* Confirm that order matters here */ - builderStmt = builder.setRoutingKey(buff2, buff1).build(); - assertThat(expectedStmt.getRoutingKey()).isNotEqualTo(builderStmt.getRoutingKey()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java deleted file mode 100644 index af2dccd0432..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.cql.DefaultBoundStatement; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class StatementProfileTest { - - private static final DriverExecutionProfile PROFILE = mock(DriverExecutionProfile.class); - private static final String NAME = "mockProfileName"; - - @Test - @UseDataProvider("statements") - public void should_set_profile_and_name_on_statement( - Statement statement, - Operation operation1, - Operation operation2, - String expectedName, - DriverExecutionProfile expectedProfile) { - - statement = operation1.applyTo(statement); - statement = operation2.applyTo(statement); - - assertThat(statement.getExecutionProfileName()).isEqualTo(expectedName); - assertThat(statement.getExecutionProfile()).isEqualTo(expectedProfile); - } - - @Test - @UseDataProvider("builders") - public void should_set_profile_and_name_on_builder( - StatementBuilder builder, - Operation operation1, - Operation operation2, - String expectedName, - DriverExecutionProfile expectedProfile) { - - builder = operation1.applyTo(builder); - builder = operation2.applyTo(builder); - - Statement statement = builder.build(); - - assertThat(statement.getExecutionProfileName()).isEqualTo(expectedName); - 
assertThat(statement.getExecutionProfile()).isEqualTo(expectedProfile); - } - - private static Object[][] scenarios() { - return new Object[][] { - // operation1, operation2, expectedName, expectedProfile - - // only one set: - new Object[] {setProfile(PROFILE), noop(), null, PROFILE}, - new Object[] {setName(NAME), noop(), NAME, null}, - - // last one wins: - new Object[] {setProfile(PROFILE), setName(NAME), NAME, null}, - new Object[] {setName(NAME), setProfile(PROFILE), null, PROFILE}, - - // null does not unset other: - new Object[] {setProfile(PROFILE), setName(null), null, PROFILE}, - new Object[] {setName(NAME), setProfile(null), NAME, null}, - }; - } - - @DataProvider - public static Object[][] statements() { - SimpleStatement simpleStatement = SimpleStatement.newInstance("mock query"); - Object[][] statements = - TestDataProviders.fromList( - simpleStatement, - newBoundStatement(), - BatchStatement.newInstance(BatchType.LOGGED, simpleStatement)); - - return TestDataProviders.combine(statements, scenarios()); - } - - @DataProvider - public static Object[][] builders() { - SimpleStatement simpleStatement = SimpleStatement.newInstance("mock query"); - Object[][] builders = - TestDataProviders.fromList( - SimpleStatement.builder(simpleStatement), - new BoundStatementBuilder(newBoundStatement()), - BatchStatement.builder(BatchType.LOGGED).addStatement(simpleStatement)); - - return TestDataProviders.combine(builders, scenarios()); - } - - private interface Operation { - - Statement applyTo(Statement statement); - - StatementBuilder applyTo(StatementBuilder builder); - } - - private static Operation setProfile(DriverExecutionProfile profile) { - return new Operation() { - @Override - public Statement applyTo(Statement statement) { - return statement.setExecutionProfile(profile); - } - - @Override - public StatementBuilder applyTo(StatementBuilder builder) { - return builder.setExecutionProfile(profile); - } - }; - } - - private static Operation setName(String 
name) { - return new Operation() { - @Override - public Statement applyTo(Statement statement) { - return statement.setExecutionProfileName(name); - } - - @Override - public StatementBuilder applyTo(StatementBuilder builder) { - return builder.setExecutionProfileName(name); - } - }; - } - - private static Operation noop() { - return new Operation() { - @Override - public Statement applyTo(Statement statement) { - return statement; - } - - @Override - public StatementBuilder applyTo(StatementBuilder builder) { - return builder; - } - }; - } - - private static BoundStatement newBoundStatement() { - // Mock the minimum state needed to create a DefaultBoundStatement that can also be used to - // initialize a builder - PreparedStatement preparedStatement = mock(PreparedStatement.class); - ColumnDefinitions variableDefinitions = mock(ColumnDefinitions.class); - when(preparedStatement.getVariableDefinitions()).thenReturn(variableDefinitions); - return new DefaultBoundStatement( - preparedStatement, - variableDefinitions, - new ByteBuffer[0], - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - Statement.NO_DEFAULT_TIMESTAMP, - null, - 5000, - null, - null, - Duration.ZERO, - null, - null, - null, - Statement.NO_NOW_IN_SECONDS); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java deleted file mode 100644 index f55453b3eba..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoUnit; -import java.time.temporal.UnsupportedTemporalTypeException; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CqlDurationTest { - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_from_string_with_standard_pattern(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlDuration.from("1y2mo")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("-1y2mo")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("1Y2MO")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("2w")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); - assertThat(CqlDuration.from("2d10h")) - 
.isEqualTo(CqlDuration.newInstance(0, 2, 10 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("2d")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("30h")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("30h20m")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("20m")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("56s")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("567ms")) - .isEqualTo(CqlDuration.newInstance(0, 0, 567 * CqlDuration.NANOS_PER_MILLI)); - assertThat(CqlDuration.from("1950us")) - .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); - assertThat(CqlDuration.from("1950µs")) - .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); - assertThat(CqlDuration.from("1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); - assertThat(CqlDuration.from("1950000NS")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); - assertThat(CqlDuration.from("-1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, -1950000)); - assertThat(CqlDuration.from("1y3mo2h10m")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_from_string_with_iso8601_pattern(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlDuration.from("P1Y2D")).isEqualTo(CqlDuration.newInstance(12, 2, 0)); - assertThat(CqlDuration.from("P1Y2M")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("P2W")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); - 
assertThat(CqlDuration.from("P1YT2H")) - .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("-P1Y2M")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("P2D")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("PT30H")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("PT30H20M")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("PT20M")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("PT56S")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("P1Y3MT2H10M")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_from_string_with_iso8601_alternative_pattern(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(CqlDuration.from("P0001-00-02T00:00:00")) - .isEqualTo(CqlDuration.newInstance(12, 2, 0)); - assertThat(CqlDuration.from("P0001-02-00T00:00:00")) - .isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("P0001-00-00T02:00:00")) - .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("-P0001-02-00T00:00:00")) - .isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("P0000-00-02T00:00:00")) - .isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("P0000-00-00T30:00:00")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("P0000-00-00T30:20:00")) - .isEqualTo( - 
CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("P0000-00-00T00:20:00")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("P0000-00-00T00:00:56")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("P0001-03-00T02:10:00")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); - } finally { - Locale.setDefault(def); - } - } - - @Test - public void should_fail_to_parse_invalid_durations() { - assertInvalidDuration( - Long.MAX_VALUE + "d", - "Invalid duration. The total number of days must be less or equal to 2147483647"); - assertInvalidDuration("2µ", "Unable to convert '2µ' to a duration"); - assertInvalidDuration("-2µ", "Unable to convert '2µ' to a duration"); - assertInvalidDuration("12.5s", "Unable to convert '12.5s' to a duration"); - assertInvalidDuration("2m12.5s", "Unable to convert '2m12.5s' to a duration"); - assertInvalidDuration("2m-12s", "Unable to convert '2m-12s' to a duration"); - assertInvalidDuration("12s3s", "Invalid duration. The seconds are specified multiple times"); - assertInvalidDuration("12s3m", "Invalid duration. The seconds should be after minutes"); - assertInvalidDuration("1Y3M4D", "Invalid duration. 
The minutes should be after days"); - assertInvalidDuration("P2Y3W", "Unable to convert 'P2Y3W' to a duration"); - assertInvalidDuration("P0002-00-20", "Unable to convert 'P0002-00-20' to a duration"); - } - - private void assertInvalidDuration(String duration, String expectedErrorMessage) { - try { - CqlDuration.from(duration); - fail("Expected RuntimeException"); - } catch (RuntimeException e) { - assertThat(e.getMessage()).isEqualTo(expectedErrorMessage); - } - } - - @Test - public void should_get_by_unit() { - CqlDuration duration = CqlDuration.from("3mo2d15s"); - assertThat(duration.get(ChronoUnit.MONTHS)).isEqualTo(3); - assertThat(duration.get(ChronoUnit.DAYS)).isEqualTo(2); - assertThat(duration.get(ChronoUnit.NANOS)).isEqualTo(15 * CqlDuration.NANOS_PER_SECOND); - assertThatThrownBy(() -> duration.get(ChronoUnit.YEARS)) - .isInstanceOf(UnsupportedTemporalTypeException.class); - } - - @Test - public void should_add_to_temporal() { - ZonedDateTime dateTime = ZonedDateTime.parse("2018-10-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.plus(CqlDuration.from("1mo"))) - .isEqualTo("2018-11-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.plus(CqlDuration.from("1mo1h10s"))) - .isEqualTo("2018-11-04T01:00:10-07:00[America/Los_Angeles]"); - // 11-04 2:00 is daylight saving time end - assertThat(dateTime.plus(CqlDuration.from("1mo3h"))) - .isEqualTo("2018-11-04T02:00-08:00[America/Los_Angeles]"); - } - - @Test - public void should_subtract_from_temporal() { - ZonedDateTime dateTime = ZonedDateTime.parse("2018-10-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.minus(CqlDuration.from("2mo"))) - .isEqualTo("2018-08-04T00:00-07:00[America/Los_Angeles]"); - assertThat(dateTime.minus(CqlDuration.from("1h15s15ns"))) - .isEqualTo("2018-10-03T22:59:44.999999985-07:00[America/Los_Angeles]"); - } - - @Test - public void should_serialize_and_deserialize() throws Exception { - CqlDuration initial = CqlDuration.from("3mo2d15s"); - CqlDuration 
deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - public void should_serialize_and_deserialize_negative() throws Exception { - CqlDuration initial = CqlDuration.from("-2d15m"); - CqlDuration deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java deleted file mode 100644 index 3e0872cb946..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.io.ByteArrayInputStream; -import java.io.ObjectInputStream; -import java.io.ObjectStreamException; -import java.time.LocalTime; -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.stream.Collectors; -import org.apache.commons.codec.DecoderException; -import org.apache.commons.codec.binary.Hex; -import org.assertj.core.util.Lists; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CqlVectorTest { - - @DataProvider - public static Object[][] dataProvider() { - return new Object[][] { - {new Float[] {1.0f, 2.5f}}, - {new LocalTime[] {LocalTime.of(1, 2), LocalTime.of(3, 4)}}, - {new List[] {Arrays.asList(1, 2), Arrays.asList(3, 4)}}, - {new CqlVector[] {CqlVector.newInstance("a", "bc"), CqlVector.newInstance("d", "ef")}} - }; - } - - private void validate_built_vector(CqlVector vec, Object[] expectedVals) { - assertThat(vec.size()).isEqualTo(2); - assertThat(vec.isEmpty()).isFalse(); - assertThat(vec.get(0)).isEqualTo(expectedVals[0]); - assertThat(vec.get(1)).isEqualTo(expectedVals[1]); - } - - @UseDataProvider("dataProvider") - @Test - public void should_build_vector_from_elements(Object[] vals) { - validate_built_vector(CqlVector.newInstance(vals), vals); - } - - @Test - @UseDataProvider("dataProvider") - public void 
should_build_vector_from_list(Object[] vals) { - validate_built_vector(CqlVector.newInstance(Lists.newArrayList(vals)), vals); - } - - @Test - public void should_throw_from_null_string() { - assertThatThrownBy( - () -> { - CqlVector.from(null, TypeCodecs.FLOAT); - }) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_throw_from_empty_string() { - - assertThatThrownBy( - () -> { - CqlVector.from("", TypeCodecs.FLOAT); - }) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_throw_when_building_with_nulls() { - - assertThatThrownBy( - () -> { - CqlVector.newInstance(1.1f, null, 2.2f); - }) - .isInstanceOf(IllegalArgumentException.class); - - Float[] theArray = new Float[] {1.1f, null, 2.2f}; - assertThatThrownBy( - () -> { - CqlVector.newInstance(theArray); - }) - .isInstanceOf(IllegalArgumentException.class); - - List theList = Lists.newArrayList(1.1f, null, 2.2f); - assertThatThrownBy( - () -> { - CqlVector.newInstance(theList); - }) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_build_empty_vector() { - CqlVector vector = CqlVector.newInstance(); - assertThat(vector.isEmpty()).isTrue(); - assertThat(vector.size()).isEqualTo(0); - } - - @Test - @UseDataProvider("dataProvider") - public void should_behave_mostly_like_a_list(T[] vals) { - T[] theArray = Arrays.copyOf(vals, vals.length); - CqlVector vector = CqlVector.newInstance(theArray); - assertThat(vector.get(0)).isEqualTo(theArray[0]); - vector.set(0, theArray[1]); - assertThat(vector.get(0)).isEqualTo(theArray[1]); - assertThat(vector.isEmpty()).isFalse(); - assertThat(vector.size()).isEqualTo(2); - Iterator iterator = vector.iterator(); - assertThat(iterator.next()).isEqualTo(theArray[1]); - assertThat(iterator.next()).isEqualTo(theArray[1]); - } - - @Test - @UseDataProvider("dataProvider") - public void should_play_nicely_with_streams(T[] vals) { - CqlVector vector = CqlVector.newInstance(vals); - List 
results = - vector.stream() - .map(Object::toString) - .collect(Collectors.toCollection(() -> new ArrayList())); - for (int i = 0; i < vector.size(); ++i) { - assertThat(results.get(i)).isEqualTo(vector.get(i).toString()); - } - } - - @Test - @UseDataProvider("dataProvider") - public void should_reflect_changes_to_mutable_list(T[] vals) { - List theList = Lists.newArrayList(vals); - CqlVector vector = CqlVector.newInstance(theList); - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(vals[1]); - - T newVal = vals[0]; - theList.set(1, newVal); - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(newVal); - } - - @Test - @UseDataProvider("dataProvider") - public void should_reflect_changes_to_array(T[] vals) { - T[] theArray = Arrays.copyOf(vals, vals.length); - CqlVector vector = CqlVector.newInstance(theArray); - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(theArray[1]); - - T newVal = theArray[0]; - theArray[1] = newVal; - assertThat(vector.size()).isEqualTo(2); - assertThat(vector.get(1)).isEqualTo(newVal); - } - - @Test - @UseDataProvider("dataProvider") - public void should_correctly_compare_vectors(T[] vals) { - CqlVector vector1 = CqlVector.newInstance(vals); - CqlVector vector2 = CqlVector.newInstance(vals); - CqlVector vector3 = CqlVector.newInstance(Lists.newArrayList(vals)); - assertThat(vector1).isNotSameAs(vector2); - assertThat(vector1).isEqualTo(vector2); - assertThat(vector1).isNotSameAs(vector3); - assertThat(vector1).isEqualTo(vector3); - - T[] differentArgs = Arrays.copyOf(vals, vals.length); - T newVal = differentArgs[1]; - differentArgs[0] = newVal; - CqlVector vector4 = CqlVector.newInstance(differentArgs); - assertThat(vector1).isNotSameAs(vector4); - assertThat(vector1).isNotEqualTo(vector4); - - T[] biggerArgs = Arrays.copyOf(vals, vals.length + 1); - biggerArgs[biggerArgs.length - 1] = newVal; - CqlVector vector5 = CqlVector.newInstance(biggerArgs); - 
assertThat(vector1).isNotSameAs(vector5); - assertThat(vector1).isNotEqualTo(vector5); - } - - @Test - @UseDataProvider("dataProvider") - public void should_serialize_and_deserialize(T[] vals) throws Exception { - CqlVector initial = CqlVector.newInstance(vals); - CqlVector deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - public void should_serialize_and_deserialize_empty_vector() throws Exception { - CqlVector initial = CqlVector.newInstance(Collections.emptyList()); - CqlVector deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - @UseDataProvider("dataProvider") - public void should_serialize_and_deserialize_unserializable_list(T[] vals) throws Exception { - CqlVector initial = - CqlVector.newInstance( - new AbstractList() { - @Override - public T get(int index) { - return vals[index]; - } - - @Override - public int size() { - return vals.length; - } - }); - CqlVector deserialized = SerializationHelper.serializeAndDeserialize(initial); - assertThat(deserialized).isEqualTo(initial); - } - - @Test - public void should_not_use_preallocate_serialized_size() throws DecoderException { - // serialized CqlVector(1.0f, 2.5f, 3.0f) with size field adjusted to Integer.MAX_VALUE - byte[] suspiciousBytes = - Hex.decodeHex( - "aced000573720042636f6d2e64617461737461782e6f73732e6472697665722e6170692e636f72652e646174612e43716c566563746f722453657269616c697a6174696f6e50726f78790000000000000001030000787077047fffffff7372000f6a6176612e6c616e672e466c6f6174daedc9a2db3cf0ec02000146000576616c7565787200106a6176612e6c616e672e4e756d62657286ac951d0b94e08b02000078703f8000007371007e0002402000007371007e00024040000078" - .toCharArray()); - try { - new ObjectInputStream(new ByteArrayInputStream(suspiciousBytes)).readObject(); - fail("Should not be able to deserialize bytes with incorrect size field"); - } catch (Exception e) { - // check 
we fail to deserialize, rather than OOM - assertThat(e).isInstanceOf(ObjectStreamException.class); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java deleted file mode 100644 index a5b9b447e6a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.session.Session; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SafeInitNodeStateListenerTest { - - @Mock private NodeStateListener delegate; - @Mock private Node node; - @Mock private Session session; - - @Test - public void should_replay_init_events() { - SafeInitNodeStateListener wrapper = new SafeInitNodeStateListener(delegate, true); - - // Not a realistic sequence of invocations in the driver, but that doesn't matter - wrapper.onAdd(node); - wrapper.onUp(node); - wrapper.onSessionReady(session); - wrapper.onDown(node); - - InOrder inOrder = Mockito.inOrder(delegate); - inOrder.verify(delegate).onSessionReady(session); - inOrder.verify(delegate).onAdd(node); - inOrder.verify(delegate).onUp(node); - inOrder.verify(delegate).onDown(node); - } - - @Test - public void should_discard_init_events() { - SafeInitNodeStateListener wrapper = new SafeInitNodeStateListener(delegate, false); - - wrapper.onAdd(node); - wrapper.onUp(node); - wrapper.onSessionReady(session); - wrapper.onDown(node); - - InOrder inOrder = Mockito.inOrder(delegate); - inOrder.verify(delegate).onSessionReady(session); - inOrder.verify(delegate).onDown(node); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java deleted file mode 100644 index 3963bf6de84..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.paging; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.paging.OffsetPager.Page; -import com.datastax.oss.driver.internal.core.MockAsyncPagingIterable; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class OffsetPagerAsyncTest extends OffsetPagerTestBase { - - @Override - protected Page getActualPage( - OffsetPager pager, OffsetPagerTestFixture fixture, int fetchSize) { - CompletionStage> pageFuture = - pager.getPage(fixture.getAsyncIterable(fetchSize), fixture.getRequestedPage()); - return CompletableFutures.getCompleted(pageFuture); - } - - /** - * Covers the corner case where the server sends back an empty frame at the end of the result set. 
- */ - @Test - @UseDataProvider("fetchSizes") - public void should_return_last_page_when_result_finishes_with_empty_frame(int fetchSize) { - MockAsyncPagingIterable iterable = - new MockAsyncPagingIterable<>(ImmutableList.of("a", "b", "c"), fetchSize, true); - OffsetPager pager = new OffsetPager(3); - Page page = CompletableFutures.getCompleted(pager.getPage(iterable, 1)); - - assertThat(page.getElements()).containsExactly("a", "b", "c"); - assertThat(page.getPageNumber()).isEqualTo(1); - assertThat(page.isLast()).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java deleted file mode 100644 index 0d8b380dd49..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -public class OffsetPagerSyncTest extends OffsetPagerTestBase { - - @Override - protected OffsetPager.Page getActualPage( - OffsetPager pager, OffsetPagerTestFixture fixture, /*ignored*/ int fetchSize) { - return pager.getPage(fixture.getSyncIterable(), fixture.getRequestedPage()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java deleted file mode 100644 index 7f9ca2ddaa2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -import com.datastax.oss.driver.TestDataProviders; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public abstract class OffsetPagerTestBase { - - /** - * The fetch size only matters for the async implementation. For sync this will essentially run - * the same fixture 4 times, but that's not a problem because tests are fast. - */ - @DataProvider - public static Object[][] fetchSizes() { - return TestDataProviders.fromList(1, 2, 3, 100); - } - - @DataProvider - public static Object[][] scenarios() { - Object[][] fixtures = - TestDataProviders.fromList( - // ------- inputs -------- | ------ expected ------- - // iterable | page | size | page | contents | last? - "a,b,c,d,e,f | 1 | 3 | 1 | a,b,c | false", - "a,b,c,d,e,f | 2 | 3 | 2 | d,e,f | true", - "a,b,c,d,e,f | 2 | 4 | 2 | e,f | true", - "a,b,c,d,e,f | 2 | 5 | 2 | f | true", - "a,b,c | 1 | 3 | 1 | a,b,c | true", - "a,b | 1 | 3 | 1 | a,b | true", - "a | 1 | 3 | 1 | a | true", - // Empty iterator => return one empty page - " | 1 | 3 | 1 | | true", - // Past the end => return last page - "a,b,c,d,e,f | 3 | 3 | 2 | d,e,f | true", - "a,b,c,d,e | 3 | 3 | 2 | d,e | true"); - return TestDataProviders.combine(fixtures, fetchSizes()); - } - - @Test - @UseDataProvider("scenarios") - public void should_return_existing_page(String fixtureSpec, int fetchSize) { - OffsetPagerTestFixture fixture = new OffsetPagerTestFixture(fixtureSpec); - OffsetPager pager = new OffsetPager(fixture.getPageSize()); - OffsetPager.Page actualPage = getActualPage(pager, fixture, fetchSize); - fixture.assertMatches(actualPage); - } - - protected abstract OffsetPager.Page getActualPage( - OffsetPager pager, OffsetPagerTestFixture fixture, int fetchSize); -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java deleted file mode 100644 index 91079722aa2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.paging; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.internal.core.MockAsyncPagingIterable; -import com.datastax.oss.driver.internal.core.MockPagingIterable; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import java.util.List; - -public class OffsetPagerTestFixture { - - private static final Splitter SPEC_SPLITTER = Splitter.on('|').trimResults(); - private static final Splitter LIST_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings(); - - private final List inputElements; - private final int requestedPage; - private final int pageSize; - private final int expectedPageNumber; - private final List expectedElements; - private final boolean expectedIsLast; - - public OffsetPagerTestFixture(String spec) { - List components = SPEC_SPLITTER.splitToList(spec); - int size = components.size(); - if (size != 3 && size != 6) { - fail("Invalid fixture spec, expected 3 or 5 components"); - } - - this.inputElements = LIST_SPLITTER.splitToList(components.get(0)); - this.requestedPage = Integer.parseInt(components.get(1)); - this.pageSize = Integer.parseInt(components.get(2)); - if (size == 3) { - this.expectedPageNumber = -1; - this.expectedElements = null; - this.expectedIsLast = false; - } else { - this.expectedPageNumber = Integer.parseInt(components.get(3)); - this.expectedElements = LIST_SPLITTER.splitToList(components.get(4)); - this.expectedIsLast = Boolean.parseBoolean(components.get(5)); - } - } - - public PagingIterable getSyncIterable() { - return new MockPagingIterable<>(inputElements.iterator()); - } - - public MockAsyncPagingIterable getAsyncIterable(int fetchSize) { - return new MockAsyncPagingIterable<>(inputElements, fetchSize, false); - } - - public int getRequestedPage() { - return requestedPage; - } - - public int 
getPageSize() { - return pageSize; - } - - public void assertMatches(OffsetPager.Page actualPage) { - assertThat(actualPage.getPageNumber()).isEqualTo(expectedPageNumber); - assertThat(actualPage.getElements()).isEqualTo(expectedElements); - assertThat(actualPage.isLast()).isEqualTo(expectedIsLast); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java deleted file mode 100644 index e4463d833bf..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.retry; - -import static com.datastax.oss.driver.api.core.ConsistencyLevel.EACH_QUORUM; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.ONE; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.QUORUM; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.SERIAL; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.THREE; -import static com.datastax.oss.driver.api.core.ConsistencyLevel.TWO; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.IGNORE; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETHROW; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_NEXT; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_SAME; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH_LOG; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.CAS; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.CDC; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.COUNTER; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.SIMPLE; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.UNLOGGED_BATCH; -import static com.datastax.oss.driver.api.core.servererrors.WriteType.VIEW; - -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import 
com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy; -import org.junit.Test; - -public class ConsistencyDowngradingRetryPolicyTest extends RetryPolicyTestBase { - - public ConsistencyDowngradingRetryPolicyTest() { - super(new ConsistencyDowngradingRetryPolicy("test")); - } - - @Test - public void should_process_read_timeouts() { - // retry count != 0 - assertOnReadTimeout(QUORUM, 2, 2, false, 1).hasDecision(RETHROW); - // serial CL - assertOnReadTimeout(SERIAL, 2, 2, false, 0).hasDecision(RETHROW); - // received < blockFor - assertOnReadTimeout(QUORUM, 4, 3, true, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); - assertOnReadTimeout(QUORUM, 4, 3, false, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); - assertOnReadTimeout(QUORUM, 3, 2, true, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); - assertOnReadTimeout(QUORUM, 3, 2, false, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); - assertOnReadTimeout(QUORUM, 2, 1, true, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(QUORUM, 2, 1, false, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(EACH_QUORUM, 2, 0, true, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(EACH_QUORUM, 2, 0, false, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnReadTimeout(QUORUM, 2, 0, true, 0).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 0, false, 0).hasDecision(RETHROW); - // data present - assertOnReadTimeout(QUORUM, 2, 2, false, 0).hasDecision(RETRY_SAME); - assertOnReadTimeout(QUORUM, 2, 2, true, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_write_timeouts() { - // retry count != 0 - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).hasDecision(RETHROW); - // SIMPLE - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 1, 0).hasDecision(IGNORE); - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).hasDecision(RETHROW); - // BATCH - assertOnWriteTimeout(QUORUM, BATCH, 2, 1, 0).hasDecision(IGNORE); - 
assertOnWriteTimeout(QUORUM, BATCH, 2, 0, 0).hasDecision(RETHROW); - // UNLOGGED_BATCH - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 4, 3, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(THREE); - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 3, 2, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(TWO); - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 2, 1, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(ONE); - assertOnWriteTimeout(EACH_QUORUM, UNLOGGED_BATCH, 2, 0, 0) - .hasDecision(RETRY_SAME) - .hasConsistency(ONE); - assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 2, 0, 0).hasDecision(RETHROW); - // BATCH_LOG - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 1, 0).hasDecision(RETRY_SAME); - // others - assertOnWriteTimeout(QUORUM, COUNTER, 2, 1, 0).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, CAS, 2, 1, 0).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, VIEW, 2, 1, 0).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, CDC, 2, 1, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_unavailable() { - // retry count != 0 - assertOnUnavailable(QUORUM, 2, 1, 1).hasDecision(RETHROW); - // SERIAL - assertOnUnavailable(SERIAL, 2, 1, 0).hasDecision(RETRY_NEXT); - // downgrade - assertOnUnavailable(QUORUM, 4, 3, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); - assertOnUnavailable(QUORUM, 3, 2, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); - assertOnUnavailable(QUORUM, 2, 1, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnUnavailable(EACH_QUORUM, 2, 0, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); - assertOnUnavailable(QUORUM, 2, 0, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_aborted_request() { - assertOnRequestAborted(ClosedConnectionException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(ClosedConnectionException.class, 1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 
1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(Throwable.class, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_error_response() { - assertOnErrorResponse(ReadFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(ReadFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - - assertOnErrorResponse(OverloadedException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(OverloadedException.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 1).hasDecision(RETRY_NEXT); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java deleted file mode 100644 index e36ccff2b91..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.retry; - -import static com.datastax.oss.driver.api.core.DefaultConsistencyLevel.QUORUM; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETHROW; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_NEXT; -import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_SAME; -import static com.datastax.oss.driver.api.core.servererrors.DefaultWriteType.BATCH_LOG; -import static com.datastax.oss.driver.api.core.servererrors.DefaultWriteType.SIMPLE; - -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.TruncateException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import org.junit.Test; - -public class DefaultRetryPolicyTest extends RetryPolicyTestBase { - - public DefaultRetryPolicyTest() { - super(new DefaultRetryPolicy(null, null)); - } - - @Test - public void should_process_read_timeouts() { - assertOnReadTimeout(QUORUM, 2, 2, false, 0).hasDecision(RETRY_SAME); - assertOnReadTimeout(QUORUM, 2, 2, false, 1).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 2, true, 
0).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 1, true, 0).hasDecision(RETHROW); - assertOnReadTimeout(QUORUM, 2, 1, false, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_write_timeouts() { - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 0).hasDecision(RETRY_SAME); - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).hasDecision(RETHROW); - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_unavailable() { - assertOnUnavailable(QUORUM, 2, 1, 0).hasDecision(RETRY_NEXT); - assertOnUnavailable(QUORUM, 2, 1, 1).hasDecision(RETHROW); - } - - @Test - public void should_process_aborted_request() { - assertOnRequestAborted(ClosedConnectionException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(ClosedConnectionException.class, 1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 0).hasDecision(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 1).hasDecision(RETRY_NEXT); - assertOnRequestAborted(Throwable.class, 0).hasDecision(RETHROW); - } - - @Test - public void should_process_error_response() { - assertOnErrorResponse(ReadFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(ReadFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 0).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - - assertOnErrorResponse(OverloadedException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(OverloadedException.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 1).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 0).hasDecision(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 1).hasDecision(RETRY_NEXT); - } -} 
diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java deleted file mode 100644 index a57f4ab352f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.retry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryVerdict; -import org.assertj.core.api.AbstractAssert; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public abstract class RetryPolicyTestBase { - private final RetryPolicy policy; - - @Mock private Request request; - - protected RetryPolicyTestBase(RetryPolicy policy) { - this.policy = policy; - } - - protected RetryVerdictAssert assertOnReadTimeout( - ConsistencyLevel cl, int blockFor, int received, boolean dataPresent, int retryCount) { - return new RetryVerdictAssert( - policy.onReadTimeoutVerdict(request, cl, blockFor, received, dataPresent, retryCount)); - } - - protected RetryVerdictAssert assertOnWriteTimeout( - ConsistencyLevel cl, WriteType writeType, int blockFor, int received, int retryCount) { - return new RetryVerdictAssert( - policy.onWriteTimeoutVerdict(request, cl, writeType, blockFor, received, retryCount)); - } - - protected RetryVerdictAssert assertOnUnavailable( - ConsistencyLevel cl, int required, int alive, int retryCount) { - return new RetryVerdictAssert( - policy.onUnavailableVerdict(request, cl, required, alive, retryCount)); - } - - protected RetryVerdictAssert assertOnRequestAborted( - Class errorClass, int retryCount) { - return new RetryVerdictAssert( - policy.onRequestAbortedVerdict(request, mock(errorClass), retryCount)); - } - - protected RetryVerdictAssert assertOnErrorResponse( - Class errorClass, int retryCount) { - return new RetryVerdictAssert( 
- policy.onErrorResponseVerdict(request, mock(errorClass), retryCount)); - } - - public static class RetryVerdictAssert extends AbstractAssert { - RetryVerdictAssert(RetryVerdict actual) { - super(actual, RetryVerdictAssert.class); - } - - public RetryVerdictAssert hasDecision(RetryDecision decision) { - assertThat(actual.getRetryDecision()).isEqualTo(decision); - return this; - } - - public RetryVerdictAssert hasConsistency(ConsistencyLevel cl) { - assertThat(actual) - .isInstanceOf(ConsistencyDowngradingRetryVerdict.class) - .extracting("consistencyLevel") - .isEqualTo(cl); - return this; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java deleted file mode 100644 index efd804fa66e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.specex; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import java.time.Duration; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class ConstantSpeculativeExecutionPolicyTest { - @Mock private DriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private Request request; - - @Before - public void setup() { - when(context.getConfig()).thenReturn(config); - when(config.getProfile(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(defaultProfile); - } - - private void mockOptions(int maxExecutions, long constantDelayMillis) { - when(defaultProfile.getInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX)) - .thenReturn(maxExecutions); - when(defaultProfile.getDuration(DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY)) - .thenReturn(Duration.ofMillis(constantDelayMillis)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_delay_negative() { - mockOptions(1, -10); - new ConstantSpeculativeExecutionPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_max_less_than_one() { - mockOptions(0, 10); - new ConstantSpeculativeExecutionPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } - - @Test - public void 
should_return_delay_until_max() { - mockOptions(3, 10); - SpeculativeExecutionPolicy policy = - new ConstantSpeculativeExecutionPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - - // Initial execution starts, schedule first speculative execution - assertThat(policy.nextExecution(null, null, request, 1)).isEqualTo(10); - // First speculative execution starts, schedule second one - assertThat(policy.nextExecution(null, null, request, 2)).isEqualTo(10); - // Second speculative execution starts, we're at 3 => stop - assertThat(policy.nextExecution(null, null, request, 3)).isNegative(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java deleted file mode 100644 index 9db93b37c91..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import org.junit.Test; - -public class UserDefinedTypeTest { - - private static final UserDefinedType ADDRESS_TYPE = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("test"), CqlIdentifier.fromInternal("address")) - // Not actually used in this test, but UDTs must have fields: - .withField(CqlIdentifier.fromInternal("street"), DataTypes.TEXT) - .frozen() - .build(); - private static final UserDefinedType ACCOUNT_TYPE = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("test"), CqlIdentifier.fromInternal("account")) - .withField(CqlIdentifier.fromInternal("ID"), DataTypes.TEXT) // case-sensitive - .withField(CqlIdentifier.fromInternal("name"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("address"), ADDRESS_TYPE) - .withField( - CqlIdentifier.fromInternal("frozen_list"), DataTypes.frozenListOf(DataTypes.TEXT)) - .withField( - CqlIdentifier.fromInternal("list_of_map"), - DataTypes.listOf(DataTypes.frozenMapOf(DataTypes.TEXT, DataTypes.INT))) - .build(); - - @Test - public void should_describe_as_cql() { - assertThat(ACCOUNT_TYPE.describe(false)) - .isEqualTo( - "CREATE TYPE \"test\".\"account\" ( \"ID\" text, \"name\" text, \"address\" frozen<\"test\".\"address\">, \"frozen_list\" frozen>, \"list_of_map\" list>> );"); - } - - @Test - public void should_describe_as_pretty_cql() { - assertThat(ACCOUNT_TYPE.describe(true)) - .isEqualTo( - "CREATE TYPE test.account (\n" - + " \"ID\" text,\n" - + " name text,\n" - + " address frozen,\n" - + " frozen_list frozen>,\n" - + " list_of_map list>>\n" - + ");"); - } - - @Test - public void should_evaluate_equality() { - assertThat(ACCOUNT_TYPE.newValue()).isEqualTo(ACCOUNT_TYPE.newValue()); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java deleted file mode 100644 index bddb8f92773..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.type.reflect; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import org.junit.Test; - -public class GenericTypeTest { - - @Test - public void should_wrap_class() { - GenericType stringType = GenericType.of(String.class); - assertThat(stringType.__getToken()).isEqualTo(TypeToken.of(String.class)); - } - - @Test - public void should_capture_generic_type() { - GenericType> stringListType = new GenericType>() {}; - TypeToken> stringListToken = new TypeToken>() {}; - assertThat(stringListType.__getToken()).isEqualTo(stringListToken); - } - - @Test - public void should_wrap_classes_in_collection() { - GenericType> mapType = GenericType.mapOf(String.class, Integer.class); - assertThat(mapType.__getToken()).isEqualTo(new TypeToken>() {}); - } - - @Test - public void should_wrap_types_in_collection() { - GenericType>> mapType = - GenericType.mapOf(GenericType.of(String.class), GenericType.listOf(Integer.class)); - assertThat(mapType.__getToken()).isEqualTo(new TypeToken>>() {}); - } - - @Test - public void should_substitute_type_parameters() { - assertThat(optionalOf(GenericType.listOf(String.class)).__getToken()) - .isEqualTo(new TypeToken>>() {}); - assertThat(mapOf(String.class, Integer.class).__getToken()) - .isEqualTo(new TypeToken>() {}); - } - - @Test - public void should_report_supertype() { - assertThat(GenericType.of(Number.class).isSupertypeOf(GenericType.of(Integer.class))).isTrue(); - assertThat(GenericType.of(Integer.class).isSupertypeOf(GenericType.of(Number.class))).isFalse(); - } - - @Test - public void should_report_subtype() { - assertThat(GenericType.of(Number.class).isSubtypeOf(GenericType.of(Integer.class))).isFalse(); - assertThat(GenericType.of(Integer.class).isSubtypeOf(GenericType.of(Number.class))).isTrue(); - } - - @Test - public void 
should_wrap_primitive_type() { - assertThat(GenericType.of(Integer.TYPE).wrap()).isEqualTo(GenericType.of(Integer.class)); - GenericType stringType = GenericType.of(String.class); - assertThat(stringType.wrap()).isSameAs(stringType); - } - - @Test - public void should_unwrap_wrapper_type() { - assertThat(GenericType.of(Integer.class).unwrap()).isEqualTo(GenericType.of(Integer.TYPE)); - GenericType stringType = GenericType.of(String.class); - assertThat(stringType.unwrap()).isSameAs(stringType); - } - - @Test - public void should_return_raw_type() { - assertThat(GenericType.INTEGER.getRawType()).isEqualTo(Integer.class); - assertThat(GenericType.listOf(Integer.class).getRawType()).isEqualTo(List.class); - } - - @Test - public void should_return_super_type() { - GenericType> expectedType = iterableOf(GenericType.INTEGER); - assertThat(GenericType.listOf(Integer.class).getSupertype(Iterable.class)) - .isEqualTo(expectedType); - } - - @Test - public void should_return_sub_type() { - GenericType> superType = iterableOf(GenericType.INTEGER); - assertThat(superType.getSubtype(List.class)).isEqualTo(GenericType.listOf(GenericType.INTEGER)); - } - - @Test - public void should_return_type() { - assertThat(GenericType.INTEGER.getType()).isEqualTo(Integer.class); - } - - @Test - public void should_return_component_type() { - assertThat(GenericType.of(Integer[].class).getComponentType()).isEqualTo(GenericType.INTEGER); - } - - @Test - public void should_report_is_array() { - assertThat(GenericType.INTEGER.isArray()).isFalse(); - assertThat(GenericType.of(Integer[].class).isArray()).isTrue(); - } - - private GenericType> optionalOf(GenericType elementType) { - return new GenericType>() {}.where(new GenericTypeParameter() {}, elementType); - } - - private GenericType> iterableOf(GenericType elementType) { - return new GenericType>() {}.where(new GenericTypeParameter() {}, elementType); - } - - private GenericType> mapOf(Class keyClass, Class valueClass) { - return new 
GenericType>() {}.where(new GenericTypeParameter() {}, keyClass) - .where(new GenericTypeParameter() {}, valueClass); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java deleted file mode 100644 index c547f95e67c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java +++ /dev/null @@ -1,496 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.uuid; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Random; -import java.util.Set; -import java.util.SplittableRandom; -import java.util.UUID; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ThreadLocalRandom; -import java.util.function.Supplier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class UuidsTest { - - @Test - public void should_generate_unique_random_uuids_Random() { - Set generated = serialGeneration(1_000_000, Uuids::random); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_shared_Random2() { - Random random = new Random(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_shared_Random() throws Exception { - Random random = new Random(); - Set generated = parallelGeneration(10, 10_000, () -> () -> Uuids.random(random)); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_shared_SecureRandom() { - SecureRandom random = new SecureRandom(); - Set generated = serialGeneration(1_000_000, () -> 
Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_shared_SecureRandom() - throws Exception { - SecureRandom random = new SecureRandom(); - Set generated = parallelGeneration(10, 10_000, () -> () -> Uuids.random(random)); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_ThreadLocalRandom() { - ThreadLocalRandom random = ThreadLocalRandom.current(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_ThreadLocalRandom() - throws Exception { - Set generated = - parallelGeneration( - 10, - 10_000, - () -> { - ThreadLocalRandom random = ThreadLocalRandom.current(); - return () -> Uuids.random(random); - }); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_Netty_ThreadLocalRandom() { - io.netty.util.internal.ThreadLocalRandom random = - io.netty.util.internal.ThreadLocalRandom.current(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_random_uuids_across_threads_Netty_ThreadLocalRandom() - throws Exception { - Set generated = - parallelGeneration( - 10, - 10_000, - () -> { - io.netty.util.internal.ThreadLocalRandom random = - io.netty.util.internal.ThreadLocalRandom.current(); - return () -> Uuids.random(random); - }); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_unique_random_uuids_SplittableRandom() { - SplittableRandom random = new SplittableRandom(); - Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void 
should_generate_unique_random_uuids_across_threads_SplittableRandom() - throws Exception { - Set generated = - parallelGeneration( - 10, - 10_000, - () -> { - SplittableRandom random = new SplittableRandom(); - return () -> Uuids.random(random); - }); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - @UseDataProvider("byteArrayNames") - public void should_generate_name_based_uuid_from_namespace_and_byte_array( - UUID namespace, byte[] name) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(3); - assertUuid(namespace, name, 3, actual); - } - - @DataProvider - public static Object[][] byteArrayNames() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, new byte[] {}}, {Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}}, - }; - } - - @Test - @UseDataProvider("byteArrayNamesWithVersions") - public void should_generate_name_based_uuid_from_namespace_byte_array_and_version( - UUID namespace, byte[] name, int version) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name, version); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(version); - assertUuid(namespace, name, version, actual); - } - - @DataProvider - public static Object[][] byteArrayNamesWithVersions() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, new byte[] {}, 3}, - {Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}, 3}, - {Uuids.NAMESPACE_OID, new byte[] {}, 5}, - {Uuids.NAMESPACE_X500, new byte[] {1, 2, 3, 4}, 5}, - }; - } - - @Test - @UseDataProvider("stringNames") - public void should_generate_name_based_uuid_from_namespace_and_string(UUID namespace, String name) - throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(3); - assertUuid(namespace, name, 3, actual); - } - - 
@DataProvider - public static Object[][] stringNames() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, ""}, {Uuids.NAMESPACE_URL, "Hello world!"}, {Uuids.NAMESPACE_OID, "你好"}, - }; - } - - @Test - @UseDataProvider("stringNamesWithVersions") - public void should_generate_name_based_uuid_from_namespace_string_and_version( - UUID namespace, String name, int version) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespace, name, version); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(version); - assertUuid(namespace, name, version, actual); - } - - @DataProvider - public static Object[][] stringNamesWithVersions() { - return new Object[][] { - {Uuids.NAMESPACE_DNS, "", 3}, - {Uuids.NAMESPACE_URL, "Hello world!", 3}, - {Uuids.NAMESPACE_OID, "你好", 3}, - {Uuids.NAMESPACE_DNS, "", 5}, - {Uuids.NAMESPACE_URL, "Hello world!", 5}, - {Uuids.NAMESPACE_OID, "你好", 5}, - }; - } - - @Test - @UseDataProvider("concatenatedData") - public void should_generate_name_based_uuid_from_concatenated_data(byte[] namespaceAndName) - throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespaceAndName); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(3); - assertUuid(namespaceAndName, 3, actual); - } - - @DataProvider - public static Object[][] concatenatedData() { - return new Object[][] { - {concat(Uuids.NAMESPACE_DNS, new byte[] {})}, - {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4})}, - }; - } - - @Test - @UseDataProvider("concatenatedDataWithVersions") - public void should_generate_name_based_uuid_from_concatenated_data_and_version( - byte[] namespaceAndName, int version) throws NoSuchAlgorithmException { - // when - UUID actual = Uuids.nameBased(namespaceAndName, version); - // then - assertThat(actual).isNotNull(); - assertThat(actual.version()).isEqualTo(version); - assertUuid(namespaceAndName, version, actual); - } - - @DataProvider - public static 
Object[][] concatenatedDataWithVersions() { - return new Object[][] { - {concat(Uuids.NAMESPACE_DNS, new byte[] {}), 3}, - {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}), 3}, - {concat(Uuids.NAMESPACE_DNS, new byte[] {}), 5}, - {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}), 5}, - }; - } - - @Test - public void should_throw_when_invalid_version() { - Throwable error = catchThrowable(() -> Uuids.nameBased(Uuids.NAMESPACE_URL, "irrelevant", 1)); - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid name-based UUID version, expecting 3 or 5, got: 1"); - } - - @Test - public void should_throw_when_invalid_data() { - Throwable error = catchThrowable(() -> Uuids.nameBased(new byte[] {1}, 3)); - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("namespaceAndName must be at least 16 bytes long"); - } - - @Test - public void should_generate_timestamp_within_10_ms() { - - // The Uuids class does some computation at class initialization, which may screw up our - // assumption below that Uuids.timeBased() takes less than 10ms, so force class loading now. 
- Uuids.timeBased(); - - long start = System.currentTimeMillis(); - UUID uuid = Uuids.timeBased(); - - assertThat(uuid.version()).isEqualTo(1); - assertThat(uuid.variant()).isEqualTo(2); - - long timestamp = Uuids.unixTimestamp(uuid); - - assertThat(timestamp) - .as("Generated timestamp should be within 10 ms") - .isBetween(start, start + 10); - } - - @Test - public void should_generate_unique_time_based_uuids() { - Set generated = serialGeneration(1_000_000, Uuids::timeBased); - assertThat(generated).hasSize(1_000_000); - } - - @Test - public void should_generate_unique_time_based_uuids_across_threads() throws Exception { - Set generated = parallelGeneration(10, 10_000, () -> Uuids::timeBased); - assertThat(generated).hasSize(10 * 10_000); - } - - @Test - public void should_generate_ever_increasing_timestamps() { - int count = 1_000_000; - long previous = 0; - for (int i = 0; i < count; i++) { - long current = Uuids.timeBased().timestamp(); - assertThat(current).isGreaterThan(previous); - previous = current; - } - } - - @Test - public void should_generate_within_bounds_for_given_timestamp() { - - Random random = new Random(System.currentTimeMillis()); - - int timestampsCount = 10; - int uuidsPerTimestamp = 10; - - for (int i = 0; i < timestampsCount; i++) { - long timestamp = random.nextInt(); - for (int j = 0; j < uuidsPerTimestamp; j++) { - UUID uuid = new UUID(Uuids.makeMsb(Uuids.fromUnixTimestamp(timestamp)), random.nextLong()); - assertBetween(uuid, Uuids.startOf(timestamp), Uuids.endOf(timestamp)); - } - } - } - - // Compares using Cassandra's sorting algorithm (not the same as compareTo). 
- private static void assertBetween(UUID uuid, UUID lowerBound, UUID upperBound) { - ByteBuffer uuidBytes = TypeCodecs.UUID.encode(uuid, DefaultProtocolVersion.V3); - ByteBuffer lb = TypeCodecs.UUID.encode(lowerBound, DefaultProtocolVersion.V3); - ByteBuffer ub = TypeCodecs.UUID.encode(upperBound, DefaultProtocolVersion.V3); - assertThat(uuidBytes).isNotNull(); - assertThat(lb).isNotNull(); - assertThat(ub).isNotNull(); - assertThat(compareTimestampBytes(lb, uuidBytes)).isLessThanOrEqualTo(0); - assertThat(compareTimestampBytes(ub, uuidBytes)).isGreaterThanOrEqualTo(0); - } - - private static int compareTimestampBytes(ByteBuffer o1, ByteBuffer o2) { - int o1Pos = o1.position(); - int o2Pos = o2.position(); - - int d = (o1.get(o1Pos + 6) & 0xF) - (o2.get(o2Pos + 6) & 0xF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 7) & 0xFF) - (o2.get(o2Pos + 7) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 4) & 0xFF) - (o2.get(o2Pos + 4) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 5) & 0xFF) - (o2.get(o2Pos + 5) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos) & 0xFF) - (o2.get(o2Pos) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 1) & 0xFF) - (o2.get(o2Pos + 1) & 0xFF); - if (d != 0) { - return d; - } - d = (o1.get(o1Pos + 2) & 0xFF) - (o2.get(o2Pos + 2) & 0xFF); - if (d != 0) { - return d; - } - return (o1.get(o1Pos + 3) & 0xFF) - (o2.get(o2Pos + 3) & 0xFF); - } - - private static void assertUuid(UUID namespace, String name, int version, UUID actual) - throws NoSuchAlgorithmException { - assertUuid(namespace, name.getBytes(StandardCharsets.UTF_8), version, actual); - } - - private static void assertUuid(UUID namespace, byte[] name, int version, UUID actual) - throws NoSuchAlgorithmException { - byte[] data = digest(namespace, name, version); - assertThat(longToBytes(actual.getMostSignificantBits())) - .isEqualTo(Arrays.copyOfRange(data, 0, 8)); - assertThat(longToBytes(actual.getLeastSignificantBits())) 
- .isEqualTo(Arrays.copyOfRange(data, 8, 16)); - } - - private static void assertUuid(byte[] namespaceAndName, int version, UUID actual) - throws NoSuchAlgorithmException { - byte[] data = digest(namespaceAndName, version); - assertThat(longToBytes(actual.getMostSignificantBits())) - .isEqualTo(Arrays.copyOfRange(data, 0, 8)); - assertThat(longToBytes(actual.getLeastSignificantBits())) - .isEqualTo(Arrays.copyOfRange(data, 8, 16)); - } - - private static byte[] digest(UUID namespace, byte[] name, int version) - throws NoSuchAlgorithmException { - byte[] namespaceAndName = concat(namespace, name); - return digest(namespaceAndName, version); - } - - private static byte[] digest(byte[] namespaceAndName, int version) - throws NoSuchAlgorithmException { - MessageDigest result; - String algorithm = version == 3 ? "MD5" : "SHA-1"; - result = MessageDigest.getInstance(algorithm); - byte[] digest = result.digest(namespaceAndName); - digest[6] &= (byte) 0x0f; - digest[6] |= (byte) (version << 4); - digest[8] &= (byte) 0x3f; - digest[8] |= (byte) 0x80; - return digest; - } - - private static byte[] concat(UUID namespace, byte[] name) { - return ByteBuffer.allocate(16 + name.length) - .putLong(namespace.getMostSignificantBits()) - .putLong(namespace.getLeastSignificantBits()) - .put(name) - .array(); - } - - private static byte[] longToBytes(long x) { - return ByteBuffer.allocate(Long.BYTES).putLong(x).array(); - } - - private Set serialGeneration(int count, Supplier uuidSupplier) { - Set generated = new HashSet<>(count); - for (int i = 0; i < count; ++i) { - generated.add(uuidSupplier.get()); - } - return generated; - } - - public Set parallelGeneration( - int threadCount, int uuidsPerThread, Supplier> uuidSupplier) - throws InterruptedException { - Set generated = new ConcurrentSkipListSet<>(); - UuidGenerator[] generators = new UuidGenerator[threadCount]; - for (int i = 0; i < threadCount; i++) { - generators[i] = new UuidGenerator(uuidsPerThread, uuidSupplier, generated); 
- } - for (int i = 0; i < threadCount; i++) { - generators[i].start(); - } - for (int i = 0; i < threadCount; i++) { - generators[i].join(); - } - return generated; - } - - private static class UuidGenerator extends Thread { - - private final int toGenerate; - private final Set generated; - private final Supplier> uuidSupplier; - - UuidGenerator(int toGenerate, Supplier> uuidSupplier, Set generated) { - this.toGenerate = toGenerate; - this.generated = generated; - this.uuidSupplier = uuidSupplier; - } - - @Override - public void run() { - Supplier uuidSupplier = this.uuidSupplier.get(); - for (int i = 0; i < toGenerate; ++i) { - generated.add(uuidSupplier.get()); - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java deleted file mode 100644 index 4daf7e28eb6..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal; - -import static org.assertj.core.api.Assertions.fail; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; - -public abstract class SerializationHelper { - - public static byte[] serialize(T t) { - try { - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - ObjectOutputStream out = new ObjectOutputStream(bytes); - out.writeObject(t); - return bytes.toByteArray(); - } catch (Exception e) { - fail("Unexpected error", e); - throw new AssertionError(); // never reached - } - } - - // the calling code performs validations on the result, so this doesn't matter - @SuppressWarnings("TypeParameterUnusedInFormals") - public static T deserialize(byte[] bytes) { - try { - ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes)); - @SuppressWarnings("unchecked") - T t = (T) in.readObject(); - return t; - } catch (Exception e) { - fail("Unexpected error", e); - throw new AssertionError(); // never reached - } - } - - public static T serializeAndDeserialize(T t) { - return deserialize(serialize(t)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java deleted file mode 100644 index dff9877b62d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.DefaultAsyncResultSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class AsyncPagingIterableWrapperTest { - - @Mock private ColumnDefinitions columnDefinitions; - @Mock private Statement statement; - @Mock private CqlSession session; - @Mock private InternalDriverContext context; - - @Before - public void setup() { - 
MockitoAnnotations.initMocks(this); - - // One single column "i" of type int: - when(columnDefinitions.contains("i")).thenReturn(true); - ColumnDefinition iDefinition = mock(ColumnDefinition.class); - when(iDefinition.getType()).thenReturn(DataTypes.INT); - when(columnDefinitions.get("i")).thenReturn(iDefinition); - when(columnDefinitions.firstIndexOf("i")).thenReturn(0); - when(columnDefinitions.get(0)).thenReturn(iDefinition); - - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(context.getProtocolVersion()).thenReturn(DefaultProtocolVersion.DEFAULT); - } - - @Test - public void should_wrap_result_set() throws Exception { - // Given - // two pages of data: - ExecutionInfo executionInfo1 = mockExecutionInfo(); - DefaultAsyncResultSet resultSet1 = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo1, mockData(0, 5), session, context); - DefaultAsyncResultSet resultSet2 = - new DefaultAsyncResultSet( - columnDefinitions, mockExecutionInfo(), mockData(5, 10), session, context); - // chain them together: - ByteBuffer mockPagingState = ByteBuffer.allocate(0); - when(executionInfo1.getPagingState()).thenReturn(mockPagingState); - Statement mockNextStatement = mock(Statement.class); - when(((Statement) statement).copy(mockPagingState)).thenReturn(mockNextStatement); - when(session.executeAsync(mockNextStatement)) - .thenAnswer(invocation -> CompletableFuture.completedFuture(resultSet2)); - - // When - MappedAsyncPagingIterable iterable1 = resultSet1.map(row -> row.getInt("i")); - - // Then - for (int i = 0; i < 5; i++) { - assertThat(iterable1.one()).isEqualTo(i); - assertThat(iterable1.remaining()).isEqualTo(resultSet1.remaining()).isEqualTo(4 - i); - } - assertThat(iterable1.hasMorePages()).isTrue(); - - MappedAsyncPagingIterable iterable2 = - iterable1.fetchNextPage().toCompletableFuture().get(); - for (int i = 5; i < 10; i++) { - assertThat(iterable2.one()).isEqualTo(i); - 
assertThat(iterable2.remaining()).isEqualTo(resultSet2.remaining()).isEqualTo(9 - i); - } - assertThat(iterable2.hasMorePages()).isFalse(); - } - - /** Checks that consuming from the wrapper consumes from the source, and vice-versa. */ - @Test - public void should_share_iteration_progress_with_wrapped_result_set() { - // Given - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, mockExecutionInfo(), mockData(0, 10), session, context); - - // When - MappedAsyncPagingIterable iterable = resultSet.map(row -> row.getInt("i")); - - // Then - // Consume alternatively from the source and mapped iterable, and check that they stay in sync - for (int i = 0; i < 10; i++) { - Object element = (i % 2 == 0 ? resultSet : iterable).one(); - assertThat(element).isNotNull(); - assertThat(iterable.remaining()).isEqualTo(resultSet.remaining()).isEqualTo(9 - i); - } - assertThat(resultSet.hasMorePages()).isFalse(); - assertThat(iterable.hasMorePages()).isFalse(); - } - - private ExecutionInfo mockExecutionInfo() { - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getRequest()).thenAnswer(invocation -> statement); - return executionInfo; - } - - private Queue> mockData(int start, int end) { - Queue> data = new ArrayDeque<>(); - for (int i = start; i < end; i++) { - data.add(Lists.newArrayList(TypeCodecs.INT.encode(i, DefaultProtocolVersion.DEFAULT))); - } - return data; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java deleted file mode 100644 index 6c0d78d62dd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Consumer; -import org.assertj.core.api.AbstractAssert; - -public class CompletionStageAssert - extends AbstractAssert, CompletionStage> { - - public CompletionStageAssert(CompletionStage actual) { - super(actual, CompletionStageAssert.class); - } - - public CompletionStageAssert isSuccess(Consumer valueAssertions) { - try { - V value = actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - valueAssertions.accept(value); - } catch (TimeoutException e) { - fail("Future did not complete within the timeout"); - } catch (Throwable t) { - fail("Unexpected error while waiting on the future", t); - } - return this; - } - - public CompletionStageAssert isSuccess() { - return isSuccess(v -> {}); - } - - public CompletionStageAssert isFailed(Consumer failureAssertions) { - try { - actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - fail("Expected completion 
stage to fail"); - } catch (TimeoutException e) { - fail("Future did not complete within the timeout"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for future to fail"); - } catch (ExecutionException e) { - failureAssertions.accept(e.getCause()); - } - return this; - } - - public CompletionStageAssert isFailed() { - return isFailed(f -> {}); - } - - public CompletionStageAssert isCancelled() { - boolean cancelled = false; - try { - actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - } catch (CancellationException e) { - cancelled = true; - } catch (Exception ignored) { - } - if (!cancelled) { - fail("Expected completion stage to be cancelled"); - } - return this; - } - - public CompletionStageAssert isNotCancelled() { - boolean cancelled = false; - try { - actual.toCompletableFuture().get(2, TimeUnit.SECONDS); - } catch (CancellationException e) { - cancelled = true; - } catch (Exception ignored) { - } - if (cancelled) { - fail("Expected completion stage not to be cancelled"); - } - return this; - } - - public CompletionStageAssert isDone() { - assertThat(actual.toCompletableFuture().isDone()) - .overridingErrorMessage("Expected completion stage to be done") - .isTrue(); - return this; - } - - public CompletionStageAssert isNotDone() { - assertThat(actual.toCompletableFuture().isDone()) - .overridingErrorMessage("Expected completion stage not to be done") - .isFalse(); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java deleted file mode 100644 index 72b875b8602..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.filter; -import static org.junit.Assume.assumeTrue; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.Set; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public class ContactPointsTest { - - @Mock private Appender appender; - 
@Captor private ArgumentCaptor loggingEventCaptor; - private Logger logger; - private Level initialLogLevel; - - @Before - public void setup() { - logger = (Logger) LoggerFactory.getLogger(ContactPoints.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.INFO); - logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - public void should_parse_ipv4_address_and_port_in_configuration() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("127.0.0.1:9042"), true); - - assertThat(endPoints) - .containsExactly(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - } - - @Test - public void should_parse_ipv6_address_and_port_in_configuration() { - Set endPoints = - ContactPoints.merge( - Collections.emptySet(), ImmutableList.of("0:0:0:0:0:0:0:1:9042", "::2:9042"), true); - - assertThat(endPoints) - .containsExactly( - new DefaultEndPoint(new InetSocketAddress("::1", 9042)), - new DefaultEndPoint(new InetSocketAddress("::2", 9042))); - } - - @Test - public void should_parse_host_and_port_in_configuration_and_create_unresolved() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("localhost:9042"), false); - - assertThat(endPoints) - .containsExactly( - new DefaultEndPoint(InetSocketAddress.createUnresolved("localhost", 9042))); - } - - @Test - public void should_parse_host_and_port_and_resolve_all_a_records() throws UnknownHostException { - int localhostARecordsCount = InetAddress.getAllByName("localhost").length; - assumeTrue( - "This test assumes that localhost resolves to multiple A-records", - localhostARecordsCount >= 2); - - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("localhost:9042"), true); - - assertThat(endPoints).hasSize(localhostARecordsCount); - assertLog( - Level.INFO, - "Contact point localhost:9042 resolves to multiple addresses, 
will use them all"); - } - - @Test - public void should_ignore_malformed_host_and_port_and_warn() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("foobar"), true); - - assertThat(endPoints).isEmpty(); - assertLog(Level.WARN, "Ignoring invalid contact point foobar (expecting format host:port)"); - } - - @Test - public void should_ignore_malformed_port_and_warn() { - Set endPoints = - ContactPoints.merge(Collections.emptySet(), ImmutableList.of("127.0.0.1:foobar"), true); - - assertThat(endPoints).isEmpty(); - assertLog( - Level.WARN, - "Ignoring invalid contact point 127.0.0.1:foobar (expecting port to be a number, got foobar)"); - } - - @Test - public void should_merge_programmatic_and_configuration() { - Set endPoints = - ContactPoints.merge( - ImmutableSet.of(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))), - ImmutableList.of("127.0.0.2:9042"), - true); - - assertThat(endPoints) - .containsOnly( - new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042)), - new DefaultEndPoint(new InetSocketAddress("127.0.0.2", 9042))); - } - - @Test - public void should_warn_if_duplicate_between_programmatic_and_configuration() { - Set endPoints = - ContactPoints.merge( - ImmutableSet.of(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))), - ImmutableList.of("127.0.0.1:9042"), - true); - - assertThat(endPoints) - .containsOnly(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - assertLog(Level.WARN, "Duplicate contact point /127.0.0.1:9042"); - } - - @Test - public void should_warn_if_duplicate_in_configuration() { - Set endPoints = - ContactPoints.merge( - Collections.emptySet(), ImmutableList.of("127.0.0.1:9042", "127.0.0.1:9042"), true); - - assertThat(endPoints) - .containsOnly(new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042))); - assertLog(Level.WARN, "Duplicate contact point /127.0.0.1:9042"); - } - - private void assertLog(Level level, String message) { - verify(appender, 
atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable logs = - filter(loggingEventCaptor.getAllValues()).with("level", level).get(); - assertThat(logs).hasSize(1); - assertThat(logs.iterator().next().getFormattedMessage()).contains(message); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java deleted file mode 100644 index 1d7cc65d1f2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; -import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V3; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V4; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V5; -import static com.datastax.oss.driver.api.core.ProtocolVersion.V6; -import static com.datastax.oss.driver.internal.core.DefaultProtocolFeature.DATE_TYPE; -import static com.datastax.oss.driver.internal.core.DefaultProtocolFeature.SMALLINT_AND_TINYINT_TYPES; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Optional; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Note: some tests in this class depend on the set of supported protocol versions, they will need - * to be updated as new versions are added or become non-beta. 
- */ -public class DefaultProtocolVersionRegistryTest { - - private DefaultProtocolVersionRegistry registry = new DefaultProtocolVersionRegistry("test"); - - @Test - public void should_find_version_by_name() { - assertThat(registry.fromName("V4")).isEqualTo(ProtocolVersion.V4); - assertThat(registry.fromName("DSE_V1")).isEqualTo(DseProtocolVersion.DSE_V1); - } - - @Test - public void should_fail_to_find_version_by_name_different_case() { - assertThatThrownBy(() -> registry.fromName("v4")).isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> registry.fromName("dse_v1")) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> registry.fromName("dDSE_v1")) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> registry.fromName("dse_v1")) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_downgrade_if_lower_version_available() { - Optional downgraded = registry.downgrade(V4); - downgraded.map(version -> assertThat(version).isEqualTo(V3)).orElseThrow(AssertionError::new); - } - - @Test - public void should_not_downgrade_if_no_lower_version() { - Optional downgraded = registry.downgrade(V3); - assertThat(downgraded.isPresent()).isFalse(); - } - - @Test - public void should_downgrade_from_dse_to_oss() { - assertThat(registry.downgrade(DseProtocolVersion.DSE_V1).get()).isEqualTo(ProtocolVersion.V5); - } - - @Test - public void should_pick_dse_v2_as_highest_common_when_all_nodes_are_dse_7() { - assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("7.0"), mockDseNode("7.1")))) - .isEqualTo(DseProtocolVersion.DSE_V2); - } - - @Test - public void should_pick_dse_v2_as_highest_common_when_all_nodes_are_dse_6() { - assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("6.0"), mockDseNode("6.1")))) - .isEqualTo(DseProtocolVersion.DSE_V2); - } - - @Test - public void should_pick_dse_v1_as_highest_common_when_all_nodes_are_dse_5_1_or_more() { - 
assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("5.1"), mockDseNode("6.1")))) - .isEqualTo(DseProtocolVersion.DSE_V1); - } - - @Test - public void should_pick_oss_v4_as_highest_common_when_all_nodes_are_dse_5_or_more() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockDseNode("5.0"), mockDseNode("5.1"), mockDseNode("6.1")))) - .isEqualTo(ProtocolVersion.V4); - } - - @Test - public void should_pick_oss_v3_as_highest_common_when_all_nodes_are_dse_4_7_or_more() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockDseNode("4.7"), mockDseNode("5.1"), mockDseNode("6.1")))) - .isEqualTo(ProtocolVersion.V3); - } - - @Test(expected = UnsupportedProtocolVersionException.class) - public void should_fail_to_pick_highest_common_when_one_node_is_dse_4_6() { - registry.highestCommon( - ImmutableList.of(mockDseNode("4.6"), mockDseNode("5.1"), mockDseNode("6.1"))); - } - - @Test(expected = UnsupportedProtocolVersionException.class) - public void should_fail_to_pick_highest_common_when_one_node_is_2_0() { - registry.highestCommon( - ImmutableList.of(mockCassandraNode("3.0.0"), mockCassandraNode("2.0.9"))); - } - - @Test - public void should_pick_oss_v3_as_highest_common_when_one_node_is_cassandra_2_1() { - assertThat( - registry.highestCommon( - ImmutableList.of( - mockDseNode("5.1"), // oss v4 - mockDseNode("6.1"), // oss v4 - mockCassandraNode("2.1") // oss v3 - ))) - .isEqualTo(ProtocolVersion.V3); - } - - @Test - public void should_support_date_type_on_oss_v4_and_later() { - assertThat(registry.supports(V3, DATE_TYPE)).isFalse(); - assertThat(registry.supports(V4, DATE_TYPE)).isTrue(); - assertThat(registry.supports(V5, DATE_TYPE)).isTrue(); - assertThat(registry.supports(V6, DATE_TYPE)).isTrue(); - assertThat(registry.supports(DSE_V1, DATE_TYPE)).isTrue(); - assertThat(registry.supports(DSE_V2, DATE_TYPE)).isTrue(); - } - - @Test - public void should_support_smallint_and_tinyint_types_on_oss_v4_and_later() { - 
assertThat(registry.supports(V3, SMALLINT_AND_TINYINT_TYPES)).isFalse(); - assertThat(registry.supports(V4, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(V5, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(V6, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(DSE_V1, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - assertThat(registry.supports(DSE_V2, SMALLINT_AND_TINYINT_TYPES)).isTrue(); - } - - private Node mockCassandraNode(String rawVersion) { - Node node = Mockito.mock(Node.class); - if (rawVersion != null) { - Mockito.when(node.getCassandraVersion()).thenReturn(Version.parse(rawVersion)); - } - return node; - } - - private Node mockDseNode(String rawDseVersion) { - Node node = Mockito.mock(Node.class); - Version dseVersion = Version.parse(rawDseVersion); - Mockito.when(node.getExtras()) - .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, dseVersion)); - - Version cassandraVersion; - if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_7_0_0) >= 0) { - cassandraVersion = Version.parse("5.0"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_6_0_0) >= 0) { - cassandraVersion = Version.parse("4.0"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_5_1_0) >= 0) { - cassandraVersion = Version.parse("3.11"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_5_0_0) >= 0) { - cassandraVersion = Version.parse("3.0"); - } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_4_7_0) >= 0) { - cassandraVersion = Version.parse("2.1"); - } else { - cassandraVersion = Version.parse("2.0"); - } - Mockito.when(node.getCassandraVersion()).thenReturn(cassandraVersion); - - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java deleted file mode 100644 index adbe26159db..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverOption; -import org.assertj.core.api.AbstractAssert; - -public class DriverConfigAssert extends AbstractAssert { - public DriverConfigAssert(DriverConfig actual) { - super(actual, DriverConfigAssert.class); - } - - public DriverConfigAssert hasIntOption(DriverOption option, int expected) { - assertThat(actual.getDefaultProfile().getInt(option)).isEqualTo(expected); - return this; - } - - public DriverConfigAssert hasIntOption(String profileName, DriverOption option, int expected) { - assertThat(actual.getProfile(profileName).getInt(option)).isEqualTo(expected); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java b/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java deleted file mode 100644 index 731c558a81f..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.AsyncPagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public class MockAsyncPagingIterable - implements AsyncPagingIterable> { - - private final Queue currentPage; - private final MockAsyncPagingIterable nextPage; - - public MockAsyncPagingIterable(List elements, int fetchSize, boolean addEmptyLastPage) { - if (elements.size() <= fetchSize) { - currentPage = new ArrayDeque<>(elements); - nextPage = - addEmptyLastPage - ? 
new MockAsyncPagingIterable<>(Collections.emptyList(), fetchSize, false) - : null; - } else { - currentPage = new ArrayDeque<>(elements.subList(0, fetchSize)); - nextPage = - new MockAsyncPagingIterable<>( - elements.subList(fetchSize, elements.size()), fetchSize, addEmptyLastPage); - } - } - - @NonNull - @Override - public Iterable currentPage() { - return currentPage; - } - - @Override - public int remaining() { - return currentPage.size(); - } - - @Override - public boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - @Override - public CompletionStage> fetchNextPage() - throws IllegalStateException { - Preconditions.checkState(nextPage != null); - return CompletableFuture.completedFuture(nextPage); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public boolean wasApplied() { - throw new UnsupportedOperationException("irrelevant"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java b/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java deleted file mode 100644 index 885983ee98e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; - -public class MockPagingIterable implements PagingIterable { - - private final Iterator iterator; - - public MockPagingIterable(Iterator iterator) { - this.iterator = iterator; - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean isFullyFetched() { - return !iterator.hasNext(); - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public List getExecutionInfos() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public int getAvailableWithoutFetching() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public boolean wasApplied() { - throw new UnsupportedOperationException("irrelevant"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java deleted file mode 100644 index 15af3c61bff..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import io.netty.util.concurrent.Future; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Consumer; -import org.assertj.core.api.AbstractAssert; - -public class NettyFutureAssert extends AbstractAssert, Future> { - - public NettyFutureAssert(Future actual) { - super(actual, NettyFutureAssert.class); - } - - public NettyFutureAssert isNotDone() { - assertThat(actual.isDone()).isFalse(); - return this; - } - - public NettyFutureAssert isSuccess(Consumer valueAssertions) { - try { - V value = actual.get(100, TimeUnit.MILLISECONDS); - valueAssertions.accept(value); - } catch (TimeoutException e) { - fail("Future did not complete within the timeout"); - } catch (Throwable t) { - fail("Unexpected error while waiting on the future", t); - } - return this; - } - - public NettyFutureAssert isSuccess() { - return isSuccess(v -> {}); - } - - public NettyFutureAssert isFailed(Consumer failureAssertions) { - try { - actual.get(100, TimeUnit.MILLISECONDS); - 
fail("Expected future to fail"); - } catch (TimeoutException e) { - fail("Future did not fail within the timeout"); - } catch (InterruptedException e) { - fail("Interrupted while waiting for future to fail"); - } catch (ExecutionException e) { - failureAssertions.accept(e.getCause()); - } - return this; - } - - public NettyFutureAssert isFailed() { - return isFailed(f -> {}); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java deleted file mode 100644 index 1e7cc62f8ac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.cql.ResultSetTestBase; -import com.datastax.oss.driver.internal.core.cql.ResultSets; -import java.util.Iterator; -import org.junit.Test; - -public class PagingIterableWrapperTest extends ResultSetTestBase { - - @Test - public void should_wrap_result_set() { - // Given - AsyncResultSet page1 = mockPage(true, 0, 1, 2); - AsyncResultSet page2 = mockPage(true, 3, 4, 5); - AsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - PagingIterable iterable = ResultSets.newInstance(page1).map(row -> row.getInt(0)); - - // Then - assertThat(iterable.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(iterable.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = iterable.iterator(); - - assertThat(iterator.next()).isEqualTo(0); - assertThat(iterator.next()).isEqualTo(1); - assertThat(iterator.next()).isEqualTo(2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - assertThat(iterable.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); - assertThat(iterable.getExecutionInfos()) - .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); - - assertThat(iterator.next()).isEqualTo(3); - assertThat(iterator.next()).isEqualTo(4); - assertThat(iterator.next()).isEqualTo(5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(iterable.getExecutionInfo()).isEqualTo(page3.getExecutionInfo()); - assertThat(iterable.getExecutionInfos()) - 
.containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - - assertThat(iterator.next()).isEqualTo(6); - assertThat(iterator.next()).isEqualTo(7); - assertThat(iterator.next()).isEqualTo(8); - } - - /** Checks that consuming from the wrapper consumes from the source, and vice-versa. */ - @Test - public void should_share_iteration_progress_with_wrapped_result_set() { - // Given - AsyncResultSet page1 = mockPage(true, 0, 1, 2); - AsyncResultSet page2 = mockPage(true, 3, 4, 5); - AsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - ResultSet resultSet = ResultSets.newInstance(page1); - PagingIterable iterable = resultSet.map(row -> row.getInt(0)); - - // Then - Iterator sourceIterator = resultSet.iterator(); - Iterator mappedIterator = iterable.iterator(); - - assertThat(mappedIterator.next()).isEqualTo(0); - assertNextRow(sourceIterator, 1); - assertThat(mappedIterator.next()).isEqualTo(2); - assertNextRow(sourceIterator, 3); - assertThat(mappedIterator.next()).isEqualTo(4); - assertNextRow(sourceIterator, 5); - assertThat(mappedIterator.next()).isEqualTo(6); - assertNextRow(sourceIterator, 7); - assertThat(mappedIterator.next()).isEqualTo(8); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java b/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java deleted file mode 100644 index ce028e66dbd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Supported; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Queue; - -public class TestResponses { - /** The response to the query run by each connection to check if the cluster name matches. 
*/ - public static Rows clusterNameResponse(String actualClusterName) { - ColumnSpec colSpec = - new ColumnSpec( - "system", - "local", - "cluster_name", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR)); - RowsMetadata metadata = new RowsMetadata(ImmutableList.of(colSpec), null, null, null); - Queue> data = Lists.newLinkedList(); - data.add(Lists.newArrayList(ByteBuffer.wrap(actualClusterName.getBytes(Charsets.UTF_8)))); - return new DefaultRows(metadata, data); - } - - public static Supported supportedResponse(String key, String value) { - Map> options = ImmutableMap.of(key, ImmutableList.of(value)); - return new Supported(options); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java deleted file mode 100644 index 2b871b3e0cc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import javax.naming.NamingException; -import javax.naming.directory.BasicAttributes; -import javax.naming.directory.InitialDirContext; -import org.junit.Test; - -public class Ec2MultiRegionAddressTranslatorTest { - - @Test - public void should_return_same_address_when_no_entry_found() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes(anyString(), any(String[].class))).thenReturn(new BasicAttributes()); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } - - @Test - public void should_return_same_address_when_exception_encountered() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes(anyString(), any(String[].class))) - .thenThrow(new NamingException("Problem resolving address (not really).")); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } - - @Test - public void should_return_new_address_when_match_found() throws Exception { - InetSocketAddress expectedAddress = new InetSocketAddress("54.32.55.66", 9042); - - InitialDirContext mock = mock(InitialDirContext.class); - when(mock.getAttributes("5.2.0.192.in-addr.arpa", new String[] {"PTR"})) - .thenReturn(new 
BasicAttributes("PTR", expectedAddress.getHostName())); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 9042); - assertThat(translator.translate(address)).isEqualTo(expectedAddress); - } - - @Test - public void should_close_context_when_closed() throws Exception { - InitialDirContext mock = mock(InitialDirContext.class); - Ec2MultiRegionAddressTranslator translator = new Ec2MultiRegionAddressTranslator(mock); - - // ensure close has not been called to this point. - verify(mock, times(0)).close(); - translator.close(); - // ensure close is closed. - verify(mock).close(); - } - - @Test - public void should_build_reversed_domain_name_for_ip_v4() throws Exception { - InetAddress address = InetAddress.getByName("192.0.2.5"); - assertThat(Ec2MultiRegionAddressTranslator.reverse(address)) - .isEqualTo("5.2.0.192.in-addr.arpa"); - } - - @Test - public void should_build_reversed_domain_name_for_ip_v6() throws Exception { - InetAddress address = InetAddress.getByName("2001:db8::567:89ab"); - assertThat(Ec2MultiRegionAddressTranslator.reverse(address)) - .isEqualTo("b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java deleted file mode 100644 index 3bb9c4bc291..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; -import java.net.InetSocketAddress; -import org.junit.Test; - -public class FixedHostNameAddressTranslatorTest { - - @Test - public void should_translate_address() { - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); - when(defaultProfile.getString(ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME)).thenReturn("myaddress"); - DefaultDriverContext defaultDriverContext = - MockedDriverContextFactory.defaultDriverContext(defaultProfile); - - FixedHostNameAddressTranslator translator = - new FixedHostNameAddressTranslator(defaultDriverContext); - InetSocketAddress address = new InetSocketAddress("192.0.2.5", 6061); - - assertThat(translator.translate(address)).isEqualTo(new InetSocketAddress("myaddress", 6061)); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java deleted file mode 100644 index bd505f5dd44..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -import java.net.InetSocketAddress; -import org.junit.Test; - -public class SubnetAddressTest { - @Test - public void should_return_return_true_on_overlapping_with_another_subnet_address() { - SubnetAddress subnetAddress1 = - new SubnetAddress("100.64.0.0/15", mock(InetSocketAddress.class)); - SubnetAddress subnetAddress2 = - new SubnetAddress("100.65.0.0/16", mock(InetSocketAddress.class)); - assertThat(subnetAddress1.isOverlapping(subnetAddress2)).isTrue(); - } - - @Test - public void should_return_return_false_on_not_overlapping_with_another_subnet_address() { - SubnetAddress subnetAddress1 = - new SubnetAddress("100.64.0.0/15", mock(InetSocketAddress.class)); - SubnetAddress subnetAddress2 = - new SubnetAddress("100.66.0.0/15", mock(InetSocketAddress.class)); - assertThat(subnetAddress1.isOverlapping(subnetAddress2)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java deleted file mode 100644 index 420170654dc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.net.InetSocketAddress; -import java.util.Map; -import org.junit.Test; - -@SuppressWarnings("resource") -public class SubnetAddressTranslatorTest { - - @Test - public void should_translate_to_correct_subnet_address_ipv4() { - Map subnetAddresses = - ImmutableMap.of( - "\"100.64.0.0/15\"", "cassandra.datacenter1.com:19042", - "100.66.0.\"0/15\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("100.64.0.1", 9042); - assertThat(translator.translate(address)) - .isEqualTo(InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)); - } - - @Test - 
public void should_translate_to_correct_subnet_address_ipv6() { - Map subnetAddresses = - ImmutableMap.of( - "\"::ffff:6440:0/111\"", "cassandra.datacenter1.com:19042", - "\"::ffff:6442:0/111\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("::ffff:6440:1", 9042); - assertThat(translator.translate(address)) - .isEqualTo(InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)); - } - - @Test - public void should_translate_to_default_address() { - DefaultDriverContext context = context(ImmutableMap.of()); - when(context - .getConfig() - .getDefaultProfile() - .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) - .thenReturn("cassandra.com:19042"); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("100.68.0.1", 9042); - assertThat(translator.translate(address)) - .isEqualTo(InetSocketAddress.createUnresolved("cassandra.com", 19042)); - } - - @Test - public void should_pass_through_not_matched_address() { - DefaultDriverContext context = context(ImmutableMap.of()); - SubnetAddressTranslator translator = new SubnetAddressTranslator(context); - InetSocketAddress address = new InetSocketAddress("100.68.0.1", 9042); - assertThat(translator.translate(address)).isEqualTo(address); - } - - @Test - public void should_fail_on_intersecting_subnets_ipv4() { - Map subnetAddresses = - ImmutableMap.of( - "\"100.64.0.0/15\"", "cassandra.datacenter1.com:19042", - "100.65.0.\"0/16\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage( - "Configured subnets are overlapping: " - + String.format( - "SubnetAddress[subnet=[100, 64, 0, 0], address=%s], ", - 
InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)) - + String.format( - "SubnetAddress[subnet=[100, 65, 0, 0], address=%s]", - InetSocketAddress.createUnresolved("cassandra.datacenter2.com", 19042))); - } - - @Test - public void should_fail_on_intersecting_subnets_ipv6() { - Map subnetAddresses = - ImmutableMap.of( - "\"::ffff:6440:0/111\"", "cassandra.datacenter1.com:19042", - "\"::ffff:6441:0/112\"", "cassandra.datacenter2.com:19042"); - DefaultDriverContext context = context(subnetAddresses); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage( - "Configured subnets are overlapping: " - + String.format( - "SubnetAddress[subnet=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0], address=%s], ", - InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)) - + String.format( - "SubnetAddress[subnet=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 65, 0, 0], address=%s]", - InetSocketAddress.createUnresolved("cassandra.datacenter2.com", 19042))); - } - - @Test - public void should_fail_on_subnet_address_without_port() { - Map subnetAddresses = - ImmutableMap.of("\"100.64.0.0/15\"", "cassandra.datacenter1.com"); - DefaultDriverContext context = context(subnetAddresses); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage("Invalid address cassandra.datacenter1.com (expecting format host:port)"); - } - - @Test - public void should_fail_on_default_address_without_port() { - DefaultDriverContext context = context(ImmutableMap.of()); - when(context - .getConfig() - .getDefaultProfile() - .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) - .thenReturn("cassandra.com"); - assertThatIllegalArgumentException() - .isThrownBy(() -> new SubnetAddressTranslator(context)) - .withMessage("Invalid address cassandra.com (expecting format host:port)"); - } - - private static DefaultDriverContext context(Map subnetAddresses) { - 
DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - when(profile.getStringMap(ADDRESS_TRANSLATOR_SUBNET_ADDRESSES)).thenReturn(subnetAddresses); - return MockedDriverContextFactory.defaultDriverContext(profile); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java deleted file mode 100644 index f8ba8929e9e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.addresstranslation; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.assertj.core.api.Assertions.assertThatNoException; - -import java.net.UnknownHostException; -import org.junit.Test; - -public class SubnetTest { - @Test - public void should_parse_to_correct_ipv4_subnet() throws UnknownHostException { - Subnet subnet = Subnet.parse("100.64.0.0/15"); - assertThat(subnet.getSubnet()).containsExactly(100, 64, 0, 0); - assertThat(subnet.getNetworkMask()).containsExactly(255, 254, 0, 0); - assertThat(subnet.getUpper()).containsExactly(100, 65, 255, 255); - assertThat(subnet.getLower()).containsExactly(100, 64, 0, 0); - } - - @Test - public void should_parse_to_correct_ipv6_subnet() throws UnknownHostException { - Subnet subnet = Subnet.parse("2001:db8:85a3::8a2e:370:0/111"); - assertThat(subnet.getSubnet()) - .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 112, 0, 0); - assertThat(subnet.getNetworkMask()) - .containsExactly( - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 0, 0); - assertThat(subnet.getUpper()) - .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 113, 255, 255); - assertThat(subnet.getLower()) - .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 112, 0, 0); - } - - @Test - public void should_parse_to_correct_ipv6_subnet_ipv4_convertible() throws UnknownHostException { - Subnet subnet = Subnet.parse("::ffff:6440:0/111"); - assertThat(subnet.getSubnet()) - .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0); - assertThat(subnet.getNetworkMask()) - .containsExactly( - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 0, 0); - assertThat(subnet.getUpper()) - .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 65, 255, 255); - assertThat(subnet.getLower()) - 
.containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0); - } - - @Test - public void should_fail_on_invalid_cidr_format() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("invalid")) - .withMessage("Invalid subnet: invalid"); - } - - @Test - public void should_parse_bounding_prefix_lengths_correctly() { - assertThatNoException().isThrownBy(() -> Subnet.parse("0.0.0.0/0")); - assertThatNoException().isThrownBy(() -> Subnet.parse("100.64.0.0/32")); - } - - @Test - public void should_fail_on_invalid_prefix_length() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("100.64.0.0/-1")) - .withMessage("Prefix length -1 must be within [0; 32]"); - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("100.64.0.0/33")) - .withMessage("Prefix length 33 must be within [0; 32]"); - } - - @Test - public void should_fail_on_not_prefix_block_subnet_ipv4() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("100.65.0.0/15")) - .withMessage("Subnet 100.65.0.0/15 must be represented as a network prefix block"); - } - - @Test - public void should_fail_on_not_prefix_block_subnet_ipv6() { - assertThatIllegalArgumentException() - .isThrownBy(() -> Subnet.parse("::ffff:6441:0/111")) - .withMessage("Subnet ::ffff:6441:0/111 must be represented as a network prefix block"); - } - - @Test - public void should_return_true_on_containing_address() throws UnknownHostException { - Subnet subnet = Subnet.parse("100.64.0.0/15"); - assertThat(subnet.contains(new byte[] {100, 64, 0, 0})).isTrue(); - assertThat(subnet.contains(new byte[] {100, 65, (byte) 255, (byte) 255})).isTrue(); - assertThat(subnet.contains(new byte[] {100, 65, 100, 100})).isTrue(); - } - - @Test - public void should_return_false_on_not_containing_address() throws UnknownHostException { - Subnet subnet = Subnet.parse("100.64.0.0/15"); - assertThat(subnet.contains(new byte[] {100, 63, (byte) 255, (byte) 255})).isFalse(); - 
assertThat(subnet.contains(new byte[] {100, 66, 0, 0})).isFalse(); - // IPv6 cannot be contained by IPv4 subnet. - assertThat(subnet.contains(new byte[16])).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java deleted file mode 100644 index a1eab41b998..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.Void; -import io.netty.util.concurrent.Future; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class ChannelFactoryAvailableIdsTest extends ChannelFactoryTestBase { - - @Mock private ResponseCallback responseCallback; - - @Before - @Override - public void setup() throws InterruptedException { - super.setup(); - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V4"); - when(protocolVersionRegistry.fromName("V4")).thenReturn(DefaultProtocolVersion.V4); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS)).thenReturn(128); - - when(responseCallback.isLastResponse(any(Frame.class))).thenReturn(true); - } - - @Test - public void should_report_available_ids() { - // Given - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.builder().build(), NoopNodeMetricUpdater.INSTANCE); - completeSimpleChannelInit(); - - // Then - assertThatStage(channelFuture) - .isSuccess( - channel -> { - 
assertThat(channel.getAvailableIds()).isEqualTo(128); - - // Write a request, should decrease the count - assertThat(channel.preAcquireId()).isTrue(); - Future writeFuture = - channel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); - assertThat(writeFuture) - .isSuccess( - v -> { - assertThat(channel.getAvailableIds()).isEqualTo(127); - - // Complete the request, should increase again - writeInboundFrame(readOutboundFrame(), Void.INSTANCE); - verify(responseCallback, timeout(500)).onResponse(any(Frame.class)); - assertThat(channel.getAvailableIds()).isEqualTo(128); - }); - }); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java deleted file mode 100644 index d9793247c9c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.response.Ready; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelFactoryClusterNameTest extends ChannelFactoryTestBase { - - @Test - public void should_set_cluster_name_from_first_connection() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - writeInboundFrame( - readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture).isSuccess(); - assertThat(factory.getClusterName()).isEqualTo("mockClusterName"); - } - - @Test - public void should_check_cluster_name_for_next_connections() throws Throwable { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, 
DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - // open a first connection that will define the cluster name - writeInboundFrame( - readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - assertThatStage(channelFuture).isSuccess(); - // open a second connection that returns the same cluster name - channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture).isSuccess(); - - // When - // open a third connection that returns a different cluster name - channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("wrongClusterName")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(ClusterNameMismatchException.class) - .hasMessageContaining( - "reports cluster name 'wrongClusterName' that doesn't match " - + "our cluster name 'mockClusterName'.")); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java deleted file mode 100644 index b9738a140c0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Ready; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Optional; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelFactoryProtocolNegotiationTest extends ChannelFactoryTestBase { - - @Test - public void should_succeed_if_version_specified_and_supported_by_server() { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V4"); - when(protocolVersionRegistry.fromName("V4")).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - completeSimpleChannelInit(); - - // Then - assertThatStage(channelFuture) - .isSuccess(channel -> assertThat(channel.getClusterName()).isEqualTo("mockClusterName")); - assertThat(factory.protocolVersion).isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - @UseDataProvider("unsupportedProtocolCodes") - public void should_fail_if_version_specified_and_not_supported_by_server(int errorCode) { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V4"); - when(protocolVersionRegistry.fromName("V4")).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - // Server does not support v4 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> { - assertThat(e) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining("Host does not support protocol version 
V4"); - assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) - .containsExactly(DefaultProtocolVersion.V4); - }); - } - - @Test - public void should_fail_if_version_specified_and_considered_beta_by_server() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V5"); - when(protocolVersionRegistry.fromName("V5")).thenReturn(DefaultProtocolVersion.V5); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V5.getCode()); - // Server considers v5 beta, e.g. 
C* 3.10 or 3.11 - writeInboundFrame( - requestFrame, - new Error( - ProtocolConstants.ErrorCode.PROTOCOL_ERROR, - "Beta version of the protocol used (5/v5-beta), but USE_BETA flag is unset")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> { - assertThat(e) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining("Host does not support protocol version V5"); - assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) - .containsExactly(DefaultProtocolVersion.V5); - }); - } - - @Test - public void should_succeed_if_version_not_specified_and_server_supports_latest_supported() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture) - .isSuccess(channel -> assertThat(channel.getClusterName()).isEqualTo("mockClusterName")); - assertThat(factory.protocolVersion).isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - @UseDataProvider("unsupportedProtocolCodes") - public void should_negotiate_if_version_not_specified_and_server_supports_legacy(int errorCode) { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - when(protocolVersionRegistry.downgrade(DefaultProtocolVersion.V4)) - .thenReturn(Optional.of(DefaultProtocolVersion.V3)); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - // Server does not support v4 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Then - // Factory should initialize a new connection, that retries with the lower version - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V3.getCode()); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("mockClusterName")); - assertThatStage(channelFuture) - .isSuccess(channel -> assertThat(channel.getClusterName()).isEqualTo("mockClusterName")); - assertThat(factory.protocolVersion).isEqualTo(DefaultProtocolVersion.V3); - } - - @Test - @UseDataProvider("unsupportedProtocolCodes") - public void should_fail_if_negotiation_finds_no_matching_version(int errorCode) { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - when(protocolVersionRegistry.downgrade(DefaultProtocolVersion.V4)) - .thenReturn(Optional.of(DefaultProtocolVersion.V3)); - when(protocolVersionRegistry.downgrade(DefaultProtocolVersion.V3)).thenReturn(Optional.empty()); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); - // Server does not support v4 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Client retries with v3 - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V3.getCode()); - // Server does not support v3 - writeInboundFrame( - requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); - - // Then - assertThatStage(channelFuture) - .isFailed( - e -> { - assertThat(e) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining( - "Protocol negotiation failed: could not find a common version " - + "(attempted: [V4, V3])"); - assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) - .containsExactly(DefaultProtocolVersion.V4, DefaultProtocolVersion.V3); - }); - } - - /** - * 
Depending on the Cassandra version, an "unsupported protocol" response can use different error - * codes, so we test all of them. - */ - @DataProvider - public static Object[][] unsupportedProtocolCodes() { - return new Object[][] { - new Object[] {ProtocolConstants.ErrorCode.PROTOCOL_ERROR}, - // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451. - new Object[] {ProtocolConstants.ErrorCode.SERVER_ERROR} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java deleted file mode 100644 index 559e11e0bc2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.protocol.internal.response.Ready; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelFactorySupportedOptionsTest extends ChannelFactoryTestBase { - - @Test - public void should_query_supported_options_on_first_channel() throws Throwable { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); - when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); - ChannelFactory factory = newChannelFactory(); - - // When - CompletionStage channelFuture1 = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame( - readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture1).isSuccess(); - DriverChannel channel1 = channelFuture1.toCompletableFuture().get(); - assertThat(channel1.getOptions()).containsKey("mock_key"); - assertThat(channel1.getOptions().get("mock_key")).containsOnly("mock_value"); - - // When - CompletionStage channelFuture2 = - factory.connect( - SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), 
TestResponses.clusterNameResponse("mockClusterName")); - - // Then - assertThatStage(channelFuture2).isSuccess(); - DriverChannel channel2 = channelFuture2.toCompletableFuture().get(); - assertThat(channel2.getOptions()).isNull(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java deleted file mode 100644 index b25a1e9ad71..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.response.Ready; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.local.LocalChannel; -import 
io.netty.channel.local.LocalServerChannel; -import java.time.Duration; -import java.util.Collections; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Exchanger; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.After; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.stubbing.Answer; - -/** - * Sets up the infrastructure for channel factory tests. - * - *

Because the factory manages channel creation itself, {@link - * io.netty.channel.embedded.EmbeddedChannel} is not suitable. Instead, we launch an embedded server - * and connect to it with the local transport. - * - *

The current implementation assumes that only one connection will be tested at a time, but - * support for multiple simultaneous connections could easily be added: store multiple instances of - * requestFrameExchanger and serverResponseChannel, and add a parameter to readOutboundFrame and - * writeInboundFrame (for instance the position of the connection in creation order) to specify - * which instance to use. - */ -@RunWith(DataProviderRunner.class) -public abstract class ChannelFactoryTestBase { - static final EndPoint SERVER_ADDRESS = - new LocalEndPoint(ChannelFactoryTestBase.class.getSimpleName() + "-server"); - - private static final int TIMEOUT_MILLIS = 500; - - DefaultEventLoopGroup serverGroup; - DefaultEventLoopGroup clientGroup; - - @Mock InternalDriverContext context; - @Mock DriverConfig driverConfig; - @Mock DriverExecutionProfile defaultProfile; - @Mock NettyOptions nettyOptions; - @Mock ProtocolVersionRegistry protocolVersionRegistry; - @Mock EventBus eventBus; - @Mock Compressor compressor; - - // The server's I/O thread will store the last received request here, and block until the test - // thread retrieves it. This assumes readOutboundFrame() is called for each actual request, else - // the test will hang forever. 
- private final Exchanger requestFrameExchanger = new Exchanger<>(); - - // The channel that accepts incoming connections on the server - private LocalServerChannel serverAcceptChannel; - // The channel to send responses to the last open connection - private volatile LocalChannel serverResponseChannel; - - @Before - public void setup() throws InterruptedException { - MockitoAnnotations.initMocks(this); - - serverGroup = new DefaultEventLoopGroup(1); - clientGroup = new DefaultEventLoopGroup(1); - - when(context.getConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.isDefined(DefaultDriverOption.AUTH_PROVIDER_CLASS)).thenReturn(false); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT)) - .thenReturn(Duration.ofMillis(TIMEOUT_MILLIS)); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT)) - .thenReturn(Duration.ofMillis(TIMEOUT_MILLIS)); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS)).thenReturn(1); - when(defaultProfile.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL)) - .thenReturn(Duration.ofSeconds(30)); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT)) - .thenReturn(Duration.ofSeconds(5)); - - when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.ioEventLoopGroup()).thenReturn(clientGroup); - when(nettyOptions.channelClass()).thenAnswer((Answer) i -> LocalChannel.class); - when(nettyOptions.allocator()).thenReturn(ByteBufAllocator.DEFAULT); - when(context.getFrameCodec()) - .thenReturn( - FrameCodec.defaultClient( - new ByteBufPrimitiveCodec(ByteBufAllocator.DEFAULT), Compressor.none())); - when(context.getSslHandlerFactory()).thenReturn(Optional.empty()); - when(context.getEventBus()).thenReturn(eventBus); - 
when(context.getWriteCoalescer()).thenReturn(new PassThroughWriteCoalescer(null)); - when(context.getCompressor()).thenReturn(compressor); - - // Start local server - ServerBootstrap serverBootstrap = - new ServerBootstrap() - .group(serverGroup) - .channel(LocalServerChannel.class) - .localAddress(SERVER_ADDRESS.resolve()) - .childHandler(new ServerInitializer()); - ChannelFuture channelFuture = serverBootstrap.bind().sync(); - serverAcceptChannel = (LocalServerChannel) channelFuture.sync().channel(); - } - - // Sets up the pipeline for our local server - private class ServerInitializer extends ChannelInitializer { - @Override - protected void initChannel(LocalChannel ch) throws Exception { - // Install a single handler that stores received requests, so that the test can check what - // the client sent - ch.pipeline() - .addLast( - new ChannelInboundHandlerAdapter() { - @Override - @SuppressWarnings("unchecked") - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - super.channelRead(ctx, msg); - requestFrameExchanger.exchange((Frame) msg); - } - }); - - // Store the channel so that the test can send responses back to the client - serverResponseChannel = ch; - } - } - - protected Frame readOutboundFrame() { - try { - return requestFrameExchanger.exchange(null, TIMEOUT_MILLIS, MILLISECONDS); - } catch (InterruptedException e) { - fail("unexpected interruption while waiting for outbound frame", e); - } catch (TimeoutException e) { - fail("Timed out reading outbound frame"); - } - return null; // never reached - } - - protected void writeInboundFrame(Frame requestFrame, Message response) { - writeInboundFrame(requestFrame, response, requestFrame.protocolVersion); - } - - private void writeInboundFrame(Frame requestFrame, Message response, int protocolVersion) { - serverResponseChannel.writeAndFlush( - Frame.forResponse( - protocolVersion, - requestFrame.streamId, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - response)); - } - 
- /** - * Simulate the sequence of roundtrips to initialize a simple channel without authentication or - * keyspace (avoids repeating it in subclasses). - */ - protected void completeSimpleChannelInit() { - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("mockClusterName")); - } - - ChannelFactory newChannelFactory() { - return new TestChannelFactory(context); - } - - // A simplified channel factory to use in the tests. - // It only installs high-level handlers on the pipeline, not the frame codecs. So we'll receive - // Frame objects on the server side, which is simpler to test. - private static class TestChannelFactory extends ChannelFactory { - - private TestChannelFactory(InternalDriverContext internalDriverContext) { - super(internalDriverContext); - } - - @Override - ChannelInitializer initializer( - EndPoint endPoint, - ProtocolVersion protocolVersion, - DriverChannelOptions options, - NodeMetricUpdater nodeMetricUpdater, - CompletableFuture resultFuture) { - return new ChannelInitializer() { - @Override - protected void initChannel(Channel channel) throws Exception { - try { - DriverExecutionProfile defaultProfile = context.getConfig().getDefaultProfile(); - - long setKeyspaceTimeoutMillis = - defaultProfile - .getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT) - .toMillis(); - int maxRequestsPerConnection = - defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); - - InFlightHandler inFlightHandler = - new InFlightHandler( - protocolVersion, - new StreamIdGenerator(maxRequestsPerConnection), - Integer.MAX_VALUE, - 
setKeyspaceTimeoutMillis, - channel.newPromise(), - null, - "test"); - - HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultProfile); - ProtocolInitHandler initHandler = - new ProtocolInitHandler( - context, - protocolVersion, - getClusterName(), - endPoint, - options, - heartbeatHandler, - productType == null); - channel - .pipeline() - .addLast(ChannelFactory.INFLIGHT_HANDLER_NAME, inFlightHandler) - .addLast(ChannelFactory.INIT_HANDLER_NAME, initHandler); - } catch (Throwable t) { - resultFuture.completeExceptionally(t); - } - } - }; - } - } - - @After - public void tearDown() throws InterruptedException { - serverAcceptChannel.close(); - - serverGroup - .shutdownGracefully(TIMEOUT_MILLIS, TIMEOUT_MILLIS * 2, TimeUnit.MILLISECONDS) - .sync(); - clientGroup - .shutdownGracefully(TIMEOUT_MILLIS, TIMEOUT_MILLIS * 2, TimeUnit.MILLISECONDS) - .sync(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java deleted file mode 100644 index 5feb85a457b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import io.netty.channel.embedded.EmbeddedChannel; -import java.util.Collections; -import org.junit.Before; - -/** - * Infrastructure for channel handler test. - * - *

It relies on an embedded channel where the tested handler is installed. Then the test can - * simulate incoming/outgoing messages, and check that the handler propagates the adequate messages - * upstream/downstream. - */ -public class ChannelHandlerTestBase { - protected EmbeddedChannel channel; - - @Before - public void setup() { - channel = new EmbeddedChannel(); - } - - /** Reads a request frame that we expect the tested handler to have sent inbound. */ - protected Frame readInboundFrame() { - channel.runPendingTasks(); - Object o = channel.readInbound(); - assertThat(o).isInstanceOf(Frame.class); - return ((Frame) o); - } - - /** Reads a request frame that we expect the tested handler to have sent outbound. */ - protected Frame readOutboundFrame() { - channel.runPendingTasks(); - Object o = channel.readOutbound(); - assertThat(o).isInstanceOf(Frame.class); - return ((Frame) o); - } - - protected void assertNoOutboundFrame() { - channel.runPendingTasks(); - Object o = channel.readOutbound(); - assertThat(o).isNull(); - } - - /** Writes a response frame for the tested handler to read. */ - protected void writeInboundFrame(Frame responseFrame) { - channel.writeInbound(responseFrame); - } - - /** Writes a response frame that matches the given request, with the given response message. */ - protected void writeInboundFrame(Frame requestFrame, Message response) { - channel.writeInbound(buildInboundFrame(requestFrame, response)); - } - - /** Builds a response frame matching a request frame. 
*/ - protected Frame buildInboundFrame(Frame requestFrame, Message response) { - return Frame.forResponse( - requestFrame.protocolVersion, - requestFrame.streamId, - null, - requestFrame.customPayload, - Collections.emptyList(), - response); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java deleted file mode 100644 index 6024ed26a5c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import java.net.InetSocketAddress; -import org.junit.Before; -import org.junit.Test; - -public class ConnectInitHandlerTest extends ChannelHandlerTestBase { - - private TestHandler handler; - - @Before - @Override - public void setup() { - super.setup(); - handler = new TestHandler(); - channel.pipeline().addLast(handler); - } - - @Test - public void should_call_onRealConnect_when_connection_succeeds() { - assertThat(handler.hasConnected).isFalse(); - - // When - channel.connect(new InetSocketAddress("localhost", 9042)); - - // Then - assertThat(handler.hasConnected).isTrue(); - } - - @Test - public void should_not_complete_connect_future_before_triggered_by_handler() { - // When - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // Then - assertThat(connectFuture.isDone()).isFalse(); - } - - @Test - public void should_complete_connect_future_when_handler_completes() { - // Given - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // When - handler.setConnectSuccess(); - - // Then - assertThat(connectFuture.isSuccess()).isTrue(); - } - - @Test - public void should_remove_handler_from_pipeline_when_handler_completes() { - // Given - channel.connect(new InetSocketAddress("localhost", 9042)); - - // When - handler.setConnectSuccess(); - - // Then - assertThat(channel.pipeline().get(TestHandler.class)).isNull(); - } - - @Test - public void should_fail_connect_future_when_handler_fails() { - // Given - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - Exception exception = new Exception("test"); - - // When - handler.setConnectFailure(exception); - - // Then - assertThat(connectFuture).isFailed(e -> 
assertThat(e).isEqualTo(exception)); - } - - /** - * Well-behaved implementations should not call setConnect* multiple times in a row, but check - * that we handle it gracefully if they do. - */ - @Test - public void should_ignore_subsequent_calls_if_handler_already_failed() { - // Given - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - Exception exception = new Exception("test"); - - // When - handler.setConnectFailure(exception); - handler.setConnectFailure(new Exception("test2")); - handler.setConnectSuccess(); - - // Then - assertThat(connectFuture).isFailed(e -> assertThat(e).isEqualTo(exception)); - } - - static class TestHandler extends ConnectInitHandler { - boolean hasConnected; - - @Override - protected void onRealConnect(ChannelHandlerContext ctx) { - hasConnected = true; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java deleted file mode 100644 index e0660b9609e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.Void; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Future; -import java.util.AbstractMap; -import java.util.ArrayDeque; -import java.util.Map; -import java.util.Queue; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DriverChannelTest extends ChannelHandlerTestBase { - public static final int SET_KEYSPACE_TIMEOUT_MILLIS = 100; - - private DriverChannel driverChannel; - private MockWriteCoalescer writeCoalescer; - - @Mock private StreamIdGenerator streamIds; - - @Before - @Override - public void setup() { - super.setup(); - MockitoAnnotations.initMocks(this); - channel - .pipeline() - .addLast( - new InFlightHandler( - DefaultProtocolVersion.V3, - streamIds, - Integer.MAX_VALUE, - SET_KEYSPACE_TIMEOUT_MILLIS, - channel.newPromise(), - null, - "test")); - writeCoalescer = new MockWriteCoalescer(); - driverChannel = - new DriverChannel( - new EmbeddedEndPoint(), channel, writeCoalescer, DefaultProtocolVersion.V3); - } - - /** - * Ensures that the potential delay introduced by the write coalescer does not mess with the - * graceful shutdown sequence: any write submitted before {@link DriverChannel#close()} is - * guaranteed to complete. 
- */ - @Test - public void should_wait_for_coalesced_writes_when_closing_gracefully() { - // Given - MockResponseCallback responseCallback = new MockResponseCallback(); - driverChannel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); - // nothing written yet because the coalescer hasn't flushed - assertNoOutboundFrame(); - - // When - Future closeFuture = driverChannel.close(); - - // Then - // not closed yet because there is still a pending write - assertThat(closeFuture).isNotDone(); - assertNoOutboundFrame(); - - // When - // the coalescer finally runs - writeCoalescer.triggerFlush(); - - // Then - // the pending write goes through - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame).isNotNull(); - // not closed yet because there is now a pending response - assertThat(closeFuture).isNotDone(); - - // When - // the pending response arrives - writeInboundFrame(requestFrame, Void.INSTANCE); - assertThat(responseCallback.getLastResponse().message).isEqualTo(Void.INSTANCE); - - // Then - assertThat(closeFuture).isSuccess(); - } - - /** - * Ensures that the potential delay introduced by the write coalescer does not mess with the - * forceful shutdown sequence: any write submitted before {@link DriverChannel#forceClose()} - * should get the "Channel was force-closed" error, whether it had been flushed or not. 
- */ - @Test - public void should_wait_for_coalesced_writes_when_closing_forcefully() { - // Given - MockResponseCallback responseCallback = new MockResponseCallback(); - driverChannel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); - // nothing written yet because the coalescer hasn't flushed - assertNoOutboundFrame(); - - // When - Future closeFuture = driverChannel.forceClose(); - - // Then - // not closed yet because there is still a pending write - assertThat(closeFuture).isNotDone(); - assertNoOutboundFrame(); - - // When - // the coalescer finally runs - writeCoalescer.triggerFlush(); - // and the pending write goes through - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame).isNotNull(); - - // Then - assertThat(closeFuture).isSuccess(); - assertThat(responseCallback.getFailure()) - .isInstanceOf(ClosedConnectionException.class) - .hasMessageContaining("Channel was force-closed"); - } - - // Simple implementation that holds all the writes, and flushes them when it's explicitly - // triggered. 
- private class MockWriteCoalescer implements WriteCoalescer { - private Queue> messages = new ArrayDeque<>(); - - @Override - public ChannelFuture writeAndFlush(Channel channel, Object message) { - assertThat(channel).isEqualTo(DriverChannelTest.this.channel); - ChannelPromise writePromise = channel.newPromise(); - messages.offer(new AbstractMap.SimpleEntry<>(message, writePromise)); - return writePromise; - } - - void triggerFlush() { - for (Map.Entry entry : messages) { - channel.writeAndFlush(entry.getKey(), entry.getValue()); - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java deleted file mode 100644 index 5e463299a66..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.SocketAddress; - -/** Endpoint implementation for unit tests that use an embedded Netty channel. */ -public class EmbeddedEndPoint implements EndPoint { - - @NonNull - @Override - public SocketAddress resolve() { - throw new UnsupportedOperationException("This should not get called from unit tests"); - } - - @NonNull - @Override - public String asMetricPrefix() { - throw new UnsupportedOperationException("This should not get called from unit tests"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java deleted file mode 100644 index 35049e99af1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java +++ /dev/null @@ -1,660 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.internal.core.protocol.FrameDecodingException; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.event.StatusChangeEvent; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.response.result.Void; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class InFlightHandlerTest extends ChannelHandlerTestBase { - private static final Query QUERY = new Query("select * from foo"); - private static final int SET_KEYSPACE_TIMEOUT_MILLIS = 100; - private static final int MAX_ORPHAN_IDS = 10; - - @Mock private StreamIdGenerator streamIds; - - @Before - @Override - public void setup() { - super.setup(); - MockitoAnnotations.initMocks(this); - 
when(streamIds.preAcquire()).thenReturn(true); - } - - @Test - public void should_fail_if_connection_busy() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(-1); - - // When - ChannelFuture writeFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage( - QUERY, false, Frame.NO_PAYLOAD, new MockResponseCallback())); - - // Then - assertThat(writeFuture) - .isFailed(e -> assertThat(e).isInstanceOf(BusyConnectionException.class)); - } - - @Test - public void should_assign_streamid_and_send_frame() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - - // When - ChannelFuture writeFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - - // Then - assertThat(writeFuture).isSuccess(); - verify(streamIds).acquire(); - - Frame frame = readOutboundFrame(); - assertThat(frame.streamId).isEqualTo(42); - assertThat(frame.message).isEqualTo(QUERY); - } - - @Test - public void should_notify_callback_of_response() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - Frame requestFrame = readOutboundFrame(); - - // When - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - - // Then - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - verify(streamIds).release(42); - } - - @Test - public void should_notify_response_promise_when_decoding_fails() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, 
Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - RuntimeException mockCause = new RuntimeException("test"); - channel.pipeline().fireExceptionCaught(new FrameDecodingException(42, mockCause)); - - // Then - assertThat(responseCallback.getFailure()).isSameAs(mockCause); - verify(streamIds).release(42); - } - - @Test - public void should_release_stream_id_when_orphaned_callback_receives_response() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - Frame requestFrame = readOutboundFrame(); - - // When - channel.writeAndFlush(responseCallback); // means cancellation (see DriverChannel#cancel) - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - - // Then - verify(streamIds).release(42); - // The response is not propagated, because we assume a callback that cancelled managed its own - // termination - assertThat(responseCallback.getLastResponse()).isNull(); - } - - @Test - public void should_delay_graceful_close_and_complete_when_last_pending_completes() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - // not closed yet because there is one pending request - assertThat(channel.closeFuture()).isNotDone(); - - // When - // completing pending request - Frame requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, Void.INSTANCE); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void 
should_delay_graceful_close_and_complete_when_last_pending_cancelled() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - // not closed yet because there is one pending request - assertThat(channel.closeFuture()).isNotDone(); - - // When - // cancelling pending request - channel.write(responseCallback); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_graceful_close_immediately_if_no_pending() { - // Given - addToPipeline(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_refuse_new_writes_during_graceful_close() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.GRACEFUL_CLOSE_MESSAGE); - - // Then - // not closed yet because there is one pending request - assertThat(channel.closeFuture()).isNotDone(); - // should not allow other write - ChannelFuture otherWriteFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - assertThat(otherWriteFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Channel is closing")); - } - - @Test - public void should_close_gracefully_if_orphan_ids_above_max_and_pending_request() { - // Given - addToPipeline(); - // Generate n orphan ids by writing and cancelling the requests: - 
for (int i = 0; i < MAX_ORPHAN_IDS; i++) { - when(streamIds.acquire()).thenReturn(i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - } - // Generate another request that is pending and not cancelled: - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS); - MockResponseCallback pendingResponseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage( - QUERY, false, Frame.NO_PAYLOAD, pendingResponseCallback)) - .awaitUninterruptibly(); - - // When - // Generate the n+1th orphan id that makes us go above the threshold - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS + 1); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - - // Then - // Channel should be closing gracefully. 
There's no way to observe that from the outside, so - // write another request and check that it's rejected: - assertThat(channel.closeFuture()).isNotDone(); - ChannelFuture otherWriteFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)); - assertThat(otherWriteFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Channel is closing")); - - // When - // Cancel the last pending request - channel.writeAndFlush(pendingResponseCallback).awaitUninterruptibly(); - - // Then - // The graceful shutdown completes - assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_close_gracefully_if_orphan_ids_above_max_and_multiple_pending_requests() { - // Given - addToPipeline(); - // Generate n orphan ids by writing and cancelling the requests. - for (int i = 0; i < MAX_ORPHAN_IDS; i++) { - when(streamIds.acquire()).thenReturn(i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - } - // Generate 3 additional requests that are pending and not cancelled. - List pendingResponseCallbacks = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS + i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - pendingResponseCallbacks.add(responseCallback); - } - - // When - // Generate the n+1th orphan id that makes us go above the threshold by canceling one if the - // pending requests. 
- channel.writeAndFlush(pendingResponseCallbacks.remove(0)).awaitUninterruptibly(); - - // Then - // Channel should be closing gracefully but there's no way to observe that from the outside - // besides writing another request and check that it's rejected. - assertThat(channel.closeFuture()).isNotDone(); - ChannelFuture otherWriteFuture = - channel.writeAndFlush( - new DriverChannel.RequestMessage( - QUERY, false, Frame.NO_PAYLOAD, new MockResponseCallback())); - assertThat(otherWriteFuture).isFailed(); - assertThat(otherWriteFuture.cause()) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Channel is closing"); - - // When - // Cancel the remaining pending requests causing the n+ith orphan ids above the threshold. - for (MockResponseCallback pendingResponseCallback : pendingResponseCallbacks) { - ChannelFuture future = channel.writeAndFlush(pendingResponseCallback).awaitUninterruptibly(); - - // Then - // The future should succeed even though the channel has started closing gracefully. - assertThat(future).isSuccess(); - } - - // Then - // The graceful shutdown completes. 
- assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_close_immediately_if_orphan_ids_above_max_and_no_pending_requests() { - // Given - addToPipeline(); - // Generate n orphan ids by writing and cancelling the requests: - for (int i = 0; i < MAX_ORPHAN_IDS; i++) { - when(streamIds.acquire()).thenReturn(i); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - } - - // When - // Generate the n+1th orphan id that makes us go above the threshold - when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS); - MockResponseCallback responseCallback = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - channel.writeAndFlush(responseCallback).awaitUninterruptibly(); - - // Then - // Channel should close immediately since no active pending requests. 
- assertThat(channel.closeFuture()).isSuccess(); - } - - @Test - public void should_fail_all_pending_when_force_closed() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42, 43); - MockResponseCallback responseCallback1 = new MockResponseCallback(); - MockResponseCallback responseCallback2 = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback1)) - .awaitUninterruptibly(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback2)) - .awaitUninterruptibly(); - - // When - channel.write(DriverChannel.FORCEFUL_CLOSE_MESSAGE); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - for (MockResponseCallback callback : ImmutableList.of(responseCallback1, responseCallback2)) { - assertThat(callback.getFailure()) - .isInstanceOf(ClosedConnectionException.class) - .hasMessageContaining("Channel was force-closed"); - } - } - - @Test - public void should_fail_all_pending_and_close_on_unexpected_inbound_exception() throws Throwable { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42, 43); - MockResponseCallback responseCallback1 = new MockResponseCallback(); - MockResponseCallback responseCallback2 = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback1)) - .awaitUninterruptibly(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback2)) - .awaitUninterruptibly(); - - // When - RuntimeException mockException = new RuntimeException("test"); - channel.pipeline().fireExceptionCaught(mockException); - - // Then - assertThat(channel.closeFuture()).isSuccess(); - for (MockResponseCallback callback : ImmutableList.of(responseCallback1, responseCallback2)) { - Throwable failure = callback.getFailure(); - 
assertThat(failure).isInstanceOf(ClosedConnectionException.class); - assertThat(failure.getCause()).isSameAs(mockException); - } - } - - @Test - public void should_fail_all_pending_if_connection_lost() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42, 43); - MockResponseCallback responseCallback1 = new MockResponseCallback(); - MockResponseCallback responseCallback2 = new MockResponseCallback(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback1)) - .awaitUninterruptibly(); - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback2)) - .awaitUninterruptibly(); - - // When - channel.pipeline().fireChannelInactive(); - - // Then - for (MockResponseCallback callback : ImmutableList.of(responseCallback1, responseCallback2)) { - assertThat(callback.getFailure()) - .isInstanceOf(ClosedConnectionException.class) - .hasMessageContaining("Lost connection to remote peer"); - } - } - - @Test - public void should_hold_stream_id_for_multi_response_callback() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = - new MockResponseCallback(frame -> frame.message instanceof Error); - - // When - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - // Then - // notify callback of stream id - assertThat(responseCallback.streamId).isEqualTo(42); - - Frame requestFrame = readOutboundFrame(); - for (int i = 0; i < 5; i++) { - // When - // completing pending request - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - - // Then - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - // Stream id not released, callback can receive more responses - verify(streamIds, never()).release(42); - } - - // When - // a terminal 
response comes in - Frame responseFrame = buildInboundFrame(requestFrame, new Error(0, "test")); - writeInboundFrame(responseFrame); - - // Then - verify(streamIds).release(42); - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - - // When - // more responses come in - writeInboundFrame(requestFrame, Void.INSTANCE); - - // Then - // the callback does not get them anymore (this could only be responses to a new request that - // reused the id) - assertThat(responseCallback.getLastResponse()).isNull(); - } - - @Test - public void - should_release_stream_id_when_orphaned_multi_response_callback_receives_last_response() { - // Given - addToPipeline(); - when(streamIds.acquire()).thenReturn(42); - MockResponseCallback responseCallback = - new MockResponseCallback(frame -> frame.message instanceof Error); - - channel - .writeAndFlush( - new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) - .awaitUninterruptibly(); - - Frame requestFrame = readOutboundFrame(); - for (int i = 0; i < 5; i++) { - Frame responseFrame = buildInboundFrame(requestFrame, Void.INSTANCE); - writeInboundFrame(responseFrame); - assertThat(responseCallback.getLastResponse()).isSameAs(responseFrame); - verify(streamIds, never()).release(42); - } - - // When - // cancelled mid-flight - channel.writeAndFlush(responseCallback); - - // Then - // subsequent non-final responses are not propagated (we assume the callback completed itself - // already), but do not release the stream id - writeInboundFrame(requestFrame, Void.INSTANCE); - assertThat(responseCallback.getLastResponse()).isNull(); - verify(streamIds, never()).release(42); - - // When - // the terminal response arrives - writeInboundFrame(requestFrame, new Error(0, "test")); - - // Then - // still not propagated but the id is released - assertThat(responseCallback.getLastResponse()).isNull(); - verify(streamIds).release(42); - } - - @Test - public void should_set_keyspace() { - // Given - 
addToPipeline(); - ChannelPromise setKeyspacePromise = channel.newPromise(); - DriverChannel.SetKeyspaceEvent setKeyspaceEvent = - new DriverChannel.SetKeyspaceEvent(CqlIdentifier.fromCql("ks"), setKeyspacePromise); - - // When - channel.pipeline().fireUserEventTriggered(setKeyspaceEvent); - Frame requestFrame = readOutboundFrame(); - - // Then - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, new SetKeyspace("ks")); - assertThat(setKeyspacePromise).isSuccess(); - } - - @Test - public void should_fail_to_set_keyspace_if_query_times_out() throws InterruptedException { - // Given - addToPipeline(); - ChannelPromise setKeyspacePromise = channel.newPromise(); - DriverChannel.SetKeyspaceEvent setKeyspaceEvent = - new DriverChannel.SetKeyspaceEvent(CqlIdentifier.fromCql("ks"), setKeyspacePromise); - - // When - channel.pipeline().fireUserEventTriggered(setKeyspaceEvent); - TimeUnit.MILLISECONDS.sleep(SET_KEYSPACE_TIMEOUT_MILLIS * 2); - channel.runPendingTasks(); - - // Then - assertThat(setKeyspacePromise).isFailed(); - } - - @Test - public void should_notify_callback_of_events() { - // Given - EventCallback eventCallback = mock(EventCallback.class); - addToPipelineWithEventCallback(eventCallback); - - // When - StatusChangeEvent event = - new StatusChangeEvent( - ProtocolConstants.StatusChangeType.UP, new InetSocketAddress("127.0.0.1", 9042)); - Frame eventFrame = - Frame.forResponse( - DefaultProtocolVersion.V3.getCode(), - -1, - null, - Collections.emptyMap(), - Collections.emptyList(), - event); - writeInboundFrame(eventFrame); - - // Then - ArgumentCaptor captor = ArgumentCaptor.forClass(StatusChangeEvent.class); - verify(eventCallback).onEvent(captor.capture()); - assertThat(captor.getValue()).isSameAs(event); - } - - private void addToPipeline() { - addToPipelineWithEventCallback(null); - } - - private void addToPipelineWithEventCallback(EventCallback eventCallback) { - channel - .pipeline() - .addLast( - new 
InFlightHandler( - DefaultProtocolVersion.V3, - streamIds, - MAX_ORPHAN_IDS, - SET_KEYSPACE_TIMEOUT_MILLIS, - channel.newPromise(), - eventCallback, - "test")); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java deleted file mode 100644 index c90731eece9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.channel.local.LocalAddress; -import java.net.SocketAddress; - -/** Endpoint implementation for unit tests that use the local Netty transport. 
*/ -public class LocalEndPoint implements EndPoint { - - private final LocalAddress localAddress; - - public LocalEndPoint(String id) { - this.localAddress = new LocalAddress(id); - } - - @NonNull - @Override - public SocketAddress resolve() { - return localAddress; - } - - @NonNull - @Override - public String asMetricPrefix() { - throw new UnsupportedOperationException("This should not get called from unit tests"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java deleted file mode 100644 index 6015203ed38..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.driver.api.core.auth.SyncAuthenticator; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; - -/** - * Dummy authenticator for our tests. - * - *

The initial response is hard-coded. When the server asks it to evaluate a challenge, it always - * replies with the same token. When authentication succeeds, the success token is stored for later - * inspection. - */ -public class MockAuthenticator implements SyncAuthenticator { - static final String INITIAL_RESPONSE = "0xcafebabe"; - - volatile String successToken; - - @Override - public ByteBuffer initialResponseSync() { - return Bytes.fromHexString(INITIAL_RESPONSE); - } - - @Override - public ByteBuffer evaluateChallengeSync(ByteBuffer challenge) { - return challenge; - } - - @Override - public void onAuthenticationSuccessSync(ByteBuffer token) { - successToken = Bytes.toHexString(token); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java deleted file mode 100644 index 43768131108..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.internal.util.MockUtil; -import org.mockito.stubbing.OngoingStubbing; - -/** - * Helper class to set up and verify a sequence of invocations on a ChannelFactory mock. - * - *

Use the builder at the beginning of the test to stub expected calls. Then call the verify - * methods throughout the test to check that each call has been performed. - * - *

This class handles asynchronous calls to the thread factory, but it must be used from a single - * thread (see {@link #waitForCalls(Node, int)}). - */ -public class MockChannelFactoryHelper { - - private static final int CONNECT_TIMEOUT_MILLIS = 500; - - public static Builder builder(ChannelFactory channelFactory) { - return new Builder(channelFactory); - } - - private final ChannelFactory channelFactory; - private final InOrder inOrder; - // If waitForCalls sees more invocations than expected, the difference is stored here - private final Map previous = new HashMap<>(); - - public MockChannelFactoryHelper(ChannelFactory channelFactory) { - this.channelFactory = channelFactory; - this.inOrder = inOrder(channelFactory); - } - - public void waitForCall(Node node) { - waitForCalls(node, 1); - } - - /** - * Waits for a given number of calls to {@code ChannelFactory.connect()}. - * - *

Because we test asynchronous, non-blocking code, there might already be more calls than - * expected when this method is called. If so, the extra calls are stored and stored and will be - * taken into account next time. - */ - public void waitForCalls(Node node, int expected) { - int fromLastTime = previous.getOrDefault(node, 0); - if (fromLastTime >= expected) { - previous.put(node, fromLastTime - expected); - return; - } - expected -= fromLastTime; - - // Because we test asynchronous, non-blocking code, there might have been already more - // invocations than expected. Use `atLeast` and a captor to find out. - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - inOrder - .verify(channelFactory, timeout(CONNECT_TIMEOUT_MILLIS).atLeast(expected)) - .connect(eq(node), optionsCaptor.capture()); - int actual = optionsCaptor.getAllValues().size(); - - int extras = actual - expected; - if (extras > 0) { - previous.compute(node, (k, v) -> (v == null) ? extras : v + extras); - } - } - - public void verifyNoMoreCalls() { - inOrder - .verify(channelFactory, timeout(CONNECT_TIMEOUT_MILLIS).times(0)) - .connect(any(Node.class), any(DriverChannelOptions.class)); - - Set counts = Sets.newHashSet(previous.values()); - if (!counts.isEmpty()) { - assertThat(counts).containsExactly(0); - } - } - - public static class Builder { - private final ChannelFactory channelFactory; - private final ListMultimap invocations = - MultimapBuilder.hashKeys().arrayListValues().build(); - - public Builder(ChannelFactory channelFactory) { - assertThat(MockUtil.isMock(channelFactory)).as("expected a mock").isTrue(); - verifyZeroInteractions(channelFactory); - this.channelFactory = channelFactory; - } - - public Builder success(Node node, DriverChannel channel) { - invocations.put(node, channel); - return this; - } - - public Builder failure(Node node, String error) { - invocations.put(node, new Exception(error)); - return this; - } - - public Builder failure(Node 
node, Throwable error) { - invocations.put(node, error); - return this; - } - - public Builder pending(Node node, CompletableFuture future) { - invocations.put(node, future); - return this; - } - - public MockChannelFactoryHelper build() { - stub(); - return new MockChannelFactoryHelper(channelFactory); - } - - private void stub() { - for (Node node : invocations.keySet()) { - Deque> results = new ArrayDeque<>(); - for (Object object : invocations.get(node)) { - if (object instanceof DriverChannel) { - results.add(CompletableFuture.completedFuture(((DriverChannel) object))); - } else if (object instanceof Throwable) { - results.add(CompletableFutures.failedFuture(((Throwable) object))); - } else if (object instanceof CompletableFuture) { - @SuppressWarnings("unchecked") - CompletionStage future = (CompletionStage) object; - results.add(future); - } else { - fail("unexpected type: " + object.getClass()); - } - } - if (results.size() > 0) { - CompletionStage first = results.poll(); - OngoingStubbing> ongoingStubbing = - when(channelFactory.connect(eq(node), any(DriverChannelOptions.class))) - .thenReturn(first); - for (CompletionStage result : results) { - ongoingStubbing.thenReturn(result); - } - } - } - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java deleted file mode 100644 index 8774ee3e298..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import com.datastax.oss.protocol.internal.Frame; -import java.util.ArrayDeque; -import java.util.Queue; -import java.util.function.Predicate; - -class MockResponseCallback implements ResponseCallback { - private final Queue responses = new ArrayDeque<>(); - private final Predicate isLastResponse; - - volatile int streamId = -1; - - MockResponseCallback() { - this(f -> true); - } - - MockResponseCallback(Predicate isLastResponse) { - this.isLastResponse = isLastResponse; - } - - @Override - public void onResponse(Frame responseFrame) { - responses.offer(responseFrame); - } - - @Override - public void onFailure(Throwable error) { - responses.offer(error); - } - - @Override - public boolean isLastResponse(Frame responseFrame) { - return isLastResponse.test(responseFrame); - } - - @Override - public void onStreamIdAssigned(int streamId) { - this.streamId = streamId; - } - - Frame getLastResponse() { - return (Frame) responses.poll(); - } - - Throwable getFailure() { - return (Throwable) responses.poll(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java deleted file mode 100644 index 2fd12fc9f94..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java +++ /dev/null @@ -1,652 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import 
com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.TestResponses; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.AuthResponse; -import com.datastax.oss.protocol.internal.request.Options; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.request.Register; -import com.datastax.oss.protocol.internal.request.Startup; -import com.datastax.oss.protocol.internal.response.AuthChallenge; -import com.datastax.oss.protocol.internal.response.AuthSuccess; -import com.datastax.oss.protocol.internal.response.Authenticate; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.Ready; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.channel.ChannelFuture; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.ConcurrentModificationException; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.LoggerFactory; - -public class ProtocolInitHandlerTest extends ChannelHandlerTestBase { - - private static final long QUERY_TIMEOUT_MILLIS = 100L; - // The handled only uses this to call the auth provider and for exception 
messages, so the actual - // value doesn't matter: - private static final EndPoint END_POINT = TestNodeFactory.newEndPoint(1); - - @Mock private InternalDriverContext internalDriverContext; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private Appender appender; - - private ProtocolVersionRegistry protocolVersionRegistry = - new DefaultProtocolVersionRegistry("test"); - private HeartbeatHandler heartbeatHandler; - - @Before - @Override - public void setup() { - super.setup(); - MockitoAnnotations.initMocks(this); - when(internalDriverContext.getConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT)) - .thenReturn(Duration.ofMillis(QUERY_TIMEOUT_MILLIS)); - when(defaultProfile.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL)) - .thenReturn(Duration.ofSeconds(30)); - when(internalDriverContext.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - - channel - .pipeline() - .addLast( - ChannelFactory.INFLIGHT_HANDLER_NAME, - new InFlightHandler( - DefaultProtocolVersion.V4, - new StreamIdGenerator(100), - Integer.MAX_VALUE, - 100, - channel.newPromise(), - null, - "test")); - - heartbeatHandler = new HeartbeatHandler(defaultProfile); - } - - @Test - public void should_initialize() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // It should send a STARTUP message - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a READY response - 
writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - - // Simulate the cluster name check - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - // Init should complete - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_query_supported_options() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - true)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // It should send an OPTIONS message - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Options.class); - assertThat(connectFuture).isNotDone(); - - // Simulate the SUPPORTED response - writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); - - Map> supportedOptions = channel.attr(DriverChannel.OPTIONS_KEY).get(); - assertThat(supportedOptions).containsKey("mock_key"); - assertThat(supportedOptions.get("mock_key")).containsOnly("mock_value"); - - // It should send a STARTUP message - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a READY response - writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - - // Simulate the cluster name check - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - // Init should complete - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_add_heartbeat_handler_to_pipeline_on_success() { - ProtocolInitHandler protocolInitHandler = - new 
ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false); - - channel.pipeline().addLast(ChannelFactory.INIT_HANDLER_NAME, protocolInitHandler); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - // heartbeat should initially not be in pipeline - assertThat(channel.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME)).isNull(); - - // It should send a STARTUP message - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a READY response - writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - - // Simulate the cluster name check - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - // Init should complete - assertThat(connectFuture).isSuccess(); - - // should have added heartbeat handler to pipeline. - assertThat(channel.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME)) - .isEqualTo(heartbeatHandler); - // should have removed itself from pipeline. 
- assertThat(channel.pipeline().last()).isNotEqualTo(protocolInitHandler); - } - - @Test - public void should_fail_to_initialize_if_init_query_times_out() throws InterruptedException { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - readOutboundFrame(); - - // Simulate a pause longer than the timeout - TimeUnit.MILLISECONDS.sleep(QUERY_TIMEOUT_MILLIS * 2); - channel.runPendingTasks(); - - assertThat(connectFuture).isFailed(); - } - - @Test - public void should_initialize_with_authentication() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - String serverAuthenticator = "mockServerAuthenticator"; - AuthProvider authProvider = mock(AuthProvider.class); - MockAuthenticator authenticator = new MockAuthenticator(); - when(authProvider.newAuthenticator(END_POINT, serverAuthenticator)).thenReturn(authenticator); - when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - // Simulate a response that says that the server requires authentication - writeInboundFrame(requestFrame, new Authenticate(serverAuthenticator)); - - // The connection should have created an authenticator from the auth provider - verify(authProvider).newAuthenticator(END_POINT, serverAuthenticator); - - // And sent an auth response - requestFrame = 
readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(AuthResponse.class); - AuthResponse authResponse = (AuthResponse) requestFrame.message; - assertThat(Bytes.toHexString(authResponse.token)).isEqualTo(MockAuthenticator.INITIAL_RESPONSE); - assertThat(connectFuture).isNotDone(); - - // As long as the server sends an auth challenge, the client should reply with another - // auth_response - String mockToken = "0xabcd"; - for (int i = 0; i < 5; i++) { - writeInboundFrame(requestFrame, new AuthChallenge(Bytes.fromHexString(mockToken))); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(AuthResponse.class); - authResponse = (AuthResponse) requestFrame.message; - // Our mock impl happens to send back the same token - assertThat(Bytes.toHexString(authResponse.token)).isEqualTo(mockToken); - assertThat(connectFuture).isNotDone(); - } - - // When the server finally sends back a success message, should proceed to the cluster name - // check and succeed - writeInboundFrame(requestFrame, new AuthSuccess(Bytes.fromHexString(mockToken))); - assertThat(authenticator.successToken).isEqualTo(mockToken); - - requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_invoke_auth_provider_when_server_does_not_send_challenge() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - AuthProvider authProvider = mock(AuthProvider.class); - when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - 
assertThat(requestFrame.message).isInstanceOf(Startup.class); - - // Simulate a READY response, the provider should be notified - writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); - verify(authProvider).onMissingChallenge(END_POINT); - - // Since our mock does nothing, init should proceed normally - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_fail_to_initialize_if_server_sends_auth_error() throws Throwable { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - String serverAuthenticator = "mockServerAuthenticator"; - AuthProvider authProvider = mock(AuthProvider.class); - MockAuthenticator authenticator = new MockAuthenticator(); - when(authProvider.newAuthenticator(END_POINT, serverAuthenticator)).thenReturn(authenticator); - when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Startup.class); - assertThat(connectFuture).isNotDone(); - - writeInboundFrame(requestFrame, new Authenticate("mockServerAuthenticator")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(AuthResponse.class); - assertThat(connectFuture).isNotDone(); - - writeInboundFrame( - requestFrame, new Error(ProtocolConstants.ErrorCode.AUTH_ERROR, "mock error")); - - assertThat(connectFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(AuthenticationException.class) - .hasMessage( - String.format( - "Authentication error on 
node %s: server replied with 'mock error' to AuthResponse request", - END_POINT))); - } - - @Test - public void should_check_cluster_name_if_provided() { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - "expectedClusterName", - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - Frame requestFrame = readOutboundFrame(); - writeInboundFrame(requestFrame, new Ready()); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - Query query = (Query) requestFrame.message; - assertThat(query.query).isEqualTo("SELECT cluster_name FROM system.local"); - assertThat(connectFuture).isNotDone(); - - writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("expectedClusterName")); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_fail_to_initialize_if_cluster_name_does_not_match() throws Throwable { - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - "expectedClusterName", - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame( - readOutboundFrame(), TestResponses.clusterNameResponse("differentClusterName")); - - assertThat(connectFuture) - .isFailed( - e -> - assertThat(e) - .isInstanceOf(ClusterNameMismatchException.class) - .hasMessageContaining( - String.format( - "Node %s reports cluster name 'differentClusterName' that doesn't match our cluster name 'expectedClusterName'.", - END_POINT))); - } - - @Test - public void should_initialize_with_keyspace() { - DriverChannelOptions 
options = - DriverChannelOptions.builder().withKeyspace(CqlIdentifier.fromCql("ks")).build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - options, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - assertThat(((Query) requestFrame.message).query).isEqualTo("USE \"ks\""); - writeInboundFrame(requestFrame, new SetKeyspace("ks")); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_initialize_with_events() { - List eventTypes = ImmutableList.of("foo", "bar"); - EventCallback eventCallback = mock(EventCallback.class); - DriverChannelOptions driverChannelOptions = - DriverChannelOptions.builder().withEvents(eventTypes, eventCallback).build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - driverChannelOptions, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Register.class); - assertThat(((Register) requestFrame.message).eventTypes).containsExactly("foo", "bar"); - writeInboundFrame(requestFrame, new Ready()); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_initialize_with_keyspace_and_events() { - List eventTypes 
= ImmutableList.of("foo", "bar"); - EventCallback eventCallback = mock(EventCallback.class); - DriverChannelOptions driverChannelOptions = - DriverChannelOptions.builder() - .withKeyspace(CqlIdentifier.fromCql("ks")) - .withEvents(eventTypes, eventCallback) - .build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - driverChannelOptions, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - assertThat(((Query) requestFrame.message).query).isEqualTo("USE \"ks\""); - writeInboundFrame(requestFrame, new SetKeyspace("ks")); - - requestFrame = readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Register.class); - assertThat(((Register) requestFrame.message).eventTypes).containsExactly("foo", "bar"); - writeInboundFrame(requestFrame, new Ready()); - - assertThat(connectFuture).isSuccess(); - } - - @Test - public void should_fail_to_initialize_if_keyspace_is_invalid() { - DriverChannelOptions driverChannelOptions = - DriverChannelOptions.builder().withKeyspace(CqlIdentifier.fromCql("ks")).build(); - channel - .pipeline() - .addLast( - ChannelFactory.INIT_HANDLER_NAME, - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - driverChannelOptions, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - - writeInboundFrame(readOutboundFrame(), new Ready()); - writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("someClusterName")); - - Frame requestFrame = 
readOutboundFrame(); - assertThat(requestFrame.message).isInstanceOf(Query.class); - assertThat(((Query) requestFrame.message).query).isEqualTo("USE \"ks\""); - writeInboundFrame( - requestFrame, new Error(ProtocolConstants.ErrorCode.INVALID, "invalid keyspace")); - - assertThat(connectFuture) - .isFailed( - error -> - assertThat(error) - .isInstanceOf(InvalidKeyspaceException.class) - .hasMessage("invalid keyspace")); - } - - /** - * This covers a corner case where {@code abortAllInFlight} was recursing into itself, causing a - * {@link ConcurrentModificationException}. This was recoverable but caused Netty to generate a - * warning log. - * - * @see JAVA-2838 - */ - @Test - public void should_fail_pending_requests_only_once_if_init_fails() { - Logger logger = - (Logger) LoggerFactory.getLogger("io.netty.channel.AbstractChannelHandlerContext"); - Level levelBefore = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - - channel - .pipeline() - .addLast( - "init", - new ProtocolInitHandler( - internalDriverContext, - DefaultProtocolVersion.V4, - null, - END_POINT, - DriverChannelOptions.DEFAULT, - heartbeatHandler, - false)); - - ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); - channel.pipeline().fireExceptionCaught(new IOException("Mock I/O exception")); - assertThat(connectFuture).isFailed(); - - verify(appender, never()).doAppend(any(ILoggingEvent.class)); - - logger.detachAppender(appender); - logger.setLevel(levelBefore); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java deleted file mode 100644 index 83802884c45..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.channel; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class StreamIdGeneratorTest { - @Test - public void should_have_all_available_upon_creation() { - StreamIdGenerator generator = new StreamIdGenerator(8); - assertThat(generator.getAvailableIds()).isEqualTo(8); - } - - @Test - public void should_return_available_ids_in_sequence() { - StreamIdGenerator generator = new StreamIdGenerator(8); - for (int i = 0; i < 8; i++) { - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(i); - assertThat(generator.getAvailableIds()).isEqualTo(7 - i); - } - } - - @Test - public void should_return_minus_one_when_no_id_available() { - StreamIdGenerator generator = new StreamIdGenerator(8); - for (int i = 0; i < 8; i++) { - assertThat(generator.preAcquire()).isTrue(); - // also validating that ids are held as soon as preAcquire() is called, even if acquire() has - // not been invoked yet - } - assertThat(generator.getAvailableIds()).isEqualTo(0); - assertThat(generator.preAcquire()).isFalse(); - } - - @Test - public void should_return_previously_released_ids() { - StreamIdGenerator generator = new StreamIdGenerator(8); - 
for (int i = 0; i < 8; i++) { - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(i); - } - generator.release(7); - generator.release(2); - assertThat(generator.getAvailableIds()).isEqualTo(2); - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(2); - assertThat(generator.preAcquire()).isTrue(); - assertThat(generator.acquire()).isEqualTo(7); - assertThat(generator.preAcquire()).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java deleted file mode 100644 index cee57abbfdf..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.DriverOption; -import edu.umd.cs.findbugs.annotations.NonNull; - -public enum MockOptions implements DriverOption { - INT1("int1"), - INT2("int2"), - AUTH_PROVIDER("auth_provider"), - SUBNET_ADDRESSES("subnet_addresses"), - ; - - private final String path; - - MockOptions(String path) { - this.path = path; - } - - @NonNull - @Override - public String getPath() { - return path; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java deleted file mode 100644 index ecad298aa37..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config; - -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; - -public class MockTypedOptions { - public static final TypedDriverOption INT1 = - new TypedDriverOption<>(MockOptions.INT1, GenericType.INTEGER); - public static final TypedDriverOption INT2 = - new TypedDriverOption<>(MockOptions.INT2, GenericType.INTEGER); -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java deleted file mode 100644 index a0db82d298e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.cloud; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; -import static com.github.tomakehurst.wiremock.client.WireMock.any; -import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; -import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; -import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; -import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.internal.core.ssl.SniSslEngineFactory; -import com.fasterxml.jackson.core.JsonParseException; -import com.github.tomakehurst.wiremock.common.JettySettings; -import com.github.tomakehurst.wiremock.core.Options; -import com.github.tomakehurst.wiremock.http.AdminRequestHandler; -import com.github.tomakehurst.wiremock.http.HttpServer; -import com.github.tomakehurst.wiremock.http.HttpServerFactory; -import com.github.tomakehurst.wiremock.http.StubRequestHandler; -import com.github.tomakehurst.wiremock.jetty9.JettyHttpServer; -import com.github.tomakehurst.wiremock.junit.WireMockRule; -import com.google.common.base.Joiner; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import org.eclipse.jetty.io.NetworkTrafficListener; -import org.eclipse.jetty.server.ConnectionFactory; -import org.eclipse.jetty.server.ServerConnector; -import org.eclipse.jetty.server.SslConnectionFactory; -import org.eclipse.jetty.util.ssl.SslContextFactory; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - 
-@RunWith(MockitoJUnitRunner.class) -public class CloudConfigFactoryTest { - - private static final String BUNDLE_PATH = "/config/cloud/creds.zip"; - - @Rule - public WireMockRule wireMockRule = - new WireMockRule( - wireMockConfig() - .httpsPort(30443) - .dynamicPort() - .httpServerFactory(new HttpsServerFactory()) - .needClientAuth(true) - .keystorePath(path("/config/cloud/identity.jks").toString()) - .keystorePassword("fakePasswordForTests") - .trustStorePath(path("/config/cloud/trustStore.jks").toString()) - .trustStorePassword("fakePasswordForTests2")); - - public CloudConfigFactoryTest() throws URISyntaxException {} - - @Test - public void should_load_config_from_local_filesystem() throws Exception { - // given - URL configFile = getClass().getResource(BUNDLE_PATH); - mockProxyMetadataService(jsonMetadata()); - // when - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile); - // then - assertCloudConfig(cloudConfig); - } - - @Test - public void should_load_config_from_external_location() throws Exception { - // given - mockHttpSecureBundle(secureBundle()); - mockProxyMetadataService(jsonMetadata()); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile); - // then - assertCloudConfig(cloudConfig); - } - - @Test - public void should_throw_when_bundle_not_found() throws Exception { - // given - stubFor(any(urlEqualTo(BUNDLE_PATH)).willReturn(aResponse().withStatus(404))); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t) - .isInstanceOf(FileNotFoundException.class) - 
.hasMessageContaining(configFile.toExternalForm()); - } - - @Test - public void should_throw_when_bundle_not_readable() throws Exception { - // given - mockHttpSecureBundle("not a zip file".getBytes(StandardCharsets.UTF_8)); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Invalid bundle: missing file config.json"); - } - - @Test - public void should_throw_when_metadata_not_found() throws Exception { - // given - mockHttpSecureBundle(secureBundle()); - stubFor(any(urlPathEqualTo("/metadata")).willReturn(aResponse().withStatus(404))); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t).isInstanceOf(FileNotFoundException.class).hasMessageContaining("metadata"); - } - - @Test - public void should_throw_when_metadata_not_readable() throws Exception { - // given - mockHttpSecureBundle(secureBundle()); - mockProxyMetadataService("not a valid json payload"); - // when - URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); - CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); - Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); - assertThat(t).isInstanceOf(JsonParseException.class).hasMessageContaining("Unrecognized token"); - } - - private void mockHttpSecureBundle(byte[] body) { - stubFor( - any(urlEqualTo(BUNDLE_PATH)) - .willReturn( - aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/octet-stream") - .withBody(body))); - } - - private void mockProxyMetadataService(String 
jsonMetadata) { - stubFor( - any(urlPathEqualTo("/metadata")) - .willReturn( - aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/json") - .withBody(jsonMetadata))); - } - - private byte[] secureBundle() throws IOException, URISyntaxException { - return Files.readAllBytes(path(BUNDLE_PATH)); - } - - private String jsonMetadata() throws IOException, URISyntaxException { - return Joiner.on('\n') - .join(Files.readAllLines(path("/config/cloud/metadata.json"), StandardCharsets.UTF_8)); - } - - private Path path(String resource) throws URISyntaxException { - return Paths.get(getClass().getResource(resource).toURI()); - } - - private void assertCloudConfig(CloudConfig config) { - InetSocketAddress expectedProxyAddress = InetSocketAddress.createUnresolved("localhost", 30002); - assertThat(config.getLocalDatacenter()).isEqualTo("dc1"); - assertThat(config.getProxyAddress()).isEqualTo(expectedProxyAddress); - assertThat(config.getEndPoints()).extracting("proxyAddress").containsOnly(expectedProxyAddress); - assertThat(config.getEndPoints()) - .extracting("serverName") - .containsExactly( - "4ac06655-f861-49f9-881e-3fee22e69b94", - "2af7c253-3394-4a0d-bfac-f1ad81b5154d", - "b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"); - assertThat(config.getSslEngineFactory()).isNotNull().isInstanceOf(SniSslEngineFactory.class); - } - - static { - javax.net.ssl.HttpsURLConnection.setDefaultHostnameVerifier( - (hostname, sslSession) -> hostname.equals("localhost")); - } - - // see https://github.com/tomakehurst/wiremock/issues/874 - private static class HttpsServerFactory implements HttpServerFactory { - @Override - public HttpServer buildHttpServer( - Options options, - AdminRequestHandler adminRequestHandler, - StubRequestHandler stubRequestHandler) { - return new JettyHttpServer(options, adminRequestHandler, stubRequestHandler) { - @Override - protected ServerConnector createServerConnector( - String bindAddress, - JettySettings jettySettings, - int port, - 
NetworkTrafficListener listener, - ConnectionFactory... connectionFactories) { - if (port == options.httpsSettings().port()) { - SslConnectionFactory sslConnectionFactory = - (SslConnectionFactory) connectionFactories[0]; - SslContextFactory sslContextFactory = sslConnectionFactory.getSslContextFactory(); - sslContextFactory.setKeyStorePassword(options.httpsSettings().keyStorePassword()); - connectionFactories = - new ConnectionFactory[] {sslConnectionFactory, connectionFactories[1]}; - } - return super.createServerConnector( - bindAddress, jettySettings, port, listener, connectionFactories); - } - }; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java deleted file mode 100644 index 1d327a08101..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.composite; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class CompositeDriverConfigReloadTest { - - @Mock private DriverConfigLoader primaryLoader; - @Mock private DriverConfigLoader fallbackLoader; - private DriverConfigLoader compositeLoader; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - compositeLoader = DriverConfigLoader.compose(primaryLoader, fallbackLoader); - } - - @Test - @UseDataProvider("reloadabilities") - public void should_be_reloadable_if_either_child_is( - boolean primaryIsReloadable, - boolean fallbackIsReloadable, - boolean compositeShouldBeReloadable) { - when(primaryLoader.supportsReloading()).thenReturn(primaryIsReloadable); - when(fallbackLoader.supportsReloading()).thenReturn(fallbackIsReloadable); - assertThat(compositeLoader.supportsReloading()).isEqualTo(compositeShouldBeReloadable); - } - - @Test - @UseDataProvider("reloadabilities") - public void should_delegate_reloading_to_reloadable_children( - boolean primaryIsReloadable, - boolean fallbackIsReloadable, - boolean 
compositeShouldBeReloadable) { - when(primaryLoader.supportsReloading()).thenReturn(primaryIsReloadable); - when(primaryLoader.reload()) - .thenReturn( - primaryIsReloadable - ? CompletableFuture.completedFuture(true) - : CompletableFutures.failedFuture(new UnsupportedOperationException())); - - when(fallbackLoader.supportsReloading()).thenReturn(fallbackIsReloadable); - when(fallbackLoader.reload()) - .thenReturn( - fallbackIsReloadable - ? CompletableFuture.completedFuture(true) - : CompletableFutures.failedFuture(new UnsupportedOperationException())); - - CompletionStage reloadFuture = compositeLoader.reload(); - - if (compositeShouldBeReloadable) { - assertThat(reloadFuture).isCompletedWithValue(true); - } else { - assertThat(reloadFuture).isCompletedExceptionally(); - Throwable t = catchThrowable(() -> reloadFuture.toCompletableFuture().get()); - assertThat(t).hasRootCauseInstanceOf(UnsupportedOperationException.class); - } - verify(primaryLoader, primaryIsReloadable ? times(1) : never()).reload(); - verify(fallbackLoader, fallbackIsReloadable ? times(1) : never()).reload(); - } - - @DataProvider - public static Object[][] reloadabilities() { - return new Object[][] { - // primaryIsReloadable, fallbackIsReloadable, compositeShouldBeReloadable - new Object[] {true, true, true}, - new Object[] {true, false, true}, - new Object[] {false, true, true}, - new Object[] {false, false, false}, - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java deleted file mode 100644 index e5d5ffcdf83..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.composite; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import org.junit.Before; -import org.junit.Test; - -public class CompositeDriverConfigTest { - - private OptionsMap primaryMap; - private OptionsMap fallbackMap; - private DriverConfig compositeConfig; - private DriverExecutionProfile compositeDefaultProfile; - - @Before - public void setup() { - primaryMap = new OptionsMap(); - // We need at least one option so that the default profile exists. Do it now to avoid having to - // do it in every test. We use an option that we won't reuse in the tests so that there are no - // unwanted interactions. 
- primaryMap.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 1); - - fallbackMap = new OptionsMap(); - fallbackMap.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 1); - - DriverConfigLoader compositeLoader = - DriverConfigLoader.compose( - DriverConfigLoader.fromMap(primaryMap), DriverConfigLoader.fromMap(fallbackMap)); - compositeConfig = compositeLoader.getInitialConfig(); - compositeDefaultProfile = compositeConfig.getDefaultProfile(); - } - - @Test - public void should_use_value_from_primary_config() { - primaryMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - - assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isTrue(); - assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat(compositeDefaultProfile.entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } - - @Test - public void should_ignore_value_from_fallback_config_if_defined_in_both() { - primaryMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - fallbackMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2); - - assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isTrue(); - assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat(compositeDefaultProfile.entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } - - @Test - public void should_use_value_from_fallback_config_if_not_defined_in_primary() { - fallbackMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - - assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isTrue(); - 
assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat(compositeDefaultProfile.entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } - - @Test - public void should_merge_profiles() { - primaryMap.put("onlyInPrimary", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1); - primaryMap.put("inBoth", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2); - fallbackMap.put("inBoth", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 3); - fallbackMap.put("onlyInFallback", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 4); - - assertThat(compositeConfig.getProfiles()) - .containsKeys( - DriverExecutionProfile.DEFAULT_NAME, - "onlyInPrimary", - "inBoth", - "inBoth", - "onlyInFallback"); - - assertThat( - compositeConfig - .getProfile("onlyInPrimary") - .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(1); - assertThat( - compositeConfig - .getProfile("inBoth") - .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(2); - assertThat( - compositeConfig - .getProfile("onlyInFallback") - .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(4); - - assertThat(compositeConfig.getProfile("onlyInPrimary").entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - - assertThat(compositeConfig.getProfile("inBoth").entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 2), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - - assertThat(compositeConfig.getProfile("onlyInFallback").entrySet()) - .containsExactly( - entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 4), - entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java deleted file mode 100644 index 93f6b274826..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.config.map; - -import static com.typesafe.config.ConfigFactory.defaultReference; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.datastax.oss.driver.internal.core.config.MockTypedOptions; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.typesafe.config.ConfigException; -import com.typesafe.config.ConfigFactory; -import java.util.Optional; -import org.junit.Test; - -public class MapBasedDriverConfigLoaderTest { - - @Test - public void should_reflect_changes_in_real_time() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 1); - - DriverConfigLoader loader = DriverConfigLoader.fromMap(source); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); - - source.put(MockTypedOptions.INT1, 2); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(2); - } - - /** - * Checks that, if we ask to pre-fill the default profile, then we get the same set of options as - * the built-in reference.conf. 
- */ - @Test - public void should_fill_default_profile_like_reference_file() { - OptionsMap optionsMap = OptionsMap.driverDefaults(); - DriverExecutionProfile mapBasedConfig = - DriverConfigLoader.fromMap(optionsMap).getInitialConfig().getDefaultProfile(); - DriverExecutionProfile fileBasedConfig = - new DefaultDriverConfigLoader( - () -> { - // Only load reference.conf since we are focusing on driver defaults - ConfigFactory.invalidateCaches(); - return defaultReference().getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - }) - .getInitialConfig() - .getDefaultProfile(); - - // Make sure we're not missing any options. -1 is for CONFIG_RELOAD_INTERVAL, which is not - // defined by OptionsMap because it is irrelevant for the map-based config. - assertThat(mapBasedConfig.entrySet()).hasSize(fileBasedConfig.entrySet().size() - 1); - - for (TypedDriverOption option : TypedDriverOption.builtInValues()) { - if (option.getRawOption() == DefaultDriverOption.CONFIG_RELOAD_INTERVAL) { - continue; - } - Optional fileBasedValue = get(fileBasedConfig, option); - Optional mapBasedValue = get(mapBasedConfig, option); - assertThat(mapBasedValue) - .as("Wrong value for %s in OptionsMap", option.getRawOption()) - .isEqualTo(fileBasedValue); - } - } - - private Optional get(DriverExecutionProfile config, TypedDriverOption typedOption) { - DriverOption option = typedOption.getRawOption(); - GenericType type = typedOption.getExpectedType(); - Object value = null; - if (config.isDefined(option)) { - // This is ugly, we have no other way than enumerating all possible types. - // This kind of bridging code between OptionsMap and DriverConfig is unlikely to exist - // anywhere outside of this test. 
- if (type.equals(GenericType.listOf(String.class))) { - value = config.getStringList(option); - } else if (type.equals(GenericType.STRING)) { - value = config.getString(option); - } else if (type.equals(GenericType.DURATION)) { - value = config.getDuration(option); - } else if (type.equals(GenericType.INTEGER)) { - value = config.getInt(option); - } else if (type.equals(GenericType.BOOLEAN)) { - value = config.getBoolean(option); - } else if (type.equals(GenericType.LONG)) { - try { - value = config.getLong(option); - } catch (ConfigException.WrongType e) { - value = config.getBytes(option); - } - } else if (type.equals(GenericType.mapOf(GenericType.STRING, GenericType.STRING))) { - value = config.getStringMap(option); - } else { - fail("Unexpected type " + type); - } - } - return Optional.ofNullable(value); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java deleted file mode 100644 index 1ebd5fb48ba..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.map; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.datastax.oss.driver.internal.core.config.MockTypedOptions; -import org.junit.Test; - -public class MapBasedDriverConfigTest { - - @Test - public void should_load_minimal_config_with_no_profiles() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - - assertThat(config).hasIntOption(MockOptions.INT1, 42); - } - - @Test - public void should_inherit_option_in_profile() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - // need to add an unrelated option to create the profile - source.put("profile1", MockTypedOptions.INT2, 1); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 42); - } - - @Test - public void should_override_option_in_profile() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - source.put("profile1", MockTypedOptions.INT1, 43); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 43); - } - - @Test - public void should_create_derived_profile_with_new_option() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - 
DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT2, 43); - - assertThat(base.isDefined(MockOptions.INT2)).isFalse(); - assertThat(derived.isDefined(MockOptions.INT2)).isTrue(); - assertThat(derived.getInt(MockOptions.INT2)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_overriding_option() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT1, 43); - - assertThat(base.getInt(MockOptions.INT1)).isEqualTo(42); - assertThat(derived.getInt(MockOptions.INT1)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_unsetting_option() { - OptionsMap source = new OptionsMap(); - source.put(MockTypedOptions.INT1, 42); - source.put(MockTypedOptions.INT2, 43); - DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.without(MockOptions.INT2); - - assertThat(base.getInt(MockOptions.INT2)).isEqualTo(43); - assertThat(derived.isDefined(MockOptions.INT2)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java deleted file mode 100644 index 16b8f0b3aa6..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop; -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop.CapturedTask; -import com.typesafe.config.ConfigFactory; 
-import io.netty.channel.EventLoopGroup; -import java.io.File; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DefaultDriverConfigLoaderTest { - - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private EventLoopGroup adminEventExecutorGroup; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - private ScheduledTaskCapturingEventLoop adminExecutor; - private EventBus eventBus; - private AtomicReference configSource; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(context.getSessionName()).thenReturn("test"); - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventExecutorGroup); - - adminExecutor = new ScheduledTaskCapturingEventLoop(adminEventExecutorGroup); - when(adminEventExecutorGroup.next()).thenReturn(adminExecutor); - - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - // The already loaded config in the context. - // In real life, it's the object managed by the loader, but in this test it's simpler to mock - // it. 
- when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getDuration(DefaultDriverOption.CONFIG_RELOAD_INTERVAL)) - .thenReturn(Duration.ofSeconds(12)); - - configSource = new AtomicReference<>("int1 = 42"); - } - - @Test - public void should_build_initial_config() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - } - - @Test - public void should_schedule_reloading_task() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CapturedTask task = adminExecutor.nextTask(); - assertThat(task.getInitialDelay(TimeUnit.SECONDS)).isEqualTo(12); - assertThat(task.getPeriod(TimeUnit.SECONDS)).isEqualTo(12); - } - - @Test - public void should_detect_config_change_from_periodic_reload() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CapturedTask task = adminExecutor.nextTask(); - - configSource.set("int1 = 43"); - - task.run(); - - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 43); - verify(eventBus).fire(ConfigChangeEvent.INSTANCE); - } - - @Test - public void should_detect_config_change_from_manual_reload() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - 
loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - configSource.set("int1 = 43"); - - CompletionStage reloaded = loader.reload(); - adminExecutor.waitForNonScheduledTasks(); - - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 43); - verify(eventBus).fire(ConfigChangeEvent.INSTANCE); - assertThatStage(reloaded).isSuccess(changed -> assertThat(changed).isTrue()); - } - - @Test - public void should_not_notify_from_periodic_reload_if_config_has_not_changed() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CapturedTask task = adminExecutor.nextTask(); - - // no change to the config source - - task.run(); - - verify(eventBus, never()).fire(ConfigChangeEvent.INSTANCE); - } - - @Test - public void should_not_notify_from_manual_reload_if_config_has_not_changed() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get())); - DriverConfig initialConfig = loader.getInitialConfig(); - assertThat(initialConfig).hasIntOption(MockOptions.INT1, 42); - - loader.onDriverInit(context); - adminExecutor.waitForNonScheduledTasks(); - - CompletionStage reloaded = loader.reload(); - adminExecutor.waitForNonScheduledTasks(); - - verify(eventBus, never()).fire(ConfigChangeEvent.INSTANCE); - assertThatStage(reloaded).isSuccess(changed -> assertThat(changed).isFalse()); - } - - @Test - public void should_load_from_other_classpath_resource() { - DriverConfigLoader loader = DriverConfigLoader.fromClasspath("config/customApplication"); - DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); - // From customApplication.conf: - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - 
.isEqualTo(Duration.ofSeconds(5)); - // From customApplication.json: - assertThat(config.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).isEqualTo(2000); - // From customApplication.properties: - assertThat(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.ONE.name()); - // From reference.conf: - assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); - } - - @Test - public void should_load_from_file() { - File file = new File("src/test/resources/config/customApplication.conf"); - assertThat(file).exists(); - DriverConfigLoader loader = DriverConfigLoader.fromFile(file); - DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); - // From customApplication.conf: - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofSeconds(5)); - // From reference.conf: - assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); - } - - @Test - public void should_load_from_file_with_system_property() { - File file = new File("src/test/resources/config/customApplication.conf"); - assertThat(file).exists(); - System.setProperty("config.file", file.getAbsolutePath()); - try { - DriverConfigLoader loader = new DefaultDriverConfigLoader(); - DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); - // From customApplication.conf: - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofSeconds(5)); - // From reference.conf: - assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); - } finally { - System.clearProperty("config.file"); - } - } - - @Test - public void should_return_failed_future_if_reloading_not_supported() { - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader(() -> 
ConfigFactory.parseString(configSource.get()), false); - assertThat(loader.supportsReloading()).isFalse(); - CompletionStage stage = loader.reload(); - assertThatStage(stage) - .isFailed( - t -> - assertThat(t) - .isInstanceOf(UnsupportedOperationException.class) - .hasMessage( - "This instance of DefaultDriverConfigLoader does not support reloading")); - } - - /** Test for JAVA-2846. */ - @Test - public void should_load_setting_from_system_property_when_application_conf_is_also_provided() { - System.setProperty("datastax-java-driver.basic.request.timeout", "1 millisecond"); - try { - assertThat( - new DefaultDriverConfigLoader() - .getInitialConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1)); - } finally { - System.clearProperty("datastax-java-driver.basic.request.timeout"); - } - } - - /** Test for JAVA-2846. */ - @Test - public void - should_load_and_resolve_setting_from_system_property_when_application_conf_is_also_provided() { - System.setProperty( - "datastax-java-driver.advanced.connection.init-query-timeout", "1234 milliseconds"); - try { - assertThat( - new DefaultDriverConfigLoader() - .getInitialConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1234)); - } finally { - System.clearProperty("datastax-java-driver.advanced.connection.init-query-timeout"); - } - } - - /** Test for JAVA-2846. 
*/ - @Test - public void - should_load_setting_from_system_property_when_application_conf_is_also_provided_for_custom_classloader() { - System.setProperty("datastax-java-driver.basic.request.timeout", "1 millisecond"); - try { - assertThat( - new DefaultDriverConfigLoader(Thread.currentThread().getContextClassLoader()) - .getInitialConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1)); - } finally { - System.clearProperty("datastax-java-driver.basic.request.timeout"); - } - } - - @Test - public void should_create_from_string() { - DriverExecutionProfile config = - DriverConfigLoader.fromString( - "datastax-java-driver.basic { session-name = my-app\nrequest.timeout = 1 millisecond }") - .getInitialConfig() - .getDefaultProfile(); - - assertThat(config.getString(DefaultDriverOption.SESSION_NAME)).isEqualTo("my-app"); - assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(1)); - // Any option not in the string should be pulled from reference.conf - assertThat(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("LOCAL_ONE"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java deleted file mode 100644 index 4f2edf98246..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.typesafe.config.ConfigFactory; -import org.junit.Test; - -public class DefaultProgrammaticDriverConfigLoaderBuilderTest { - - private static final String FALLBACK_CONFIG = - "int1 = 1\nint2 = 2\nprofiles.profile1 { int1 = 11 }"; - - @Test - public void should_override_option() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .withInt(MockOptions.INT1, 2) - .withInt(MockOptions.INT1, 3) - .withInt(MockOptions.INT1, 4) - .withInt(MockOptions.INT2, 3) - .withInt(MockOptions.INT2, 4) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(4); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT2)).isEqualTo(4); - } - - @Test - public void should_override_option_in_default_profile() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> 
ConfigFactory.parseString(FALLBACK_CONFIG), "") - .withInt(MockOptions.INT1, 3) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(3); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT2)).isEqualTo(2); - } - - @Test - public void should_override_option_in_existing_profile() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile1") - .withInt(MockOptions.INT1, 3) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); - assertThat(config.getProfile("profile1").getInt(MockOptions.INT1)).isEqualTo(3); - } - - @Test - public void should_override_option_in_new_profile() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile2") - .withInt(MockOptions.INT1, 3) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); - assertThat(config.getProfile("profile1").getInt(MockOptions.INT1)).isEqualTo(11); - assertThat(config.getProfile("profile2").getInt(MockOptions.INT1)).isEqualTo(3); - assertThat(config.getProfile("profile2").getInt(MockOptions.INT2)).isEqualTo(2); - } - - @Test - public void should_go_back_to_default_profile_when_profile_ends() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile2") - .withInt(MockOptions.INT1, 3) - .endProfile() - .withInt(MockOptions.INT1, 4) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(4); - } - - @Test - public void 
should_handle_multiple_programmatic_profiles() { - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") - .startProfile("profile2") - .withInt(MockOptions.INT1, 3) - .startProfile("profile3") - .withInt(MockOptions.INT1, 4) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getProfile("profile2").getInt(MockOptions.INT1)).isEqualTo(3); - assertThat(config.getProfile("profile3").getInt(MockOptions.INT1)).isEqualTo(4); - } - - @Test - public void should_honor_root_path() { - String rootPath = "test-root"; - String propertyKey = rootPath + "." + DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(); - try { - System.setProperty(propertyKey, "42"); - DriverConfigLoader loader = - new DefaultProgrammaticDriverConfigLoaderBuilder( - DefaultProgrammaticDriverConfigLoaderBuilder.DEFAULT_FALLBACK_SUPPLIER, rootPath) - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 1234) - .build(); - DriverConfig config = loader.getInitialConfig(); - assertThat(config.getDefaultProfile().getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(42); - assertThat(config.getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)) - .isEqualTo(1234); - } finally { - System.clearProperty(propertyKey); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java deleted file mode 100644 index 2f2f0a9b3c1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import java.time.Duration; -import java.util.Map; -import org.junit.Test; - -/** Focuses on {@link TypesafeDriverConfig#overrideDefaults(Map)}. 
*/ -public class TypeSafeDriverConfigOverrideDefaultsTest { - - @Test - public void should_replace_if_value_comes_from_reference() { - // Given - TypesafeDriverConfig config = config(""); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - - // Then - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - } - - @Test - public void should_replace_multiple_times() { - // Given - TypesafeDriverConfig config = config(""); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - config.overrideDefaults(ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "TWO")); - - // Then - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("TWO"); - } - - @Test - public void should_not_replace_if_overridden_from_application() { - // Given - TypesafeDriverConfig config = - config("datastax-java-driver.basic.request.consistency = LOCAL_ONE"); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - - // Then - // not replaced because it was set explictly in application.conf - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - } - - @Test - public void should_handle_reloads() { - // Given - TypesafeDriverConfig config = config(""); - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); - - // When - 
config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - reload(config, ""); - - // Then - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - - // When - reload(config, "datastax-java-driver.basic.request.consistency = ONE"); - - // Then - // overridden default not used anymore if the reload detected a user change - assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("ONE"); - } - - @Test - public void should_ignore_non_existent_option() { - // Given - TypesafeDriverConfig config = config(""); - DriverOption nonExistent = () -> "non existent"; - - // When - config.overrideDefaults(ImmutableMap.of(nonExistent, "IRRELEVANT")); - - // Then - assertThat(config.getDefaultProfile().isDefined(nonExistent)).isFalse(); - } - - @Test - public void should_handle_profiles() { - // Given - TypesafeDriverConfig config = - config( - "datastax-java-driver.profiles.profile1.basic.request.consistency = TWO\n" - + "datastax-java-driver.profiles.profile2.basic.request.timeout = 5 seconds"); - DriverExecutionProfile profile1 = config.getProfile("profile1"); - DriverExecutionProfile profile2 = config.getProfile("profile2"); - DriverExecutionProfile derivedProfile21 = - profile2.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)); - DriverExecutionProfile derivedProfile22 = - profile2.withString(DefaultDriverOption.REQUEST_CONSISTENCY, "QUORUM"); - assertThat(profile1.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("TWO"); - assertThat(profile2.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); // inherited from default profile in reference.conf - assertThat(derivedProfile21.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_ONE"); // inherited from default profile in reference.conf - 
assertThat(derivedProfile22.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("QUORUM"); // overridden programmatically - - // When - config.overrideDefaults( - ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); - - // Then - // Unaffected because it was set manually in application.conf: - assertThat(profile1.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("TWO"); - // Affected because it was using the default from reference.conf: - assertThat(profile2.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - // Same: - assertThat(derivedProfile21.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("LOCAL_QUORUM"); - // Unaffected because it was overridden programmatically: - assertThat(derivedProfile22.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .isEqualTo("QUORUM"); - } - - // Builds a config based on reference.conf + the given application.conf overrides - private TypesafeDriverConfig config(String application) { - return new TypesafeDriverConfig(rawConfig(application)); - } - - private boolean reload(TypesafeDriverConfig config, String newApplication) { - return config.reload(rawConfig(newApplication)); - } - - private Config rawConfig(String application) { - ConfigFactory.invalidateCaches(); - return ConfigFactory.parseString(application) - .withFallback(ConfigFactory.defaultReference()) - .resolve() - .getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java deleted file mode 100644 index 4a78c3ccb03..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.config.typesafe; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.config.MockOptions; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import java.util.HashMap; -import java.util.Map; -import org.junit.Test; - -public class TypesafeDriverConfigTest { - - @Test - public void should_load_minimal_config_with_no_profiles() { - TypesafeDriverConfig config = parse("int1 = 42"); - assertThat(config).hasIntOption(MockOptions.INT1, 42); - } - - @Test - public void should_load_config_with_no_profiles_and_optional_values() { - TypesafeDriverConfig config = parse("int1 = 42\n int2 = 43"); - assertThat(config).hasIntOption(MockOptions.INT1, 42); - assertThat(config).hasIntOption(MockOptions.INT2, 43); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_profile_uses_default_name() { - parse("int1 = 42\n profiles { default { int1 = 43 } }"); - } - - @Test - public void should_inherit_option_in_profile() { - TypesafeDriverConfig config = parse("int1 = 42\n 
profiles { profile1 { } }"); - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 42); - } - - @Test - public void should_override_option_in_profile() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); - assertThat(config) - .hasIntOption(MockOptions.INT1, 42) - .hasIntOption("profile1", MockOptions.INT1, 43); - } - - @Test - public void should_create_derived_profile_with_new_option() { - TypesafeDriverConfig config = parse("int1 = 42"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT2, 43); - - assertThat(base.isDefined(MockOptions.INT2)).isFalse(); - assertThat(derived.isDefined(MockOptions.INT2)).isTrue(); - assertThat(derived.getInt(MockOptions.INT2)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_overriding_option() { - TypesafeDriverConfig config = parse("int1 = 42"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.withInt(MockOptions.INT1, 43); - - assertThat(base.getInt(MockOptions.INT1)).isEqualTo(42); - assertThat(derived.getInt(MockOptions.INT1)).isEqualTo(43); - } - - @Test - public void should_create_derived_profile_unsetting_option() { - TypesafeDriverConfig config = parse("int1 = 42\n int2 = 43"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile derived = base.without(MockOptions.INT2); - - assertThat(base.getInt(MockOptions.INT2)).isEqualTo(43); - assertThat(derived.isDefined(MockOptions.INT2)).isFalse(); - } - - @Test - public void should_fetch_string_map() { - TypesafeDriverConfig config = - parse( - "int1 = 42 \n auth_provider { auth_thing_one= one \n auth_thing_two = two \n auth_thing_three = three}"); - DriverExecutionProfile base = config.getDefaultProfile(); - Map map = base.getStringMap(MockOptions.AUTH_PROVIDER); - assertThat(map.entrySet().size()).isEqualTo(3); 
- assertThat(map.get("auth_thing_one")).isEqualTo("one"); - assertThat(map.get("auth_thing_two")).isEqualTo("two"); - assertThat(map.get("auth_thing_three")).isEqualTo("three"); - } - - @Test - public void should_fetch_string_map_with_forward_slash_in_keys() { - TypesafeDriverConfig config = - parse( - "subnet_addresses { 100.64.0.0/15 = \"cassandra.datacenter1.com:9042\" \n \"100.66.0.0/15\" = \"cassandra.datacenter2.com\" \n \"::ffff:6440:0/111\" = \"cassandra.datacenter3.com:19042\" }"); - DriverExecutionProfile base = config.getDefaultProfile(); - Map map = base.getStringMap(MockOptions.SUBNET_ADDRESSES); - assertThat(map.entrySet().size()).isEqualTo(3); - assertThat(map.get("100.64.0.\"0/15\"")).isEqualTo("cassandra.datacenter1.com:9042"); - assertThat(map.get("\"100.66.0.0/15\"")).isEqualTo("cassandra.datacenter2.com"); - assertThat(map.get("\"::ffff:6440:0/111\"")).isEqualTo("cassandra.datacenter3.com:19042"); - } - - @Test - public void should_create_derived_profile_with_string_map() { - TypesafeDriverConfig config = parse("int1 = 42"); - Map authThingMap = new HashMap<>(); - authThingMap.put("auth_thing_one", "one"); - authThingMap.put("auth_thing_two", "two"); - authThingMap.put("auth_thing_three", "three"); - DriverExecutionProfile base = config.getDefaultProfile(); - DriverExecutionProfile mapBase = base.withStringMap(MockOptions.AUTH_PROVIDER, authThingMap); - Map fetchedMap = mapBase.getStringMap(MockOptions.AUTH_PROVIDER); - assertThat(fetchedMap).isEqualTo(authThingMap); - } - - @Test - public void should_reload() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); - - config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); - assertThat(config) - .hasIntOption(MockOptions.INT1, 44) - .hasIntOption("profile1", MockOptions.INT1, 45); - } - - @Test - public void should_update_derived_profiles_after_reloading() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { 
profile1 { int1 = 43 } }"); - - DriverExecutionProfile derivedFromDefault = - config.getDefaultProfile().withInt(MockOptions.INT2, 50); - DriverExecutionProfile derivedFromProfile1 = - config.getProfile("profile1").withInt(MockOptions.INT2, 51); - - config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); - - assertThat(derivedFromDefault.getInt(MockOptions.INT1)).isEqualTo(44); - assertThat(derivedFromDefault.getInt(MockOptions.INT2)).isEqualTo(50); - - assertThat(derivedFromProfile1.getInt(MockOptions.INT1)).isEqualTo(45); - assertThat(derivedFromProfile1.getInt(MockOptions.INT2)).isEqualTo(51); - } - - @Test - public void should_enumerate_options() { - TypesafeDriverConfig config = - parse( - "int1 = 42 \n" - + "auth_provider { auth_thing_one= one \n auth_thing_two = two \n auth_thing_three = three}\n" - + "profiles { profile1 { int1 = 45 } }"); - - assertThat(config.getDefaultProfile().entrySet()) - .containsExactly( - entry("auth_provider.auth_thing_one", "one"), - entry("auth_provider.auth_thing_three", "three"), - entry("auth_provider.auth_thing_two", "two"), - entry("int1", 42)); - - assertThat(config.getProfile("profile1").entrySet()) - .containsExactly( - entry("auth_provider.auth_thing_one", "one"), - entry("auth_provider.auth_thing_three", "three"), - entry("auth_provider.auth_thing_two", "two"), - entry("int1", 45)); - } - - @Test - public void should_update_default_profile_on_reload() { - TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(42); - config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); - assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(44); - } - - private TypesafeDriverConfig parse(String configString) { - Config config = ConfigFactory.parseString(configString); - return new TypesafeDriverConfig(config); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java deleted file mode 100644 index 9a973c1b0e4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.connection; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ExponentialReconnectionPolicyTest { - - @Mock private DriverContext driverContext; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile profile; - private final long baseDelay = 1000L; - private final long maxDelay = 60000L; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(driverConfig.getDefaultProfile()).thenReturn(profile); - when(driverContext.getConfig()).thenReturn(driverConfig); - when(profile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY)) - .thenReturn(Duration.of(baseDelay, ChronoUnit.MILLIS)); - when(profile.getDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY)) - .thenReturn(Duration.of(maxDelay, ChronoUnit.MILLIS)); - } - - @Test - public void should_generate_exponential_delay_with_jitter() throws Exception { - ExponentialReconnectionPolicy policy = new ExponentialReconnectionPolicy(driverContext); - ReconnectionPolicy.ReconnectionSchedule schedule = policy.newControlConnectionSchedule(false); - // generate a number of delays and make sure they are all within the base/max values range - // limit the loop to 53 as the bit shift and min/max calculations will cause long overflows - // past that - for (int i = 0; i < 54; ++i) { - // compute the min and max delays based on attempt count (i) 
and prevent long overflows - long exponentialDelay = Math.min(baseDelay * (1L << i), maxDelay); - // min will be 85% of the pure exponential delay (with a floor of baseDelay) - long minJitterDelay = Math.max(baseDelay, (exponentialDelay * 85) / 100); - // max will be 115% of the pure exponential delay (with a ceiling of maxDelay) - long maxJitterDelay = Math.min(maxDelay, (exponentialDelay * 115) / 100); - long delay = schedule.nextDelay().toMillis(); - assertThat(delay).isBetween(minJitterDelay, maxJitterDelay); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java deleted file mode 100644 index 6d4585cb4d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.protocol.Lz4Compressor; -import com.datastax.oss.driver.internal.core.protocol.SnappyCompressor; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.NoopCompressor; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import io.netty.buffer.ByteBuf; -import java.util.Optional; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DefaultDriverContextTest { - - private DefaultDriverContext buildMockedContext(Optional compressionOption) { - - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn(compressionOption.orElse("none")); - return MockedDriverContextFactory.defaultDriverContext(defaultProfile); - } - - private void doCreateCompressorTest(Optional configVal, Class expectedClz) { - - DefaultDriverContext ctx = buildMockedContext(configVal); - Compressor compressor = ctx.getCompressor(); - assertThat(compressor).isNotNull(); - assertThat(compressor).isInstanceOf(expectedClz); - } - - @Test - @DataProvider({"lz4", "lZ4", "Lz4", "LZ4"}) - public void should_create_lz4_compressor(String name) { - - doCreateCompressorTest(Optional.of(name), Lz4Compressor.class); - } - - @Test - @DataProvider({"snappy", "SNAPPY", "sNaPpY", "SNapPy"}) - public void should_create_snappy_compressor(String name) { - - doCreateCompressorTest(Optional.of(name), SnappyCompressor.class); - } - - @Test - public void 
should_create_noop_compressor_if_undefined() { - - doCreateCompressorTest(Optional.empty(), NoopCompressor.class); - } - - @Test - @DataProvider({"none", "NONE", "NoNe", "nONe"}) - public void should_create_noop_compressor_if_defined_as_none(String name) { - - doCreateCompressorTest(Optional.of(name), NoopCompressor.class); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java deleted file mode 100644 index a8b25193f54..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -public class MockedDriverContextFactory { - - public static DefaultDriverContext defaultDriverContext() { - return defaultDriverContext(MockedDriverContextFactory.defaultProfile("datacenter1")); - } - - public static DefaultDriverContext defaultDriverContext( - DriverExecutionProfile defaultProfile, DriverExecutionProfile... 
profiles) { - - /* Setup machinery to connect the input DriverExecutionProfile to the config loader */ - final DriverConfig driverConfig = mock(DriverConfig.class); - final DriverConfigLoader configLoader = mock(DriverConfigLoader.class); - when(configLoader.getInitialConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - when(driverConfig.getProfile(defaultProfile.getName())).thenReturn(defaultProfile); - - for (DriverExecutionProfile profile : profiles) { - when(driverConfig.getProfile(profile.getName())).thenReturn(profile); - } - - ProgrammaticArguments args = - ProgrammaticArguments.builder() - .withNodeStateListener(mock(NodeStateListener.class)) - .withSchemaChangeListener(mock(SchemaChangeListener.class)) - .withRequestTracker(mock(RequestTracker.class)) - .withLocalDatacenters(Maps.newHashMap()) - .withNodeDistanceEvaluators(Maps.newHashMap()) - .build(); - - return new DefaultDriverContext(configLoader, args) { - @NonNull - @Override - public Map getLoadBalancingPolicies() { - ImmutableMap.Builder map = ImmutableMap.builder(); - map.put( - defaultProfile.getName(), - mockLoadBalancingPolicy( - this, - defaultProfile.getName(), - defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER))); - for (DriverExecutionProfile profile : profiles) { - map.put( - profile.getName(), - mockLoadBalancingPolicy( - this, - profile.getName(), - profile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER))); - } - return map.build(); - } - - @NonNull - @Override - public ConsistencyLevelRegistry getConsistencyLevelRegistry() { - return mock(ConsistencyLevelRegistry.class); - } - }; - } - - public static DriverExecutionProfile defaultProfile(String localDc) { - return createProfile(DriverExecutionProfile.DEFAULT_NAME, localDc); - } - - public static DriverExecutionProfile createProfile(String name, String localDc) { - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); 
- when(defaultProfile.getName()).thenReturn(name); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn("none"); - when(defaultProfile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofMinutes(5)); - when(defaultProfile.isDefined(DefaultDriverOption.METRICS_FACTORY_CLASS)).thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.METRICS_FACTORY_CLASS)) - .thenReturn("DefaultMetricsFactory"); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(localDc); - return defaultProfile; - } - - public static void allowRemoteDcConnectivity( - DriverExecutionProfile profile, - int maxNodesPerRemoteDc, - boolean allowRemoteSatisfyLocalDc, - List preferredRemoteDcs) { - when(profile.getInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(maxNodesPerRemoteDc); - when(profile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(allowRemoteSatisfyLocalDc); - when(profile.getStringList(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)) - .thenReturn(preferredRemoteDcs); - } - - private static LoadBalancingPolicy mockLoadBalancingPolicy( - DefaultDriverContext driverContext, String profile, String localDc) { - LoadBalancingPolicy loadBalancingPolicy = - new DefaultLoadBalancingPolicy(driverContext, profile) { - @NonNull - @Override - protected Optional discoverLocalDc(@NonNull Map nodes) { - return Optional.ofNullable(localDc); - } - - @NonNull - @Override - protected NodeDistanceEvaluator createNodeDistanceEvaluator( - @Nullable String localDc, @NonNull Map nodes) { - return mock(NodeDistanceEvaluator.class); - } - }; - loadBalancingPolicy.init( - Collections.emptyMap(), mock(LoadBalancingPolicy.DistanceReporter.class)); - return loadBalancingPolicy; - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java deleted file mode 100644 index d12e50b7e8e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.request.Startup; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class StartupOptionsBuilderTest { - - private DefaultDriverContext buildMockedContext(String compression) { - - DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); - when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) - .thenReturn(compression); - when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); - return MockedDriverContextFactory.defaultDriverContext(defaultProfile); - } - - private void assertDefaultStartupOptions(Startup startup) { - - assertThat(startup.options).containsEntry(Startup.CQL_VERSION_KEY, "3.0.0"); - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_NAME_KEY, Session.OSS_DRIVER_COORDINATES.getName()); - assertThat(startup.options).containsKey(StartupOptionsBuilder.DRIVER_VERSION_KEY); - Version version = Version.parse(startup.options.get(StartupOptionsBuilder.DRIVER_VERSION_KEY)); - assertThat(version).isEqualByComparingTo(Session.OSS_DRIVER_COORDINATES.getVersion()); - } - - @Test - public void should_build_startup_options_with_no_compression_if_undefined() 
{ - - DefaultDriverContext ctx = MockedDriverContextFactory.defaultDriverContext(); - Startup startup = new Startup(ctx.getStartupOptions()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_build_startup_options_with_no_compression_if_defined_as_none() { - - DefaultDriverContext ctx = buildMockedContext("none"); - Startup startup = new Startup(ctx.getStartupOptions()); - assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); - assertDefaultStartupOptions(startup); - } - - @Test - @DataProvider({"lz4", "snappy"}) - public void should_build_startup_options(String compression) { - - DefaultDriverContext ctx = buildMockedContext(compression); - Startup startup = new Startup(ctx.getStartupOptions()); - // assert the compression option is present - assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, compression); - assertDefaultStartupOptions(startup); - } - - @Test - public void should_fail_to_build_startup_options_with_invalid_compression() { - - assertThatIllegalArgumentException() - .isThrownBy( - () -> { - DefaultDriverContext ctx = buildMockedContext("foobar"); - new Startup(ctx.getStartupOptions()); - }); - } - - @Test - public void should_include_all_local_dcs_in_startup_message() { - - DefaultDriverContext ctx = - MockedDriverContextFactory.defaultDriverContext( - MockedDriverContextFactory.defaultProfile("us-west-2"), - MockedDriverContextFactory.createProfile("oltp", "us-east-2"), - MockedDriverContextFactory.createProfile("olap", "eu-central-1")); - Startup startup = new Startup(ctx.getStartupOptions()); - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_BAGGAGE, - "{\"default\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"us-west-2\"}}," - + "\"oltp\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"us-east-2\"}}," - + "\"olap\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"eu-central-1\"}}}"); - } 
- - @Test - public void should_include_all_lbp_details_in_startup_message() { - - DriverExecutionProfile defaultProfile = MockedDriverContextFactory.defaultProfile("dc1"); - DriverExecutionProfile oltpProfile = MockedDriverContextFactory.createProfile("oltp", "dc1"); - MockedDriverContextFactory.allowRemoteDcConnectivity( - oltpProfile, 2, true, ImmutableList.of("dc2", "dc3")); - DefaultDriverContext ctx = - MockedDriverContextFactory.defaultDriverContext(defaultProfile, oltpProfile); - - Startup startup = new Startup(ctx.getStartupOptions()); - - assertThat(startup.options) - .containsEntry( - StartupOptionsBuilder.DRIVER_BAGGAGE, - "{\"default\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"dc1\"}}," - + "\"oltp\":{\"DefaultLoadBalancingPolicy\":{" - + "\"localDc\":\"dc1\"," - + "\"preferredRemoteDcs\":[\"dc2\",\"dc3\"]," - + "\"allowDcFailoverForLocalCl\":true," - + "\"maxNodesPerRemoteDc\":2}}}"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java deleted file mode 100644 index 61533a8e8e9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.context.bus; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.context.EventBus; -import java.util.HashMap; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; - -public class EventBusTest { - - private EventBus bus; - private Map results; - private ChildEvent event = new ChildEvent(); - - @Before - public void setup() { - bus = new EventBus("test"); - results = new HashMap<>(); - } - - @Test - public void should_notify_registered_listeners() { - // Given - bus.register(ChildEvent.class, (e) -> results.put("listener1", e)); - bus.register(ChildEvent.class, (e) -> results.put("listener2", e)); - - // When - bus.fire(event); - - // Then - assertThat(results) - .hasSize(2) - .containsEntry("listener1", event) - .containsEntry("listener2", event); - } - - @Test - public void should_unregister_listener() { - // Given - Object key1 = bus.register(ChildEvent.class, (e) -> results.put("listener1", e)); - bus.register(ChildEvent.class, (e) -> results.put("listener2", e)); - bus.unregister(key1, ChildEvent.class); - - // When - bus.fire(event); - - // Then - assertThat(results).hasSize(1).containsEntry("listener2", event); - } - - @Test - public void should_use_exact_class() { - // Given - bus.register(ChildEvent.class, (e) -> results.put("listener1", e)); - bus.register(ParentEvent.class, (e) -> results.put("listener2", e)); - - // When - bus.fire(event); - - // Then - assertThat(results).hasSize(1).containsEntry("listener1", event); - - // When - results.clear(); - ParentEvent parentEvent = new ParentEvent(); - bus.fire(parentEvent); - - // Then - assertThat(results).hasSize(1).containsEntry("listener2", parentEvent); - } - - private static class ParentEvent {} - - private static class ChildEvent extends ParentEvent {} -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java deleted file mode 100644 index cb83b523ebe..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.control; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.channel.EventCallback; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.event.SchemaChangeEvent; -import com.datastax.oss.protocol.internal.response.event.StatusChangeEvent; -import com.datastax.oss.protocol.internal.response.event.TopologyChangeEvent; -import java.util.concurrent.CompletableFuture; -import org.junit.Test; -import org.mockito.ArgumentCaptor; - -public class ControlConnectionEventsTest extends ControlConnectionTestBase { - - @Test - public void should_register_for_all_events_if_topology_requested() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - - // When - controlConnection.init(true, false, false); - - // Then - await() - .untilAsserted( - () -> { - DriverChannelOptions channelOptions = optionsCaptor.getValue(); - assertThat(channelOptions.eventTypes) - .containsExactly( - ProtocolConstants.EventType.SCHEMA_CHANGE, - ProtocolConstants.EventType.STATUS_CHANGE, - ProtocolConstants.EventType.TOPOLOGY_CHANGE); - assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); - }); - } - - @Test - public void 
should_register_for_schema_events_only_if_topology_not_requested() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - - // When - controlConnection.init(false, false, false); - - // Then - await() - .untilAsserted( - () -> { - DriverChannelOptions channelOptions = optionsCaptor.getValue(); - assertThat(channelOptions.eventTypes) - .containsExactly(ProtocolConstants.EventType.SCHEMA_CHANGE); - assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); - }); - } - - @Test - public void should_process_status_change_events() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - controlConnection.init(true, false, false); - await().until(() -> optionsCaptor.getValue() != null); - EventCallback callback = optionsCaptor.getValue().eventCallback; - StatusChangeEvent event = - new StatusChangeEvent(ProtocolConstants.StatusChangeType.UP, ADDRESS1); - - // When - callback.onEvent(event); - - // Then - verify(eventBus).fire(TopologyEvent.suggestUp(ADDRESS1)); - } - - @Test - public void should_process_topology_change_events() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - controlConnection.init(true, false, false); - await().until(() -> optionsCaptor.getValue() != null); - EventCallback callback = optionsCaptor.getValue().eventCallback; - TopologyChangeEvent event = - new 
TopologyChangeEvent(ProtocolConstants.TopologyChangeType.NEW_NODE, ADDRESS1); - - // When - callback.onEvent(event); - - // Then - verify(eventBus).fire(TopologyEvent.suggestAdded(ADDRESS1)); - } - - @Test - public void should_process_schema_change_events() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - ArgumentCaptor optionsCaptor = - ArgumentCaptor.forClass(DriverChannelOptions.class); - when(channelFactory.connect(eq(node1), optionsCaptor.capture())) - .thenReturn(CompletableFuture.completedFuture(channel1)); - controlConnection.init(false, false, false); - await().until(() -> optionsCaptor.getValue() != null); - EventCallback callback = optionsCaptor.getValue().eventCallback; - SchemaChangeEvent event = - new SchemaChangeEvent( - ProtocolConstants.SchemaChangeType.CREATED, - ProtocolConstants.SchemaChangeTarget.FUNCTION, - "ks", - "fn", - ImmutableList.of("text", "text")); - - // When - callback.onEvent(event); - - // Then - verify(metadataManager).refreshSchema("ks", false, false); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java deleted file mode 100644 index 526efefa2fe..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java +++ /dev/null @@ -1,564 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.control; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class ControlConnectionTest extends ControlConnectionTestBase { - - @Test - public void should_close_successfully_if_it_was_never_init() { - // When - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - - // Then - assertThatStage(closeFuture).isSuccess(); - } - - @Test 
- public void should_init_with_first_contact_point_if_reachable() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - - // When - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - // Then - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_always_return_same_init_future() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - - // When - CompletionStage initFuture1 = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - CompletionStage initFuture2 = controlConnection.init(false, false, false); - - // Then - assertThatStage(initFuture1).isEqualTo(initFuture2); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_init_with_second_contact_point_if_first_one_fails() { - // Given - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - // When - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - - // Then - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - // each attempt tries all nodes, so there is 
no reconnection - verify(reconnectionPolicy, never()).newNodeSchedule(any(Node.class)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_fail_to_init_if_all_contact_points_fail() { - // Given - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node1, "mock failure") - .failure(node2, "mock failure") - .build(); - - // When - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - - // Then - assertThatStage(initFuture).isFailed(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node2)); - // no reconnections at init - verify(reconnectionPolicy, never()).newNodeSchedule(any(Node.class)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_channel_goes_down() throws Exception { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - channel1.close(); - - // Then - // a reconnection was started - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - 
verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); - verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_node_becomes_ignored() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - mockQueryPlan(node2); - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node1)); - - // Then - // an immediate reconnection was started - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(reconnectionSchedule, never()).nextDelay(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); - verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_node_is_removed() { - should_reconnect_if_event(NodeStateEvent.removed(node1)); - } - - @Test - public void should_reconnect_if_node_is_forced_down() { - should_reconnect_if_event(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node1)); - } - - private void 
should_reconnect_if_event(NodeStateEvent event) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - mockQueryPlan(node2); - eventBus.fire(event); - - // Then - // an immediate reconnection was started - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(reconnectionSchedule, never()).nextDelay(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); - verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_if_node_became_ignored_during_reconnection_attempt() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node1, channel1) - // reconnection - .pending(node2, channel2Future) - .success(node1, channel3) - .build(); - - CompletionStage initFuture = 
controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - mockQueryPlan(node2, node1); - // channel1 goes down, triggering a reconnection - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - // the reconnection to node2 is in progress - factoryHelper.waitForCall(node2); - - // When - // node2 becomes ignored - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - // the reconnection to node2 completes - channel2Future.complete(channel2); - - // Then - // The channel should get closed and we should try the next node - verify(channel2, VERIFY_TIMEOUT).forceClose(); - factoryHelper.waitForCall(node1); - } - - @Test - public void should_reconnect_if_node_was_removed_during_reconnection_attempt() { - should_reconnect_if_event_during_reconnection_attempt(NodeStateEvent.removed(node2)); - } - - @Test - public void should_reconnect_if_node_was_forced_down_during_reconnection_attempt() { - should_reconnect_if_event_during_reconnection_attempt( - NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - } - - private void should_reconnect_if_event_during_reconnection_attempt(NodeStateEvent event) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node1, channel1) - // reconnection - .pending(node2, channel2Future) - .success(node1, channel3) - .build(); - - CompletionStage 
initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - - assertThatStage(initFuture).isSuccess(); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - mockQueryPlan(node2, node1); - // channel1 goes down, triggering a reconnection - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - // the reconnection to node2 is in progress - factoryHelper.waitForCall(node2); - - // When - // node2 goes into the new state - eventBus.fire(event); - // the reconnection to node2 completes - channel2Future.complete(channel2); - - // Then - // The channel should get closed and we should try the next node - verify(channel2, VERIFY_TIMEOUT).forceClose(); - factoryHelper.waitForCall(node1); - } - - @Test - public void should_force_reconnection_if_pending() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // the channel fails and a reconnection is scheduled for later - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - - // When - controlConnection.reconnectNow(); - 
factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - - // Then - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_force_reconnection_even_if_connected() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // When - controlConnection.reconnectNow(); - - // Then - factoryHelper.waitForCall(node1); - factoryHelper.waitForCall(node2); - await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(channel1, VERIFY_TIMEOUT).forceClose(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_not_force_reconnection_if_not_init() throws InterruptedException { - // When - controlConnection.reconnectNow(); - TimeUnit.MILLISECONDS.sleep(500); - - // Then - verify(reconnectionSchedule, never()).nextDelay(); - } - - @Test - public void should_not_force_reconnection_if_closed() throws InterruptedException { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - CompletionStage 
initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture).isSuccess(); - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - - // When - controlConnection.reconnectNow(); - TimeUnit.MILLISECONDS.sleep(500); - - // Then - verify(reconnectionSchedule, never()).nextDelay(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_close_channel_when_closing() { - // Given - DriverChannel channel1 = newMockDriverChannel(1); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture).isSuccess(); - - // When - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - - // Then - assertThatStage(closeFuture).isSuccess(); - verify(channel1, VERIFY_TIMEOUT).forceClose(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_close_channel_if_closed_during_reconnection() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .failure(node1, "mock failure") - .pending(node2, channel2Future) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // the channel fails and a reconnection is scheduled - channel1.close(); - 
verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCall(node1); - // channel2 starts initializing (but the future is not completed yet) - factoryHelper.waitForCall(node2); - - // When - // the control connection gets closed before channel2 initialization is complete - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - channel2Future.complete(channel2); - - // Then - verify(channel2, VERIFY_TIMEOUT).forceClose(); - // no event because the control connection never "owned" the channel - verify(eventBus, never()).fire(ChannelEvent.channelOpened(node2)); - verify(eventBus, never()).fire(ChannelEvent.channelClosed(node2)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_handle_channel_failure_if_closed_during_reconnection() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel1Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node1, channel1) - .pending(node1, channel1Future) - .success(node2, channel2) - .build(); - - CompletionStage initFuture = controlConnection.init(false, false, false); - factoryHelper.waitForCall(node1); - assertThatStage(initFuture) - .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); - - // the channel fails and a reconnection is scheduled - channel1.close(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - // channel1 starts initializing (but the future is not completed yet) - factoryHelper.waitForCall(node1); - - 
// When - // the control connection gets closed before channel1 initialization fails - CompletionStage closeFuture = controlConnection.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - channel1Future.completeExceptionally(new Exception("mock failure")); - - // Then - // should never try channel2 because the reconnection has detected that it can stop after the - // first failure - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java deleted file mode 100644 index c52199465a8..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.control; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.DriverChannelOptions; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import io.netty.channel.Channel; -import io.netty.channel.DefaultChannelPromise; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.EventLoop; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.Exchanger; -import java.util.concurrent.TimeUnit; -import 
org.junit.After; -import org.junit.Before; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.verification.VerificationWithTimeout; - -abstract class ControlConnectionTestBase { - protected static final InetSocketAddress ADDRESS1 = new InetSocketAddress("127.0.0.1", 9042); - - /** How long we wait when verifying mocks for async invocations */ - protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(500); - - @Mock protected InternalDriverContext context; - @Mock protected DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock protected ReconnectionPolicy reconnectionPolicy; - @Mock protected ReconnectionPolicy.ReconnectionSchedule reconnectionSchedule; - @Mock protected NettyOptions nettyOptions; - protected DefaultEventLoopGroup adminEventLoopGroup; - protected EventBus eventBus; - @Mock protected ChannelFactory channelFactory; - protected Exchanger> channelFactoryFuture; - @Mock protected LoadBalancingPolicyWrapper loadBalancingPolicyWrapper; - @Mock protected MetadataManager metadataManager; - @Mock protected MetricsFactory metricsFactory; - - protected DefaultNode node1; - protected DefaultNode node2; - - protected ControlConnection controlConnection; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - adminEventLoopGroup = new DefaultEventLoopGroup(1); - - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - when(context.getChannelFactory()).thenReturn(channelFactory); - - channelFactoryFuture = new Exchanger<>(); - when(channelFactory.connect(any(Node.class), any(DriverChannelOptions.class))) - .thenAnswer( - invocation -> { - CompletableFuture channelFuture = new CompletableFuture<>(); - channelFactoryFuture.exchange(channelFuture, 100, TimeUnit.MILLISECONDS); - return 
channelFuture; - }); - - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getBoolean(DefaultDriverOption.RECONNECT_ON_INIT)).thenReturn(false); - - when(context.getReconnectionPolicy()).thenReturn(reconnectionPolicy); - // Child classes only cover "runtime" reconnections when the driver is already initialized - when(reconnectionPolicy.newControlConnectionSchedule(false)).thenReturn(reconnectionSchedule); - // By default, set a large reconnection delay. Tests that care about reconnection will override - // it. - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - - when(context.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - mockQueryPlan(node1, node2); - - when(metadataManager.refreshNodes()).thenReturn(CompletableFuture.completedFuture(null)); - when(metadataManager.refreshSchema(anyString(), anyBoolean(), anyBoolean())) - .thenReturn(CompletableFuture.completedFuture(null)); - when(context.getMetadataManager()).thenReturn(metadataManager); - - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getBoolean(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR)) - .thenReturn(false); - - controlConnection = new ControlConnection(context); - } - - protected void mockQueryPlan(Node... 
nodes) { - when(loadBalancingPolicyWrapper.newQueryPlan()) - .thenAnswer( - i -> { - ConcurrentLinkedQueue queryPlan = new ConcurrentLinkedQueue<>(); - for (Node node : nodes) { - queryPlan.offer(node); - } - return queryPlan; - }); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - protected DriverChannel newMockDriverChannel(int id) { - DriverChannel driverChannel = mock(DriverChannel.class); - Channel channel = mock(Channel.class); - EventLoop adminExecutor = adminEventLoopGroup.next(); - DefaultChannelPromise closeFuture = new DefaultChannelPromise(channel, adminExecutor); - when(driverChannel.close()) - .thenAnswer( - i -> { - closeFuture.trySuccess(null); - return closeFuture; - }); - when(driverChannel.forceClose()) - .thenAnswer( - i -> { - closeFuture.trySuccess(null); - return closeFuture; - }); - when(driverChannel.closeFuture()).thenReturn(closeFuture); - when(driverChannel.toString()).thenReturn("channel" + id); - when(driverChannel.getEndPoint()) - .thenReturn(new DefaultEndPoint(new InetSocketAddress("127.0.0." + id, 9042))); - return driverChannel; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java deleted file mode 100644 index 954cf0e14a0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import org.junit.Test; - -public class ConversionsTest { - @Test - public void should_find_pk_indices_if_all_bound() { - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk"))).containsExactly(0); - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "c"))) - .containsExactly(0); - assertThat(Conversions.findIndices(partitionKey("pk"), variables("c", "pk"))) - .containsExactly(1); - assertThat( - Conversions.findIndices( - partitionKey("pk1", "pk2", "pk3"), - variables("c1", "pk2", "pk3", "c2", "pk1", "c3"))) - .containsExactly(4, 1, 2); - } - - @Test - public void should_use_first_pk_index_if_bound_multiple_times() { - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "pk"))) - .containsExactly(0); - assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "c1", "pk", "c2"))) - .containsExactly(0); - assertThat( - Conversions.findIndices( - partitionKey("pk1", "pk2", "pk3"), - variables("c1", "pk2", "pk3", "c2", "pk1", "c3", "pk1", 
"pk2"))) - .containsExactly(4, 1, 2); - } - - @Test - public void should_return_empty_pk_indices_if_at_least_one_component_not_bound() { - assertThat(Conversions.findIndices(partitionKey("pk"), variables("c1", "c2"))).isEmpty(); - assertThat( - Conversions.findIndices( - partitionKey("pk1", "pk2", "pk3"), variables("c1", "pk2", "c2", "pk1", "c3"))) - .isEmpty(); - } - - private List partitionKey(String... columnNames) { - ImmutableList.Builder columns = - ImmutableList.builderWithExpectedSize(columnNames.length); - for (String columnName : columnNames) { - ColumnMetadata column = mock(ColumnMetadata.class); - when(column.getName()).thenReturn(CqlIdentifier.fromInternal(columnName)); - columns.add(column); - } - return columns.build(); - } - - private ColumnDefinitions variables(String... columnNames) { - ImmutableList.Builder columns = - ImmutableList.builderWithExpectedSize(columnNames.length); - for (String columnName : columnNames) { - ColumnDefinition column = mock(ColumnDefinition.class); - when(column.getName()).thenReturn(CqlIdentifier.fromInternal(columnName)); - columns.add(column); - } - return DefaultColumnDefinitions.valueOf(columns.build()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java deleted file mode 100644 index 1924ef5a9af..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase.defaultFrameOf; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.OverloadedException; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Message; -import 
com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class CqlPrepareHandlerTest { - - private static final DefaultPrepareRequest PREPARE_REQUEST = - new DefaultPrepareRequest("mock query"); - - @Mock private Node node1; - @Mock private Node node2; - @Mock private Node node3; - - private final Map payload = - ImmutableMap.of("key1", ByteBuffer.wrap(new byte[] {1, 2, 3, 4})); - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void should_prepare_on_first_node_and_reprepare_on_others() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - // The future waits for the reprepare attempt on other nodes, so it's not done yet. 
- assertThatStage(prepareFuture).isNotDone(); - - // Should now reprepare on the remaining nodes: - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - node2Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - node3Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void should_not_reprepare_on_other_nodes_if_disabled_in_config() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(false); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - // The future should complete immediately: - assertThatStage(prepareFuture).isSuccess(); - - // And the other nodes should not be contacted: - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - } - } - - @Test - public void should_ignore_errors_while_repreparing_on_other_nodes() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withResponse(node1, defaultFrameOf(simplePrepared())); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try 
(RequestHandlerTestHarness harness = harnessBuilder.build()) { - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(prepareFuture).isNotDone(); - - // Other nodes fail, the future should still succeed when all done - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock error"))); - - node3Behavior.verifyWrite(); - node3Behavior.setWriteFailure(new RuntimeException("mock error")); - - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void should_retry_initial_prepare_if_recoverable_error() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.OVERLOADED, "mock message"))) - .withResponse(node2, defaultFrameOf(simplePrepared())); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // Make node1's error recoverable, will switch to node2 - when(harness - .getContext() - .getRetryPolicy(anyString()) - .onErrorResponseVerdict(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryVerdict.RETRY_NEXT); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Success on node2, reprepare on node3 - assertThatStage(prepareFuture).isNotDone(); - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - node3Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void 
should_not_retry_initial_prepare_if_unrecoverable_error() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.OVERLOADED, "mock message"))); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // Make node1's error unrecoverable, will rethrow - when(harness - .getContext() - .getRetryPolicy(anyString()) - .onErrorResponseVerdict(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryVerdict.RETHROW); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Success on node2, reprepare on node3 - assertThatStage(prepareFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(OverloadedException.class); - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - }); - } - } - - @Test - public void should_fail_if_nodes_unavailable() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - try (RequestHandlerTestHarness harness = - harnessBuilder.withEmptyPool(node1).withEmptyPool(node2).build()) { - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - assertThatStage(prepareFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> allErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(allErrors).hasSize(2); - assertThat(allErrors) - .hasEntrySatisfying( - node1, - nodeErrors -> - assertThat(nodeErrors) - .singleElement() - .isInstanceOf(NodeUnavailableException.class)); - assertThat(allErrors) - .hasEntrySatisfying( - node2, - nodeErrors -> - assertThat(nodeErrors) - 
.singleElement() - .isInstanceOf(NodeUnavailableException.class)); - }); - } - } - - @Test - public void should_fail_if_retry_policy_ignores_error() { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.OVERLOADED, "mock message"))); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // Make node1's error unrecoverable, will rethrow - RetryPolicy mockRetryPolicy = - harness.getContext().getRetryPolicy(DriverExecutionProfile.DEFAULT_NAME); - when(mockRetryPolicy.onErrorResponseVerdict( - eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryVerdict.IGNORE); - - CompletionStage prepareFuture = - new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Success on node2, reprepare on node3 - assertThatStage(prepareFuture) - .isFailed( - error -> { - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "IGNORE decisions are not allowed for prepare requests, " - + "please fix your retry policy."); - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - }); - } - } - - @Test - public void should_propagate_custom_payload_on_single_node() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - DefaultPrepareRequest prepareRequest = - new DefaultPrepareRequest( - SimpleStatement.newInstance("irrelevant").setCustomPayload(payload)); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - try (RequestHandlerTestHarness 
harness = harnessBuilder.build()) { - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(false); - CompletionStage prepareFuture = - new CqlPrepareHandler(prepareRequest, harness.getSession(), harness.getContext(), "test") - .handle(); - verify(node1Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - node2Behavior.verifyNoWrite(); - node3Behavior.verifyNoWrite(); - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - @Test - public void should_propagate_custom_payload_on_all_nodes() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - DefaultPrepareRequest prepareRequest = - new DefaultPrepareRequest( - SimpleStatement.newInstance("irrelevant").setCustomPayload(payload)); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - node1Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - node2Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - node3Behavior.setResponseSuccess(defaultFrameOf(simplePrepared())); - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); - when(config.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); - CompletionStage prepareFuture = - new CqlPrepareHandler(prepareRequest, harness.getSession(), harness.getContext(), "test") - .handle(); - verify(node1Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - verify(node2Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - 
verify(node3Behavior.channel) - .write(any(Prepare.class), anyBoolean(), eq(payload), any(ResponseCallback.class)); - assertThatStage(prepareFuture).isSuccess(CqlPrepareHandlerTest::assertMatchesSimplePrepared); - } - } - - private static Message simplePrepared() { - RowsMetadata variablesMetadata = - new RowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "key", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {0}, - null); - RowsMetadata resultMetadata = - new RowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null); - return new Prepared( - Bytes.fromHexString("0xffff").array(), null, variablesMetadata, resultMetadata); - } - - private static void assertMatchesSimplePrepared(PreparedStatement statement) { - assertThat(Bytes.toHexString(statement.getId())).isEqualTo("0xffff"); - - ColumnDefinitions variableDefinitions = statement.getVariableDefinitions(); - assertThat(variableDefinitions).hasSize(1); - assertThat(variableDefinitions.get(0).getName().asInternal()).isEqualTo("key"); - - ColumnDefinitions resultSetDefinitions = statement.getResultSetDefinitions(); - assertThat(resultSetDefinitions).hasSize(1); - assertThat(resultSetDefinitions.get(0).getName().asInternal()).isEqualTo("message"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java deleted file mode 100644 index ccac873c616..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java +++ /dev/null @@ -1,607 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atMost; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.HeartbeatException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; 
-import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.datastax.oss.protocol.internal.response.error.ReadTimeout; -import com.datastax.oss.protocol.internal.response.error.Unavailable; -import com.datastax.oss.protocol.internal.response.error.WriteTimeout; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Iterator; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Test; - -public class CqlRequestHandlerRetryTest extends CqlRequestHandlerTestBase { - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_try_next_node_if_bootstrapping( - boolean defaultIdempotence, Statement statement) { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, 
defaultFrameOf(singleRow())) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - assertThat(executionInfo.getErrors().get(0).getValue()) - .isInstanceOf(BootstrappingException.class); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - - verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - }); - } - } - - @Test - @UseDataProvider("allIdempotenceConfigs") - public void should_always_rethrow_query_validation_error( - boolean defaultIdempotence, Statement statement) { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(defaultIdempotence) - .withResponse( - node1, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.INVALID, "mock message"))) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error) - .isInstanceOf(InvalidQueryException.class) - .hasMessage("mock message"); - verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - - 
verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.OTHER_ERRORS, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_next_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.RETRIES, 
DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.retryMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_try_same_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node1, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_SAME); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - 
DefaultNodeMetric.RETRIES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.retryMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(2)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(2)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_ignore_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.IGNORE); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isFalse(); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).hasSize(0); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.IGNORES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.ignoreMetric, DriverExecutionProfile.DEFAULT_NAME); - 
verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndNotIdempotent") - public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_rethrows( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - - // For two of the possible exceptions, 
the retry policy is called even if the statement is not - // idempotent - boolean shouldCallRetryPolicy = - (failureScenario.expectedExceptionClass.equals(UnavailableException.class) - || failureScenario.expectedExceptionClass.equals(ReadTimeoutException.class)); - - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - if (shouldCallRetryPolicy) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); - } - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass); - // When non idempotent, the policy is bypassed completely: - if (!shouldCallRetryPolicy) { - verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString())); - } - - verify(nodeMetricUpdater1) - .incrementCounter( - failureScenario.errorMetric, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1, atMost(1)) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - }); - } - } - - @Test - @UseDataProvider("failureAndIdempotent") - public void should_not_fail_with_duplicate_key_when_retrying_with_request_id_generator( - FailureScenario failureScenario, boolean defaultIdempotence, Statement statement) { - - // Create a RequestIdGenerator that uses the same key as the statement's custom 
payload - RequestIdGenerator requestIdGenerator = - new RequestIdGenerator() { - private AtomicInteger counter = new AtomicInteger(0); - - @Override - public String getSessionRequestId() { - return "session-123"; - } - - @Override - public String getNodeRequestId(@NonNull Request request, @NonNull String parentId) { - return parentId + "-" + counter.getAndIncrement(); - } - }; - - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(defaultIdempotence) - .withRequestIdGenerator(requestIdGenerator); - failureScenario.mockRequestError(harnessBuilder, node1); - harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyVerdict( - harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - // The test should succeed without throwing a duplicate key exception - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node2); - assertThat(executionInfo.getErrors()).hasSize(1); - assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); - - // Verify that the custom payload still contains the request ID key - // (either the original value or the generated one, depending on implementation) - assertThat(executionInfo.getRequest().getCustomPayload().get("request-id")) - .isEqualTo(ByteBuffer.wrap("session-123-1".getBytes(StandardCharsets.UTF_8))); - }); - } - } - - /** - * Sets up the mocks to simulate an error from a node, and make the retry policy 
return a given - * decision for that error. - */ - private abstract static class FailureScenario { - private final Class expectedExceptionClass; - final DefaultNodeMetric errorMetric; - final DefaultNodeMetric retryMetric; - final DefaultNodeMetric ignoreMetric; - - protected FailureScenario( - Class expectedExceptionClass, - DefaultNodeMetric errorMetric, - DefaultNodeMetric retryMetric, - DefaultNodeMetric ignoreMetric) { - this.expectedExceptionClass = expectedExceptionClass; - this.errorMetric = errorMetric; - this.retryMetric = retryMetric; - this.ignoreMetric = ignoreMetric; - } - - abstract void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node); - - abstract void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict); - } - - @DataProvider - public static Object[][] failure() { - return TestDataProviders.fromList( - new FailureScenario( - ReadTimeoutException.class, - DefaultNodeMetric.READ_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new ReadTimeout( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 1, 2, true))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onReadTimeoutVerdict( - any(Statement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(true), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - WriteTimeoutException.class, - DefaultNodeMetric.WRITE_TIMEOUTS, - DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, - DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new WriteTimeout( - "mock message", - ProtocolConstants.ConsistencyLevel.LOCAL_ONE, - 1, - 2, - 
ProtocolConstants.WriteType.SIMPLE))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onWriteTimeoutVerdict( - any(Statement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(DefaultWriteType.SIMPLE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - UnavailableException.class, - DefaultNodeMetric.UNAVAILABLES, - DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, - DefaultNodeMetric.IGNORES_ON_UNAVAILABLE) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Unavailable( - "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 2, 1))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onUnavailableVerdict( - any(Statement.class), - eq(DefaultConsistencyLevel.LOCAL_ONE), - eq(2), - eq(1), - eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - ServerError.class, - DefaultNodeMetric.OTHER_ERRORS, - DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, - DefaultNodeMetric.IGNORES_ON_OTHER_ERROR) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponse( - node, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock server error"))); - } - - @Override - public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onErrorResponseVerdict(any(Statement.class), any(ServerError.class), eq(0))) - .thenReturn(verdict); - } - }, - new FailureScenario( - HeartbeatException.class, - DefaultNodeMetric.ABORTED_REQUESTS, - DefaultNodeMetric.RETRIES_ON_ABORTED, - DefaultNodeMetric.IGNORES_ON_ABORTED) { - @Override - public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) { - builder.withResponseFailure(node, mock(HeartbeatException.class)); - } - - @Override - public void 
mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { - when(policy.onRequestAbortedVerdict( - any(Statement.class), any(HeartbeatException.class), eq(0))) - .thenReturn(verdict); - } - }); - } - - @DataProvider - public static Object[][] failureAndIdempotent() { - return TestDataProviders.combine(failure(), idempotentConfig()); - } - - @DataProvider - public static Object[][] failureAndNotIdempotent() { - return TestDataProviders.combine(failure(), nonIdempotentConfig()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java deleted file mode 100644 index a09a9eb3d5a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java +++ /dev/null @@ -1,427 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; - -public class CqlRequestHandlerSpeculativeExecutionTest extends CqlRequestHandlerTestBase { - - @Test - @UseDataProvider("nonIdempotentConfig") - public void should_not_schedule_speculative_executions_if_not_idempotent( - boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - - 
try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test").handle(); - - node1Behavior.verifyWrite(); - - assertThat(harness.nextScheduledTimeout()).isNotNull(); // Discard the timeout task - assertThat(harness.nextScheduledTimeout()).isNull(); - - verifyNoMoreInteractions(speculativeExecutionPolicy); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_schedule_speculative_executions( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - long secondExecutionDelay = 200L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(2))) - .thenReturn(secondExecutionDelay); - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(3))) - .thenReturn(-1L); - - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test").handle(); - - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - 
harness.nextScheduledTimeout(); // Discard the timeout task - - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - verifyNoMoreInteractions(nodeMetricUpdater1); - speculativeExecution1.task().run(speculativeExecution1); - verify(nodeMetricUpdater1) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - CapturedTimeout speculativeExecution2 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution2.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(secondExecutionDelay); - verifyNoMoreInteractions(nodeMetricUpdater2); - speculativeExecution2.task().run(speculativeExecution2); - verify(nodeMetricUpdater2) - .incrementCounter( - DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME); - node3Behavior.verifyWrite(); - node3Behavior.setWriteSuccess(); - - // No more scheduled tasks since the policy returns 0 on the third call. - assertThat(harness.nextScheduledTimeout()).isNull(); - - // Note that we don't need to complete any response, the test is just about checking that - // executions are started. 
- } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_not_start_execution_if_result_complete( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CqlRequestHandler requestHandler = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test"); - CompletionStage resultSetFuture = requestHandler.handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - harness.nextScheduledTimeout(); // Discard the timeout task - - // Check that the first execution was scheduled but don't run it yet - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess(defaultFrameOf(singleRow())); - assertThatStage(resultSetFuture).isSuccess(); - - // Pending speculative executions should have been cancelled. However we don't check - // firstExecutionTask directly because the request handler's onResponse can sometimes be - // invoked before operationComplete (this is very unlikely in practice, but happens in our - // Travis CI build). When that happens, the speculative execution is not recorded yet when - // cancelScheduledTasks runs. 
- // So check the timeout future instead, since it's cancelled in the same method. - assertThat(requestHandler.scheduledTimeout.isCancelled()).isTrue(); - - // The fact that we missed the speculative execution is not a problem; even if it starts, it - // will eventually find out that the result is already complete and cancel itself: - speculativeExecution1.task().run(speculativeExecution1); - node2Behavior.verifyNoWrite(); - - verify(nodeMetricUpdater1) - .isEnabled(DefaultNodeMetric.CQL_MESSAGES, DriverExecutionProfile.DEFAULT_NAME); - verify(nodeMetricUpdater1) - .updateTimer( - eq(DefaultNodeMetric.CQL_MESSAGES), - eq(DriverExecutionProfile.DEFAULT_NAME), - anyLong(), - eq(TimeUnit.NANOSECONDS)); - verifyNoMoreInteractions(nodeMetricUpdater1); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_fail_if_no_nodes(boolean defaultIdempotence, Statement statement) { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - // No configured behaviors => will yield an empty query plan - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - - harness.nextScheduledTimeout(); // Discard the timeout task - - assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_fail_if_no_more_nodes_and_initial_execution_is_last( - boolean defaultIdempotence, Statement statement) 
throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - harnessBuilder.withResponse( - node2, - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - harness.nextScheduledTimeout(); // Discard the timeout task - - // Run the next scheduled task to start the speculative execution. node2 will reply with a - // BOOTSTRAPPING error, causing a RETRY_NEXT; but the query plan is now empty so the - // speculative execution stops. - // next scheduled timeout should be the first speculative execution. Get it and run it. 
- CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with the same response, that triggers a RETRY_NEXT - node1Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // But again the query plan is empty so that should fail the request - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - 
node1Behavior.setWriteSuccess(); - // do not simulate a response from node1 yet - - harness.nextScheduledTimeout(); // Discard the timeout task - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - // node1 now replies with a BOOTSTRAPPING error that triggers a RETRY_NEXT - // but the query plan is empty so the initial execution stops - node1Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // Same thing with node2, so the speculative execution should reach the end of the query plan - // and fail the request - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> nodeErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); - }); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_retry_in_speculative_executions( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); - harnessBuilder.withResponse(node3, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = 
harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - // do not simulate a response from node1. The request will stay hanging for the rest of this - // test - - harness.nextScheduledTimeout(); // Discard the timeout task - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // node2 replies with a response that triggers a RETRY_NEXT - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // The second execution should move to node3 and complete the request - assertThatStage(resultSetFuture).isSuccess(); - - // The request to node1 was still in flight, it should have been cancelled - node1Behavior.verifyCancellation(); - } - } - - @Test - @UseDataProvider("idempotentConfig") - public void should_stop_retrying_other_executions_if_result_complete( - boolean defaultIdempotence, Statement statement) throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = - RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - PoolBehavior node2Behavior = 
harnessBuilder.customBehavior(node2); - PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - SpeculativeExecutionPolicy speculativeExecutionPolicy = - harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - long firstExecutionDelay = 100L; - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), eq(null), eq(statement), eq(1))) - .thenReturn(firstExecutionDelay); - CompletionStage resultSetFuture = - new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") - .handle(); - node1Behavior.verifyWrite(); - node1Behavior.setWriteSuccess(); - - harness.nextScheduledTimeout(); // Discard the timeout task - - // next scheduled timeout should be the first speculative execution. Get it and run it. - CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); - assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) - .isEqualTo(firstExecutionDelay); - speculativeExecution1.task().run(speculativeExecution1); - - node2Behavior.verifyWrite(); - node2Behavior.setWriteSuccess(); - - // Complete the request from the initial execution - node1Behavior.setResponseSuccess(defaultFrameOf(singleRow())); - assertThatStage(resultSetFuture).isSuccess(); - - // node2 replies with a response that would trigger a RETRY_NEXT if the request was still - // running - node2Behavior.setResponseSuccess( - defaultFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); - - // The speculative execution should not move to node3 because it is stopped - node3Behavior.verifyNoWrite(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java deleted file mode 100644 index c1a2765eef0..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.NodeUnavailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; 
-import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.session.RepreparePayload; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.response.error.Unprepared; -import com.datastax.oss.protocol.internal.response.result.Prepared; -import com.datastax.oss.protocol.internal.response.result.SetKeyspace; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import org.junit.Test; - -public class CqlRequestHandlerTest extends CqlRequestHandlerTestBase { - - @Test - public void should_complete_result_if_first_node_replies_immediately() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withResponse(node1, defaultFrameOf(singleRow())) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - Iterator rows = resultSet.currentPage().iterator(); - assertThat(rows.hasNext()).isTrue(); - assertThat(rows.next().getString("message")).isEqualTo("hello, world"); - - ExecutionInfo executionInfo = resultSet.getExecutionInfo(); - assertThat(executionInfo.getCoordinator()).isEqualTo(node1); - assertThat(executionInfo.getErrors()).isEmpty(); - assertThat(executionInfo.getIncomingPayload()).isEmpty(); - assertThat(executionInfo.getPagingState()).isNull(); - 
assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(executionInfo.getWarnings()).isEmpty(); - }); - } - } - - @Test - public void should_fail_if_no_node_available() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - // Mock no responses => this will produce an empty query plan - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); - } - } - - @Test - public void should_fail_if_nodes_unavailable() { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - try (RequestHandlerTestHarness harness = - harnessBuilder.withEmptyPool(node1).withEmptyPool(node2).build()) { - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - assertThatStage(resultSetFuture) - .isFailed( - error -> { - assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map> allErrors = - ((AllNodesFailedException) error).getAllErrors(); - assertThat(allErrors).hasSize(2); - assertThat(allErrors) - .hasEntrySatisfying( - node1, - nodeErrors -> - assertThat(nodeErrors) - .singleElement() - .isInstanceOf(NodeUnavailableException.class)); - assertThat(allErrors) - .hasEntrySatisfying( - node2, - nodeErrors -> - assertThat(nodeErrors) - .singleElement() - .isInstanceOf(NodeUnavailableException.class)); - }); - } - } - - @Test - public void should_time_out_if_first_node_takes_too_long_to_respond() throws Exception { - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); 
- node1Behavior.setWriteSuccess(); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - // First scheduled task is the timeout, run it before node1 has responded - CapturedTimeout requestTimeout = harness.nextScheduledTimeout(); - Duration configuredTimeoutDuration = - harness - .getContext() - .getConfig() - .getDefaultProfile() - .getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - assertThat(requestTimeout.getDelay(TimeUnit.NANOSECONDS)) - .isEqualTo(configuredTimeoutDuration.toNanos()); - requestTimeout.task().run(requestTimeout); - - assertThatStage(resultSetFuture) - .isFailed(t -> assertThat(t).isInstanceOf(DriverTimeoutException.class)); - } - } - - @Test - public void should_switch_keyspace_on_session_after_successful_use_statement() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withResponse(node1, defaultFrameOf(new SetKeyspace("newKeyspace"))) - .build()) { - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> - verify(harness.getSession()) - .setKeyspace(CqlIdentifier.fromInternal("newKeyspace"))); - } - } - - @Test - public void should_reprepare_on_the_fly_if_not_prepared() throws InterruptedException { - ByteBuffer mockId = Bytes.fromHexString("0xffff"); - - PreparedStatement preparedStatement = mock(PreparedStatement.class); - when(preparedStatement.getId()).thenReturn(mockId); - ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - when(columnDefinitions.size()).thenReturn(0); - when(preparedStatement.getResultSetDefinitions()).thenReturn(columnDefinitions); - BoundStatement boundStatement = mock(BoundStatement.class); - 
when(boundStatement.getPreparedStatement()).thenReturn(preparedStatement); - when(boundStatement.getValues()).thenReturn(Collections.emptyList()); - when(boundStatement.getNowInSeconds()).thenReturn(Statement.NO_NOW_IN_SECONDS); - - RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); - // For the first attempt that gets the UNPREPARED response - PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); - // For the second attempt that succeeds - harnessBuilder.withResponse(node1, defaultFrameOf(singleRow())); - - try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - - // The handler will look for the info to reprepare in the session's cache, put it there - ConcurrentMap repreparePayloads = new ConcurrentHashMap<>(); - repreparePayloads.put( - mockId, new RepreparePayload(mockId, "mock query", null, Collections.emptyMap())); - when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads); - - CompletionStage resultSetFuture = - new CqlRequestHandler(boundStatement, harness.getSession(), harness.getContext(), "test") - .handle(); - - // Before we proceed, mock the PREPARE exchange that will occur as soon as we complete the - // first response. - node1Behavior.mockFollowupRequest( - Prepare.class, defaultFrameOf(new Prepared(Bytes.getArray(mockId), null, null, null))); - - node1Behavior.setWriteSuccess(); - node1Behavior.setResponseSuccess( - defaultFrameOf(new Unprepared("mock message", Bytes.getArray(mockId)))); - - // Should now re-prepare, re-execute and succeed. 
- assertThatStage(resultSetFuture).isSuccess(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java deleted file mode 100644 index 9bd3b6fa28c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchType; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Queue; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public abstract class CqlRequestHandlerTestBase { - - protected static final SimpleStatement UNDEFINED_IDEMPOTENCE_STATEMENT = - SimpleStatement.newInstance("mock query"); - protected static final SimpleStatement IDEMPOTENT_STATEMENT = - SimpleStatement.builder("mock 
query").setIdempotence(true).build(); - protected static final SimpleStatement NON_IDEMPOTENT_STATEMENT = - SimpleStatement.builder("mock query").setIdempotence(false).build(); - protected static final BatchStatement UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT = - BatchStatement.newInstance(BatchType.LOGGED, UNDEFINED_IDEMPOTENCE_STATEMENT); - protected static final BatchStatement IDEMPOTENT_BATCH_STATEMENT = - BatchStatement.newInstance(BatchType.LOGGED, IDEMPOTENT_STATEMENT).setIdempotent(true); - protected static final BatchStatement NON_IDEMPOTENT_BATCH_STATEMENT = - BatchStatement.newInstance(BatchType.LOGGED, NON_IDEMPOTENT_STATEMENT).setIdempotent(false); - - @Mock protected DefaultNode node1; - @Mock protected DefaultNode node2; - @Mock protected DefaultNode node3; - @Mock protected NodeMetricUpdater nodeMetricUpdater1; - @Mock protected NodeMetricUpdater nodeMetricUpdater2; - @Mock protected NodeMetricUpdater nodeMetricUpdater3; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(node1.getMetricUpdater()).thenReturn(nodeMetricUpdater1); - when(nodeMetricUpdater1.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(node2.getMetricUpdater()).thenReturn(nodeMetricUpdater2); - when(nodeMetricUpdater2.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - when(node3.getMetricUpdater()).thenReturn(nodeMetricUpdater3); - when(nodeMetricUpdater3.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); - } - - protected static Frame defaultFrameOf(Message responseMessage) { - return Frame.forResponse( - DefaultProtocolVersion.V4.getCode(), - 0, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - responseMessage); - } - - // Returns a single row, with a single "message" column with the value "hello, world" - protected static Message singleRow() { - RowsMetadata metadata = - new RowsMetadata( - ImmutableList.of( - new ColumnSpec( - "ks", - "table", - "message", - 0, - 
RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))), - null, - new int[] {}, - null); - Queue> data = new ArrayDeque<>(); - data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64"))); - return new DefaultRows(metadata, data); - } - - /** - * The combination of the default idempotence option and statement setting that produce an - * idempotent statement. - */ - @DataProvider - public static Object[][] idempotentConfig() { - return new Object[][] { - new Object[] {true, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {false, IDEMPOTENT_STATEMENT}, - new Object[] {true, IDEMPOTENT_STATEMENT}, - new Object[] {true, UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT}, - new Object[] {false, IDEMPOTENT_BATCH_STATEMENT}, - new Object[] {true, IDEMPOTENT_BATCH_STATEMENT}, - }; - } - - /** - * The combination of the default idempotence option and statement setting that produce a non - * idempotent statement. - */ - @DataProvider - public static Object[][] nonIdempotentConfig() { - return new Object[][] { - new Object[] {false, UNDEFINED_IDEMPOTENCE_STATEMENT}, - new Object[] {true, NON_IDEMPOTENT_STATEMENT}, - new Object[] {false, NON_IDEMPOTENT_STATEMENT}, - new Object[] {false, UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT}, - new Object[] {true, NON_IDEMPOTENT_BATCH_STATEMENT}, - new Object[] {false, NON_IDEMPOTENT_BATCH_STATEMENT}, - }; - } - - @DataProvider - public static Object[][] allIdempotenceConfigs() { - return TestDataProviders.concat(idempotentConfig(), nonIdempotentConfig()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java deleted file mode 100644 index ecc087fb8ac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class CqlRequestHandlerTrackerTest extends CqlRequestHandlerTestBase { - - @Test - public void should_invoke_request_tracker() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - 
.withDefaultIdempotence(true) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(singleRow())) - .build()) { - - RequestTracker requestTracker = mock(RequestTracker.class); - when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess( - resultSet -> { - verify(requestTracker) - .onNodeError( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - any(BootstrappingException.class), - anyLong(), - any(DriverExecutionProfile.class), - eq(node1), - any(String.class)); - verify(requestTracker) - .onNodeSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - any(String.class)); - verify(requestTracker) - .onSuccess( - eq(UNDEFINED_IDEMPOTENCE_STATEMENT), - anyLong(), - any(DriverExecutionProfile.class), - eq(node2), - any(String.class)); - verifyNoMoreInteractions(requestTracker); - }); - } - } - - @Test - public void should_not_invoke_noop_request_tracker() { - try (RequestHandlerTestHarness harness = - RequestHandlerTestHarness.builder() - .withDefaultIdempotence(true) - .withResponse( - node1, - defaultFrameOf( - new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) - .withResponse(node2, defaultFrameOf(singleRow())) - .build()) { - - RequestTracker requestTracker = spy(new NoopRequestTracker(harness.getContext())); - when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); - - CompletionStage resultSetFuture = - new CqlRequestHandler( - UNDEFINED_IDEMPOTENCE_STATEMENT, - harness.getSession(), - harness.getContext(), - "test") - .handle(); - - assertThatStage(resultSetFuture) - .isSuccess(resultSet -> verifyNoMoreInteractions(requestTracker)); - } - } -} 
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java deleted file mode 100644 index 8ed509caeb7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DefaultAsyncResultSetTest { - - @Mock private ColumnDefinitions columnDefinitions; - @Mock private ExecutionInfo executionInfo; - @Mock private Statement statement; - @Mock private CqlSession session; - @Mock private InternalDriverContext context; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(executionInfo.getRequest()).thenAnswer(invocation -> statement); - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - 
when(context.getProtocolVersion()).thenReturn(DefaultProtocolVersion.DEFAULT); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_fetch_next_page_if_last() { - // Given - when(executionInfo.getPagingState()).thenReturn(null); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - - // Then - assertThat(resultSet.hasMorePages()).isFalse(); - resultSet.fetchNextPage(); - } - - @Test - public void should_invoke_session_to_fetch_next_page() { - // Given - ByteBuffer mockPagingState = ByteBuffer.allocate(0); - when(executionInfo.getPagingState()).thenReturn(mockPagingState); - - Statement mockNextStatement = mock(Statement.class); - when(((Statement) statement).copy(mockPagingState)).thenReturn(mockNextStatement); - - CompletableFuture mockResultFuture = new CompletableFuture<>(); - when(session.executeAsync(any(Statement.class))).thenAnswer(invocation -> mockResultFuture); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - assertThat(resultSet.hasMorePages()).isTrue(); - CompletionStage nextPageFuture = resultSet.fetchNextPage(); - - // Then - verify(statement).copy(mockPagingState); - verify(session).executeAsync(mockNextStatement); - assertThatStage(nextPageFuture).isEqualTo(mockResultFuture); - } - - @Test - public void should_report_applied_if_column_not_present_and_empty() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(false); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - - // Then - assertThat(resultSet.wasApplied()).isTrue(); - } - - @Test - public void should_report_applied_if_column_not_present_and_not_empty() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(false); - Queue> 
data = new ArrayDeque<>(); - data.add(Lists.newArrayList(Bytes.fromHexString("0xffff"))); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet(columnDefinitions, executionInfo, data, session, context); - - // Then - assertThat(resultSet.wasApplied()).isTrue(); - } - - @Test - public void should_report_not_applied_if_column_present_and_false() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(true); - ColumnDefinition columnDefinition = mock(ColumnDefinition.class); - when(columnDefinition.getType()).thenReturn(DataTypes.BOOLEAN); - when(columnDefinitions.get("[applied]")).thenReturn(columnDefinition); - when(columnDefinitions.firstIndexOf("[applied]")).thenReturn(0); - when(columnDefinitions.get(0)).thenReturn(columnDefinition); - - Queue> data = new ArrayDeque<>(); - data.add(Lists.newArrayList(TypeCodecs.BOOLEAN.encode(false, DefaultProtocolVersion.DEFAULT))); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet(columnDefinitions, executionInfo, data, session, context); - - // Then - assertThat(resultSet.wasApplied()).isFalse(); - } - - @Test - public void should_report_not_applied_if_column_present_and_true() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(true); - ColumnDefinition columnDefinition = mock(ColumnDefinition.class); - when(columnDefinition.getType()).thenReturn(DataTypes.BOOLEAN); - when(columnDefinitions.get("[applied]")).thenReturn(columnDefinition); - when(columnDefinitions.firstIndexOf("[applied]")).thenReturn(0); - when(columnDefinitions.get(0)).thenReturn(columnDefinition); - - Queue> data = new ArrayDeque<>(); - data.add(Lists.newArrayList(TypeCodecs.BOOLEAN.encode(true, DefaultProtocolVersion.DEFAULT))); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet(columnDefinitions, executionInfo, data, session, context); - - // Then - assertThat(resultSet.wasApplied()).isTrue(); - } - - @Test(expected = IllegalStateException.class) - 
public void should_fail_to_report_if_applied_if_column_present_but_empty() { - // Given - when(columnDefinitions.contains("[applied]")).thenReturn(true); - ColumnDefinition columnDefinition = mock(ColumnDefinition.class); - when(columnDefinition.getType()).thenReturn(DataTypes.BOOLEAN); - when(columnDefinitions.get("[applied]")).thenReturn(columnDefinition); - - // When - DefaultAsyncResultSet resultSet = - new DefaultAsyncResultSet( - columnDefinitions, executionInfo, new ArrayDeque<>(), session, context); - - // Then - resultSet.wasApplied(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java deleted file mode 100644 index d6787cc018e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static java.util.stream.StreamSupport.stream; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.MockPagingIterable; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.ArrayList; -import java.util.List; -import java.util.Spliterator; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PagingIterableSpliteratorTest { - - @Test - @UseDataProvider("splitsWithEstimatedSize") - public void should_split_with_estimated_size( - int size, int chunkSize, List expectedLeft, List expectedRight) { - // given - PagingIterableSpliterator.Builder builder = - PagingIterableSpliterator.builder(iterableOfSize(size)) - .withEstimatedSize(size) - .withChunkSize(chunkSize); - // when - PagingIterableSpliterator right = builder.build(); - Spliterator left = right.trySplit(); - // then - assertThat(right.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - assertThat(right.estimateSize()).isEqualTo(expectedRight.size()); - assertThat(right.getExactSizeIfKnown()).isEqualTo(expectedRight.size()); - TestConsumer rightConsumer = new TestConsumer(); - right.forEachRemaining(rightConsumer); - assertThat(rightConsumer.items).containsExactlyElementsOf(expectedRight); - if (expectedLeft.isEmpty()) { - assertThat(left).isNull(); - } else { - assertThat(left.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | 
Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - assertThat(left.estimateSize()).isEqualTo(expectedLeft.size()); - assertThat(left.getExactSizeIfKnown()).isEqualTo(expectedLeft.size()); - TestConsumer leftConsumer = new TestConsumer(); - left.forEachRemaining(leftConsumer); - assertThat(leftConsumer.items).containsExactlyElementsOf(expectedLeft); - } - } - - @DataProvider - public static Iterable splitsWithEstimatedSize() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(0, 1, ImmutableList.of(), ImmutableList.of())); - arguments.add(Lists.newArrayList(1, 1, ImmutableList.of(), ImmutableList.of(0))); - arguments.add(Lists.newArrayList(1, 2, ImmutableList.of(), ImmutableList.of(0))); - arguments.add(Lists.newArrayList(2, 1, ImmutableList.of(0), ImmutableList.of(1))); - arguments.add( - Lists.newArrayList( - 10, 1, ImmutableList.of(0), ImmutableList.of(1, 2, 3, 4, 5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 5, ImmutableList.of(0, 1, 2, 3, 4), ImmutableList.of(5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 9, ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8), ImmutableList.of(9))); - arguments.add( - Lists.newArrayList( - 10, 10, ImmutableList.of(), ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - return arguments; - } - - @Test - @UseDataProvider("splitsWithUnknownSize") - public void should_split_with_unknown_size( - int size, int chunkSize, List expectedLeft, List expectedRight) { - // given - PagingIterableSpliterator.Builder builder = - PagingIterableSpliterator.builder(iterableOfSize(size)).withChunkSize(chunkSize); - // when - PagingIterableSpliterator right = builder.build(); - Spliterator left = right.trySplit(); - // then - assertThat(right.characteristics()) - .isEqualTo(Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL); - assertThat(right.estimateSize()).isEqualTo(Long.MAX_VALUE); - 
assertThat(right.getExactSizeIfKnown()).isEqualTo(-1); - TestConsumer rightConsumer = new TestConsumer(); - right.forEachRemaining(rightConsumer); - assertThat(rightConsumer.items).containsExactlyElementsOf(expectedRight); - if (expectedLeft.isEmpty()) { - assertThat(left).isNull(); - } else { - // left side will also be SIZED and SUBSIZED - assertThat(left.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - assertThat(left.estimateSize()).isEqualTo(expectedLeft.size()); - assertThat(left.getExactSizeIfKnown()).isEqualTo(expectedLeft.size()); - TestConsumer leftConsumer = new TestConsumer(); - left.forEachRemaining(leftConsumer); - assertThat(leftConsumer.items).containsExactlyElementsOf(expectedLeft); - } - } - - @DataProvider - public static Iterable splitsWithUnknownSize() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(0, 1, ImmutableList.of(), ImmutableList.of())); - arguments.add(Lists.newArrayList(1, 1, ImmutableList.of(0), ImmutableList.of())); - arguments.add(Lists.newArrayList(1, 2, ImmutableList.of(0), ImmutableList.of())); - arguments.add(Lists.newArrayList(2, 1, ImmutableList.of(0), ImmutableList.of(1))); - arguments.add( - Lists.newArrayList( - 10, 1, ImmutableList.of(0), ImmutableList.of(1, 2, 3, 4, 5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 5, ImmutableList.of(0, 1, 2, 3, 4), ImmutableList.of(5, 6, 7, 8, 9))); - arguments.add( - Lists.newArrayList( - 10, 9, ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8), ImmutableList.of(9))); - arguments.add( - Lists.newArrayList( - 10, 10, ImmutableList.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), ImmutableList.of())); - return arguments; - } - - @Test - public void should_consume_with_tryAdvance() { - // given - PagingIterableSpliterator spliterator = - new PagingIterableSpliterator<>(iterableOfSize(10)); - TestConsumer action = new TestConsumer(); - // when - for (int i = 0; i 
< 20; i++) { - spliterator.tryAdvance(action); - } - // then - assertThat(action.items).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); - } - - @Test - public void should_consume_with_forEachRemaining() { - // given - PagingIterableSpliterator spliterator = - new PagingIterableSpliterator<>(iterableOfSize(10)); - TestConsumer action = new TestConsumer(); - // when - spliterator.forEachRemaining(action); - // then - assertThat(action.items).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); - } - - @Test - @UseDataProvider("streams") - public void should_consume_stream(int size, int chunkSize, boolean parallel) { - // given - PagingIterableSpliterator spliterator = - PagingIterableSpliterator.builder(iterableOfSize(size)) - .withEstimatedSize(size) - .withChunkSize(chunkSize) - .build(); - // when - long count = stream(spliterator, parallel).count(); - // then - assertThat(count).isEqualTo(size); - } - - @DataProvider - public static Iterable streams() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(10_000, 5_000, false)); - arguments.add(Lists.newArrayList(10_000, 1_000, false)); - arguments.add(Lists.newArrayList(10_000, 9_999, false)); - arguments.add(Lists.newArrayList(10_000, 1, false)); - arguments.add(Lists.newArrayList(10_000, 5_000, true)); - arguments.add(Lists.newArrayList(10_000, 1_000, true)); - arguments.add(Lists.newArrayList(10_000, 9_999, true)); - arguments.add(Lists.newArrayList(10_000, 1, true)); - return arguments; - } - - private static MockPagingIterable iterableOfSize(int size) { - return new MockPagingIterable<>( - IntStream.range(0, size).boxed().collect(Collectors.toList()).iterator()); - } - - private static class TestConsumer implements Consumer { - - private final List items = new ArrayList<>(); - - @Override - public void accept(Integer integer) { - items.add(integer); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java deleted file mode 100644 index 9b018f17531..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.ResponseCallback; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.Message; -import io.netty.channel.ChannelConfig; -import io.netty.channel.ChannelFuture; -import io.netty.channel.EventLoop; -import io.netty.channel.socket.DefaultSocketChannelConfig; -import io.netty.util.concurrent.ImmediateEventExecutor; -import io.netty.util.concurrent.Promise; -import java.util.concurrent.CompletableFuture; - -/** - * The simulated behavior of the connection pool for a given node in a {@link - * RequestHandlerTestHarness}. - * - *

This only covers a single attempt, if the node is to be tried multiple times there will be - * multiple instances of this class. - */ -public class PoolBehavior { - - final Node node; - final DriverChannel channel; - private final Promise writePromise; - private final CompletableFuture callbackFuture = new CompletableFuture<>(); - - public PoolBehavior(Node node, boolean createChannel) { - this.node = node; - if (!createChannel) { - this.channel = null; - this.writePromise = null; - } else { - this.channel = mock(DriverChannel.class); - EventLoop eventLoop = mock(EventLoop.class); - ChannelConfig config = mock(DefaultSocketChannelConfig.class); - this.writePromise = ImmediateEventExecutor.INSTANCE.newPromise(); - when(channel.preAcquireId()).thenReturn(true); - when(channel.write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class))) - .thenAnswer( - invocation -> { - ResponseCallback callback = invocation.getArgument(3); - callback.onStreamIdAssigned(1); - callbackFuture.complete(callback); - return writePromise; - }); - ChannelFuture closeFuture = mock(ChannelFuture.class); - when(channel.closeFuture()).thenReturn(closeFuture); - when(channel.eventLoop()).thenReturn(eventLoop); - when(channel.config()).thenReturn(config); - } - } - - public void verifyWrite() { - verify(channel).write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class)); - } - - public void verifyNoWrite() { - verify(channel, never()) - .write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class)); - } - - public void setWriteSuccess() { - writePromise.setSuccess(null); - } - - public void setWriteFailure(Throwable cause) { - writePromise.setFailure(cause); - } - - public void setResponseSuccess(Frame responseFrame) { - callbackFuture.thenAccept(callback -> callback.onResponse(responseFrame)); - } - - public void setResponseFailure(Throwable cause) { - callbackFuture.thenAccept(callback -> callback.onFailure(cause)); - } - - public Node 
getNode() { - return node; - } - - public DriverChannel getChannel() { - return channel; - } - - /** Mocks a follow-up request on the same channel. */ - public void mockFollowupRequest(Class expectedMessage, Frame responseFrame) { - Promise writePromise2 = ImmediateEventExecutor.INSTANCE.newPromise(); - CompletableFuture callbackFuture2 = new CompletableFuture<>(); - when(channel.write(any(expectedMessage), anyBoolean(), anyMap(), any(ResponseCallback.class))) - .thenAnswer( - invocation -> { - callbackFuture2.complete(invocation.getArgument(3)); - return writePromise2; - }); - writePromise2.setSuccess(null); - callbackFuture2.thenAccept(callback -> callback.onResponse(responseFrame)); - } - - public void verifyCancellation() { - verify(channel).cancel(any(ResponseCallback.class)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java deleted file mode 100644 index dc238775bc1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.TraceEvent; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.EventExecutorGroup; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import 
java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class QueryTraceFetcherTest { - - private static final UUID TRACING_ID = UUID.randomUUID(); - private static final ByteBuffer PAGING_STATE = Bytes.fromHexString("0xdeadbeef"); - private static final int PORT = 7000; - - @Mock private CqlSession session; - @Mock private InternalDriverContext context; - @Mock private DriverExecutionProfile config; - @Mock private DriverExecutionProfile traceConfig; - @Mock private NettyOptions nettyOptions; - @Mock private EventExecutorGroup adminEventExecutorGroup; - @Mock private EventExecutor eventExecutor; - private InetAddress address = InetAddress.getLoopbackAddress(); - - @Captor private ArgumentCaptor statementCaptor; - - @Before - public void setup() { - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventExecutorGroup); - when(adminEventExecutorGroup.next()).thenReturn(eventExecutor); - // Always execute scheduled tasks immediately: - when(eventExecutor.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) - .thenAnswer( - invocation -> { - Runnable runnable = invocation.getArgument(0); - runnable.run(); - // OK because the production code doesn't use the result: - return null; - }); - - when(config.getInt(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS)).thenReturn(3); - // Doesn't really matter since we mock the scheduler - when(config.getDuration(DefaultDriverOption.REQUEST_TRACE_INTERVAL)).thenReturn(Duration.ZERO); - when(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - 
when(config.getString(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.ONE.name()); - - when(config.withString( - DefaultDriverOption.REQUEST_CONSISTENCY, DefaultConsistencyLevel.ONE.name())) - .thenReturn(traceConfig); - } - - @Test - public void should_succeed_when_both_queries_succeed_immediately() { - // Given - CompletionStage sessionRow = completeSessionRow(); - CompletionStage eventRows = singlePageEventRows(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow) - .thenAnswer(invocation -> eventRows); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session, times(2)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - assertSessionQuery(statements.get(0)); - SimpleStatement statement = statements.get(1); - assertEventsQuery(statement); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture) - .isSuccess( - trace -> { - assertThat(trace.getTracingId()).isEqualTo(TRACING_ID); - assertThat(trace.getRequestType()).isEqualTo("mock request"); - assertThat(trace.getDurationMicros()).isEqualTo(42); - assertThat(trace.getCoordinatorAddress().getAddress()).isEqualTo(address); - assertThat(trace.getCoordinatorAddress().getPort()).isEqualTo(PORT); - assertThat(trace.getParameters()) - .hasSize(2) - .containsEntry("key1", "value1") - .containsEntry("key2", "value2"); - assertThat(trace.getStartedAt()).isEqualTo(0); - - List events = trace.getEvents(); - assertThat(events).hasSize(3); - for (int i = 0; i < events.size(); i++) { - TraceEvent event = events.get(i); - assertThat(event.getActivity()).isEqualTo("mock activity " + i); - assertThat(event.getTimestamp()).isEqualTo(i); - assertThat(event.getSourceAddress()).isNotNull(); - assertThat(event.getSourceAddress().getAddress()).isEqualTo(address); - 
assertThat(event.getSourceAddress().getPort()).isEqualTo(PORT); - assertThat(event.getSourceElapsedMicros()).isEqualTo(i); - assertThat(event.getThreadName()).isEqualTo("mock thread " + i); - } - }); - } - - /** - * This should not happen with a sane configuration, but we need to handle it in case {@link - * DefaultDriverOption#REQUEST_PAGE_SIZE} is set ridiculously low. - */ - @Test - public void should_succeed_when_events_query_is_paged() { - // Given - CompletionStage sessionRow = completeSessionRow(); - CompletionStage eventRows1 = multiPageEventRows1(); - CompletionStage eventRows2 = multiPageEventRows2(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow) - .thenAnswer(invocation -> eventRows1) - .thenAnswer(invocation -> eventRows2); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session, times(3)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - assertSessionQuery(statements.get(0)); - assertEventsQuery(statements.get(1)); - assertEventsQuery(statements.get(2)); - assertThat(statements.get(2).getPagingState()).isEqualTo(PAGING_STATE); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture).isSuccess(trace -> assertThat(trace.getEvents()).hasSize(2)); - } - - @Test - public void should_retry_when_session_row_is_incomplete() { - // Given - CompletionStage sessionRow1 = incompleteSessionRow(); - CompletionStage sessionRow2 = completeSessionRow(); - CompletionStage eventRows = singlePageEventRows(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow1) - .thenAnswer(invocation -> sessionRow2) - .thenAnswer(invocation -> eventRows); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // 
Then - verify(session, times(3)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - assertSessionQuery(statements.get(0)); - assertSessionQuery(statements.get(1)); - assertEventsQuery(statements.get(2)); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture) - .isSuccess( - trace -> { - assertThat(trace.getTracingId()).isEqualTo(TRACING_ID); - assertThat(trace.getRequestType()).isEqualTo("mock request"); - assertThat(trace.getDurationMicros()).isEqualTo(42); - assertThat(trace.getCoordinatorAddress().getAddress()).isEqualTo(address); - assertThat(trace.getCoordinatorAddress().getPort()).isEqualTo(PORT); - assertThat(trace.getParameters()) - .hasSize(2) - .containsEntry("key1", "value1") - .containsEntry("key2", "value2"); - assertThat(trace.getStartedAt()).isEqualTo(0); - - List events = trace.getEvents(); - assertThat(events).hasSize(3); - for (int i = 0; i < events.size(); i++) { - TraceEvent event = events.get(i); - assertThat(event.getActivity()).isEqualTo("mock activity " + i); - assertThat(event.getTimestamp()).isEqualTo(i); - assertThat(event.getSourceAddress()).isNotNull(); - assertThat(event.getSourceAddress().getAddress()).isEqualTo(address); - assertThat(event.getSourceAddress().getPort()).isEqualTo(PORT); - assertThat(event.getSourceElapsedMicros()).isEqualTo(i); - assertThat(event.getThreadName()).isEqualTo("mock thread " + i); - } - }); - } - - @Test - public void should_fail_when_session_query_fails() { - // Given - RuntimeException mockError = new RuntimeException("mock error"); - when(session.executeAsync(any(SimpleStatement.class))) - .thenReturn(CompletableFutures.failedFuture(mockError)); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session).executeAsync(statementCaptor.capture()); - SimpleStatement statement = statementCaptor.getValue(); - 
assertSessionQuery(statement); - verifyNoMoreInteractions(session); - - assertThatStage(traceFuture).isFailed(error -> assertThat(error).isSameAs(mockError)); - } - - @Test - public void should_fail_when_session_query_still_incomplete_after_max_tries() { - // Given - CompletionStage sessionRow1 = incompleteSessionRow(); - CompletionStage sessionRow2 = incompleteSessionRow(); - CompletionStage sessionRow3 = incompleteSessionRow(); - when(session.executeAsync(any(SimpleStatement.class))) - .thenAnswer(invocation -> sessionRow1) - .thenAnswer(invocation -> sessionRow2) - .thenAnswer(invocation -> sessionRow3); - - // When - QueryTraceFetcher fetcher = new QueryTraceFetcher(TRACING_ID, session, context, config); - CompletionStage traceFuture = fetcher.fetch(); - - // Then - verify(session, times(3)).executeAsync(statementCaptor.capture()); - List statements = statementCaptor.getAllValues(); - for (int i = 0; i < 3; i++) { - assertSessionQuery(statements.get(i)); - } - - assertThatStage(traceFuture) - .isFailed( - error -> - assertThat(error.getMessage()) - .isEqualTo( - String.format("Trace %s still not complete after 3 attempts", TRACING_ID))); - } - - private CompletionStage completeSessionRow() { - return sessionRow(42); - } - - private CompletionStage incompleteSessionRow() { - return sessionRow(null); - } - - private CompletionStage sessionRow(Integer duration) { - Row row = mock(Row.class); - ColumnDefinitions definitions = mock(ColumnDefinitions.class); - when(row.getColumnDefinitions()).thenReturn(definitions); - when(row.getString("request")).thenReturn("mock request"); - if (duration == null) { - when(row.isNull("duration")).thenReturn(true); - } else { - when(row.getInt("duration")).thenReturn(duration); - } - when(row.getInetAddress("coordinator")).thenReturn(address); - when(definitions.contains("coordinator_port")).thenReturn(true); - when(row.getInt("coordinator_port")).thenReturn(PORT); - when(row.getMap("parameters", String.class, String.class)) - 
.thenReturn(ImmutableMap.of("key1", "value1", "key2", "value2")); - when(row.isNull("started_at")).thenReturn(false); - when(row.getInstant("started_at")).thenReturn(Instant.EPOCH); - - AsyncResultSet rs = mock(AsyncResultSet.class); - when(rs.one()).thenReturn(row); - return CompletableFuture.completedFuture(rs); - } - - private CompletionStage singlePageEventRows() { - List rows = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - rows.add(eventRow(i)); - } - - AsyncResultSet rs = mock(AsyncResultSet.class); - when(rs.currentPage()).thenReturn(rows); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getPagingState()).thenReturn(null); - when(rs.getExecutionInfo()).thenReturn(executionInfo); - - return CompletableFuture.completedFuture(rs); - } - - private CompletionStage multiPageEventRows1() { - AsyncResultSet rs = mock(AsyncResultSet.class); - - ImmutableList rows = ImmutableList.of(eventRow(0)); - when(rs.currentPage()).thenReturn(rows); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getPagingState()).thenReturn(PAGING_STATE); - when(rs.getExecutionInfo()).thenReturn(executionInfo); - - return CompletableFuture.completedFuture(rs); - } - - private CompletionStage multiPageEventRows2() { - AsyncResultSet rs = mock(AsyncResultSet.class); - - ImmutableList rows = ImmutableList.of(eventRow(1)); - when(rs.currentPage()).thenReturn(rows); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getPagingState()).thenReturn(null); - when(rs.getExecutionInfo()).thenReturn(executionInfo); - - return CompletableFuture.completedFuture(rs); - } - - private Row eventRow(int i) { - Row row = mock(Row.class); - ColumnDefinitions definitions = mock(ColumnDefinitions.class); - when(row.getColumnDefinitions()).thenReturn(definitions); - when(row.getString("activity")).thenReturn("mock activity " + i); - when(row.getUuid("event_id")).thenReturn(Uuids.startOf(i)); - 
when(row.getInetAddress("source")).thenReturn(address); - when(definitions.contains("source_port")).thenReturn(true); - when(row.getInt("source_port")).thenReturn(PORT); - when(row.getInt("source_elapsed")).thenReturn(i); - when(row.getString("thread")).thenReturn("mock thread " + i); - return row; - } - - private void assertSessionQuery(SimpleStatement statement) { - assertThat(statement.getQuery()) - .isEqualTo("SELECT * FROM system_traces.sessions WHERE session_id = ?"); - assertThat(statement.getPositionalValues()).containsOnly(TRACING_ID); - assertThat(statement.getExecutionProfile()).isEqualTo(traceConfig); - } - - private void assertEventsQuery(SimpleStatement statement) { - assertThat(statement.getQuery()) - .isEqualTo("SELECT * FROM system_traces.events WHERE session_id = ?"); - assertThat(statement.getPositionalValues()).containsOnly(TRACING_ID); - assertThat(statement.getExecutionProfile()).isEqualTo(traceConfig); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java deleted file mode 100644 index 6a7657d5809..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.ProtocolFeature; -import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import 
com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler; -import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer; -import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; -import com.datastax.oss.protocol.internal.Frame; -import io.netty.channel.EventLoopGroup; -import java.time.Duration; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.stubbing.OngoingStubbing; - -/** - * Provides the environment to test a request handler, where a query plan can be defined, and the - * behavior of each successive node simulated. 
- */ -public class RequestHandlerTestHarness implements AutoCloseable { - - public static Builder builder() { - return new Builder(); - } - - private final CapturingTimer timer = new CapturingTimer(); - private final Map pools; - - @Mock protected InternalDriverContext context; - @Mock protected DefaultSession session; - @Mock protected EventLoopGroup eventLoopGroup; - @Mock protected NettyOptions nettyOptions; - @Mock protected DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock protected LoadBalancingPolicyWrapper loadBalancingPolicyWrapper; - @Mock protected RetryPolicy retryPolicy; - @Mock protected SpeculativeExecutionPolicy speculativeExecutionPolicy; - @Mock protected TimestampGenerator timestampGenerator; - @Mock protected ProtocolVersionRegistry protocolVersionRegistry; - @Mock protected SessionMetricUpdater sessionMetricUpdater; - - protected RequestHandlerTestHarness(Builder builder) { - MockitoAnnotations.initMocks(this); - - when(nettyOptions.getTimer()).thenReturn(timer); - when(nettyOptions.ioEventLoopGroup()).thenReturn(eventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); - // TODO make configurable in the test, also handle profiles - when(defaultProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .thenReturn(Duration.ofMillis(500)); - when(defaultProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).thenReturn(5000); - when(defaultProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) - .thenReturn(DefaultConsistencyLevel.SERIAL.name()); - when(defaultProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)) - .thenReturn(builder.defaultIdempotence); - when(defaultProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); - - 
when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - when(loadBalancingPolicyWrapper.newQueryPlan( - any(Request.class), anyString(), any(Session.class))) - .thenReturn(builder.buildQueryPlan()); - when(context.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - - when(context.getRetryPolicy(anyString())).thenReturn(retryPolicy); - - // Disable speculative executions by default - when(speculativeExecutionPolicy.nextExecution( - any(Node.class), any(CqlIdentifier.class), any(Request.class), anyInt())) - .thenReturn(-1L); - when(context.getSpeculativeExecutionPolicy(anyString())).thenReturn(speculativeExecutionPolicy); - - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - - when(timestampGenerator.next()).thenReturn(Statement.NO_DEFAULT_TIMESTAMP); - when(context.getTimestampGenerator()).thenReturn(timestampGenerator); - - pools = builder.buildMockPools(); - when(session.getChannel(any(Node.class), anyString())) - .thenAnswer( - invocation -> { - Node node = invocation.getArgument(0); - return pools.get(node).next(); - }); - when(session.getRepreparePayloads()).thenReturn(new ConcurrentHashMap<>()); - - when(session.setKeyspace(any(CqlIdentifier.class))) - .thenReturn(CompletableFuture.completedFuture(null)); - - when(session.getMetricUpdater()).thenReturn(sessionMetricUpdater); - when(sessionMetricUpdater.isEnabled(any(SessionMetric.class), anyString())).thenReturn(true); - - when(session.getMetadata()).thenReturn(DefaultMetadata.EMPTY); - - when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); - when(protocolVersionRegistry.supports(any(ProtocolVersion.class), any(ProtocolFeature.class))) - .thenReturn(true); - - if (builder.protocolVersion != null) { - when(context.getProtocolVersion()).thenReturn(builder.protocolVersion); - } - - when(context.getConsistencyLevelRegistry()).thenReturn(new DefaultConsistencyLevelRegistry()); - - 
when(context.getWriteTypeRegistry()).thenReturn(new DefaultWriteTypeRegistry()); - - when(context.getRequestThrottler()).thenReturn(new PassThroughRequestThrottler(context)); - - when(context.getRequestTracker()).thenReturn(new NoopRequestTracker(context)); - - when(context.getRequestIdGenerator()) - .thenReturn(Optional.ofNullable(builder.requestIdGenerator)); - } - - public DefaultSession getSession() { - return session; - } - - public InternalDriverContext getContext() { - return context; - } - - public DriverChannel getChannel(Node node) { - ChannelPool pool = pools.get(node); - return pool.next(); - } - - /** - * Returns the next task that was scheduled on the request handler's admin executor. The test must - * run it manually. - */ - public CapturedTimeout nextScheduledTimeout() { - return timer.getNextTimeout(); - } - - @Override - public void close() { - timer.stop(); - } - - public static class Builder { - private final List poolBehaviors = new ArrayList<>(); - private boolean defaultIdempotence; - private ProtocolVersion protocolVersion; - private RequestIdGenerator requestIdGenerator; - - /** - * Sets the given node as the next one in the query plan; an empty pool will be simulated when - * it gets used. - */ - public Builder withEmptyPool(Node node) { - poolBehaviors.add(new PoolBehavior(node, false)); - return this; - } - - /** - * Sets the given node as the next one in the query plan; a channel write failure will be - * simulated when it gets used. - */ - public Builder withWriteFailure(Node node, Throwable cause) { - PoolBehavior behavior = new PoolBehavior(node, true); - behavior.setWriteFailure(cause); - poolBehaviors.add(behavior); - return this; - } - - /** - * Sets the given node as the next one in the query plan; the write to the channel will succeed, - * but a response failure will be simulated immediately after. 
- */ - public Builder withResponseFailure(Node node, Throwable cause) { - PoolBehavior behavior = new PoolBehavior(node, true); - behavior.setWriteSuccess(); - behavior.setResponseFailure(cause); - poolBehaviors.add(behavior); - return this; - } - - /** - * Sets the given node as the next one in the query plan; the write to the channel will succeed, - * and the given response will be simulated immediately after. - */ - public Builder withResponse(Node node, Frame response) { - PoolBehavior behavior = new PoolBehavior(node, true); - behavior.setWriteSuccess(); - behavior.setResponseSuccess(response); - poolBehaviors.add(behavior); - return this; - } - - public Builder withDefaultIdempotence(boolean defaultIdempotence) { - this.defaultIdempotence = defaultIdempotence; - return this; - } - - public Builder withProtocolVersion(ProtocolVersion protocolVersion) { - this.protocolVersion = protocolVersion; - return this; - } - - public Builder withRequestIdGenerator(RequestIdGenerator requestIdGenerator) { - this.requestIdGenerator = requestIdGenerator; - return this; - } - - /** - * Sets the given node as the next one in the query plan; the test code is responsible of - * calling the methods on the returned object to complete the write and the query. - */ - public PoolBehavior customBehavior(Node node) { - PoolBehavior behavior = new PoolBehavior(node, true); - poolBehaviors.add(behavior); - return behavior; - } - - public RequestHandlerTestHarness build() { - return new RequestHandlerTestHarness(this); - } - - private Queue buildQueryPlan() { - ConcurrentLinkedQueue queryPlan = new ConcurrentLinkedQueue<>(); - for (PoolBehavior behavior : poolBehaviors) { - // We don't want duplicates in the query plan: the only way a node is tried multiple times - // is if the retry policy returns a RETRY_SAME, the request handler does not re-read from - // the plan. 
- if (!queryPlan.contains(behavior.node)) { - queryPlan.offer(behavior.node); - } - } - return queryPlan; - } - - private Map buildMockPools() { - Map pools = new ConcurrentHashMap<>(); - Map> stubbings = new HashMap<>(); - for (PoolBehavior behavior : poolBehaviors) { - Node node = behavior.node; - ChannelPool pool = pools.computeIfAbsent(node, n -> mock(ChannelPool.class)); - - // The goal of the code below is to generate the equivalent of: - // - // when(pool.next()) - // .thenReturn(behavior1.channel) - // .thenReturn(behavior2.channel) - // ... - stubbings.compute( - node, - (sameNode, previous) -> { - if (previous == null) { - previous = when(pool.next()); - } - return previous.thenReturn(behavior.channel); - }); - } - return pools; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java deleted file mode 100644 index 54b215458fe..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; - -public abstract class ResultSetTestBase { - - /** Mocks an async result set where column 0 has type INT, with rows with the provided data. */ - protected AsyncResultSet mockPage(boolean nextPage, Integer... data) { - AsyncResultSet page = mock(AsyncResultSet.class); - - ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - when(page.getColumnDefinitions()).thenReturn(columnDefinitions); - - ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(page.getExecutionInfo()).thenReturn(executionInfo); - - if (nextPage) { - when(page.hasMorePages()).thenReturn(true); - when(page.fetchNextPage()).thenReturn(spy(new CompletableFuture<>())); - } else { - when(page.hasMorePages()).thenReturn(false); - when(page.fetchNextPage()).thenThrow(new IllegalStateException()); - } - - // Emulate DefaultAsyncResultSet's internals (this is a bit sketchy, maybe it would be better - // to use real DefaultAsyncResultSet instances) - Queue queue = Lists.newLinkedList(Arrays.asList(data)); - CountingIterator iterator = - new CountingIterator(queue.size()) { - @Override - protected Row computeNext() { - Integer index = queue.poll(); - return (index == null) ? 
endOfData() : mockRow(index); - } - }; - when(page.currentPage()).thenReturn(() -> iterator); - when(page.remaining()).thenAnswer(invocation -> iterator.remaining()); - - return page; - } - - private Row mockRow(int index) { - Row row = mock(Row.class); - when(row.getInt(0)).thenReturn(index); - return row; - } - - protected static void complete(CompletionStage stage, AsyncResultSet result) { - stage.toCompletableFuture().complete(result); - } - - protected void assertNextRow(Iterator iterator, int expectedValue) { - assertThat(iterator.hasNext()).isTrue(); - Row row = iterator.next(); - assertThat(row.getInt(0)).isEqualTo(expectedValue); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java deleted file mode 100644 index 0b5860f7e95..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import java.util.Iterator; -import org.junit.Test; - -public class ResultSetsTest extends ResultSetTestBase { - - @Test - public void should_create_result_set_from_single_page() { - // Given - AsyncResultSet page1 = mockPage(false, 0, 1, 2); - - // When - ResultSet resultSet = ResultSets.newInstance(page1); - - // Then - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isFalse(); - } - - @Test - public void should_create_result_set_from_multiple_pages() { - // Given - AsyncResultSet page1 = mockPage(true, 0, 1, 2); - AsyncResultSet page2 = mockPage(true, 3, 4, 5); - AsyncResultSet page3 = mockPage(false, 6, 7, 8); - - complete(page1.fetchNextPage(), page2); - complete(page2.fetchNextPage(), page3); - - // When - ResultSet resultSet = ResultSets.newInstance(page1); - - // Then - assertThat(resultSet.iterator().hasNext()).isTrue(); - - assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); - assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); - - Iterator iterator = resultSet.iterator(); - - assertNextRow(iterator, 0); - assertNextRow(iterator, 1); - assertNextRow(iterator, 2); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page2 - 
assertThat(resultSet.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); - - assertNextRow(iterator, 3); - assertNextRow(iterator, 4); - assertNextRow(iterator, 5); - - assertThat(iterator.hasNext()).isTrue(); - // This should have triggered the fetch of page3 - assertThat(resultSet.getExecutionInfo()).isEqualTo(page3.getExecutionInfo()); - assertThat(resultSet.getExecutionInfos()) - .containsExactly( - page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo()); - - assertNextRow(iterator, 6); - assertNextRow(iterator, 7); - assertNextRow(iterator, 8); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java deleted file mode 100644 index dc3ab0702f7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.time.TimestampGenerator; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class StatementSizeTest { - - private static final byte[] MOCK_PAGING_STATE = 
Bytes.getArray(Bytes.fromHexString("0xdeadbeef")); - private static final ByteBuffer MOCK_PAYLOAD_VALUE1 = Bytes.fromHexString("0xabcd"); - private static final ByteBuffer MOCK_PAYLOAD_VALUE2 = Bytes.fromHexString("0xef"); - private static final ImmutableMap MOCK_PAYLOAD = - ImmutableMap.of("key1", MOCK_PAYLOAD_VALUE1, "key2", MOCK_PAYLOAD_VALUE2); - private static final byte[] PREPARED_ID = Bytes.getArray(Bytes.fromHexString("0xaaaa")); - private static final byte[] RESULT_METADATA_ID = Bytes.getArray(Bytes.fromHexString("0xbbbb")); - - @Mock PreparedStatement preparedStatement; - @Mock InternalDriverContext driverContext; - @Mock DriverConfig config; - @Mock DriverExecutionProfile defaultProfile; - @Mock TimestampGenerator timestampGenerator; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - ByteBuffer preparedId = ByteBuffer.wrap(PREPARED_ID); - when(preparedStatement.getId()).thenReturn(preparedId); - ByteBuffer resultMetadataId = ByteBuffer.wrap(RESULT_METADATA_ID); - when(preparedStatement.getResultMetadataId()).thenReturn(resultMetadataId); - - ColumnDefinitions columnDefinitions = - DefaultColumnDefinitions.valueOf( - ImmutableList.of( - phonyColumnDef("ks", "table", "c1", -1, ProtocolConstants.DataType.INT), - phonyColumnDef("ks", "table", "c2", -1, ProtocolConstants.DataType.VARCHAR))); - - when(preparedStatement.getVariableDefinitions()).thenReturn(columnDefinitions); - - when(driverContext.getProtocolVersion()).thenReturn(DefaultProtocolVersion.V5); - when(driverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(driverContext.getProtocolVersionRegistry()) - .thenReturn(new DefaultProtocolVersionRegistry(null)); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(driverContext.getConfig()).thenReturn(config); - when(driverContext.getTimestampGenerator()).thenReturn(timestampGenerator); - } - - private ColumnDefinition phonyColumnDef( - String keyspace, String table, String column, int 
index, int typeCode) { - return new DefaultColumnDefinition( - new ColumnSpec(keyspace, table, column, index, RawType.PRIMITIVES.get(typeCode)), - AttachmentPoint.NONE); - } - - @Test - public void should_measure_size_of_simple_statement() { - String queryString = "SELECT release_version FROM system.local WHERE key = ?"; - SimpleStatement statement = SimpleStatement.newInstance(queryString); - int expectedSize = - 9 // header - + (4 + queryString.getBytes(Charsets.UTF_8).length) // query string - + 2 // consistency level - + 2 // serial consistency level - + 4 // fetch size - + 8 // timestamp - + 4; // flags - - assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); - - String value1 = "local"; - SimpleStatement statementWithAnonymousValue = SimpleStatement.newInstance(queryString, value1); - assertThat(v5SizeOf(statementWithAnonymousValue)) - .isEqualTo( - expectedSize - + 2 // size of number of values - + (4 + value1.getBytes(Charsets.UTF_8).length) // value - ); - - String key1 = "key"; - SimpleStatement statementWithNamedValue = - SimpleStatement.newInstance(queryString, ImmutableMap.of(key1, value1)); - assertThat(v5SizeOf(statementWithNamedValue)) - .isEqualTo( - expectedSize - + 2 // size of number of values - + (2 + key1.getBytes(Charsets.UTF_8).length) // key - + (4 + value1.getBytes(Charsets.UTF_8).length) // value - ); - - SimpleStatement statementWithPagingState = - statement.setPagingState(ByteBuffer.wrap(MOCK_PAGING_STATE)); - assertThat(v5SizeOf(statementWithPagingState)) - .isEqualTo(expectedSize + 4 + MOCK_PAGING_STATE.length); - - SimpleStatement statementWithPayload = statement.setCustomPayload(MOCK_PAYLOAD); - assertThat(v5SizeOf(statementWithPayload)) - .isEqualTo( - expectedSize - + 2 // size of number of keys in the map - // size of each key/value pair - + (2 + "key1".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE1.remaining()) - + (2 + "key2".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE2.remaining())); - - 
SimpleStatement statementWithKeyspace = statement.setKeyspace("testKeyspace"); - assertThat(v5SizeOf(statementWithKeyspace)) - .isEqualTo(expectedSize + 2 + "testKeyspace".getBytes(Charsets.UTF_8).length); - } - - @Test - public void should_measure_size_of_bound_statement() { - - BoundStatement statement = - newBoundStatement( - preparedStatement, - new ByteBuffer[] {ProtocolConstants.UNSET_VALUE, ProtocolConstants.UNSET_VALUE}); - - int expectedSize = - 9 // header size - + 4 // flags - + 2 // consistency level - + 2 // serial consistency level - + 8 // timestamp - + (2 + PREPARED_ID.length) - + (2 + RESULT_METADATA_ID.length) - + 2 // size of value list - + 2 * 4 // two null values (size = -1) - + 4 // fetch size - ; - assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); - - BoundStatement withValues = statement.setInt(0, 0).setString(1, "test"); - expectedSize += - 4 // the size of the int value - + "test".getBytes(Charsets.UTF_8).length; - assertThat(v5SizeOf(withValues)).isEqualTo(expectedSize); - - BoundStatement withPagingState = withValues.setPagingState(ByteBuffer.wrap(MOCK_PAGING_STATE)); - expectedSize += 4 + MOCK_PAGING_STATE.length; - assertThat(v5SizeOf(withPagingState)).isEqualTo(expectedSize); - - BoundStatement withPayload = withPagingState.setCustomPayload(MOCK_PAYLOAD); - expectedSize += - 2 // size of number of keys in the map - // size of each key/value pair - + (2 + "key1".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE1.remaining()) - + (2 + "key2".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE2.remaining()); - assertThat(v5SizeOf(withPayload)).isEqualTo(expectedSize); - } - - @Test - public void should_measure_size_of_batch_statement() { - String queryString = "SELECT release_version FROM system.local"; - String key1 = "key"; - String value1 = "value"; - SimpleStatement statement1 = - SimpleStatement.newInstance(queryString, ImmutableMap.of(key1, value1)); - - BoundStatement statement2 = - newBoundStatement( - 
preparedStatement, - new ByteBuffer[] {ProtocolConstants.UNSET_VALUE, ProtocolConstants.UNSET_VALUE}) - .setInt(0, 0) - .setString(1, "test"); - BoundStatement statement3 = - newBoundStatement( - preparedStatement, - new ByteBuffer[] {ProtocolConstants.UNSET_VALUE, ProtocolConstants.UNSET_VALUE}) - .setInt(0, 0) - .setString(1, "test2"); - - BatchStatement batchStatement = - BatchStatement.newInstance(DefaultBatchType.UNLOGGED) - .add(statement1) - .add(statement2) - .add(statement3); - - int expectedSize = - 9 // header size - + 1 - + 2 // batch type + number of queries - // statements' type of id + id (query string/prepared id): - + 1 - + (4 + queryString.getBytes(Charsets.UTF_8).length) - + 1 - + (2 + PREPARED_ID.length) - + 1 - + (2 + PREPARED_ID.length) - // simple statement values - + 2 // size of number of values - + (2 + key1.getBytes(Charsets.UTF_8).length) // key - + (4 + value1.getBytes(Charsets.UTF_8).length) // value - // bound statements values - + (2 + (4 + 4) + (4 + "test".getBytes(Charsets.UTF_8).length)) - + (2 + (4 + 4) + (4 + "test2".getBytes(Charsets.UTF_8).length)) - + 2 // consistency level - + 2 // serial consistency level - + 8 // timestamp - + 4; // flags - assertThat(v5SizeOf(batchStatement)).isEqualTo(expectedSize); - - BatchStatement withPayload = batchStatement.setCustomPayload(MOCK_PAYLOAD); - expectedSize += - 2 // size of number of keys in the map - // size of each key/value pair - + (2 + "key1".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE1.remaining()) - + (2 + "key2".getBytes(Charsets.UTF_8).length) - + (4 + MOCK_PAYLOAD_VALUE2.remaining()); - assertThat(v5SizeOf(withPayload)).isEqualTo(expectedSize); - } - - private int v5SizeOf(Statement statement) { - return statement.computeSizeInBytes(driverContext); - } - - private BoundStatement newBoundStatement( - PreparedStatement preparedStatement, ByteBuffer[] initialValues) { - return new DefaultBoundStatement( - preparedStatement, - 
preparedStatement.getVariableDefinitions(), - initialValues, - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - false, - -1, - null, - Integer.MIN_VALUE, - null, - null, - null, - CodecRegistry.DEFAULT, - DefaultProtocolVersion.V5, - null, - Statement.NO_NOW_IN_SECONDS); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java deleted file mode 100644 index c27b55e3f25..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java +++ /dev/null @@ -1,485 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.GettableById; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableById; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import org.junit.Test; - -public abstract class AccessibleByIdTestBase< - T extends GettableById & SettableById & GettableByName & SettableByName> - extends AccessibleByIndexTestBase { - - private static final CqlIdentifier FIELD0_ID = CqlIdentifier.fromInternal("field0"); - private static final String FIELD0_NAME = "field0"; - - @Test - public void should_set_primitive_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setInt(FIELD0_ID, 1); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_object_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), 
attachmentPoint); - - // When - t = t.setString(FIELD0_ID, "a"); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x61")); - } - - @Test - public void should_set_bytes_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_to_null_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - t = t.setToNull(FIELD0_ID); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isNull(); - } - - @Test - public void should_set_with_explicit_class_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_ID, "1", String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_type_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_ID, "1", GenericType.STRING); - - // Then 
- verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_codec_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_ID, "1", intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_ID)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_get_primitive_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - int i = t.getInt(FIELD0_ID); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(i).isEqualTo(1); - } - - @Test - public void should_get_object_value_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x61")); - - // When - String s = t.getString(FIELD0_ID); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("a"); - } - - @Test - public void should_get_bytes_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - ByteBuffer bytes = t.getBytesUnsafe(FIELD0_ID); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(bytes).isEqualTo(Bytes.fromHexString("0x00000001")); 
- } - - @Test - public void should_test_if_null_by_id() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, null); - - // When - boolean isNull = t.isNull(FIELD0_ID); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(isNull).isTrue(); - } - - @Test - public void should_get_with_explicit_class_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_ID, String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_type_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_ID, GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_codec_by_id() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_ID, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_ID, intToStringCodec); - - // Then - 
verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_set_primitive_value_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setInt(FIELD0_NAME, 1); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_object_value_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - - // When - t = t.setString(FIELD0_NAME, "a"); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x61")); - } - - @Test - public void should_set_bytes_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_to_null_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - t = t.setToNull(FIELD0_NAME); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isNull(); - } - - @Test - public void should_set_with_explicit_class_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = 
newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_NAME, "1", String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_type_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_NAME, "1", GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_codec_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(FIELD0_NAME, "1", intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(FIELD0_NAME)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_get_primitive_value_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - int i = t.getInt(FIELD0_NAME); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(i).isEqualTo(1); - } - - @Test - public void should_get_object_value_by_name() { - 
// Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x61")); - - // When - String s = t.getString(FIELD0_NAME); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("a"); - } - - @Test - public void should_get_bytes_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - ByteBuffer bytes = t.getBytesUnsafe(FIELD0_NAME); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(bytes).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_test_if_null_by_name() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, null); - - // When - boolean isNull = t.isNull(FIELD0_NAME); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(isNull).isTrue(); - } - - @Test - public void should_get_with_explicit_class_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_NAME, String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_type_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> 
intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_NAME, GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_codec_by_name() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(FIELD0_NAME, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(FIELD0_NAME, intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test(expected = IllegalArgumentException.class) - @SuppressWarnings("CheckReturnValue") - public void should_fail_when_id_does_not_exists() { - final CqlIdentifier invalidField = CqlIdentifier.fromInternal("invalidField"); - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t.setInt(invalidField, 1); - - // Then the method will throw IllegalArgumentException up to the client. - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java deleted file mode 100644 index 94da926f2bc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.GettableByIndex; -import com.datastax.oss.driver.api.core.data.SettableByIndex; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; -import 
com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public abstract class AccessibleByIndexTestBase> { - - protected abstract T newInstance(List dataTypes, AttachmentPoint attachmentPoint); - - protected abstract T newInstance( - List dataTypes, List values, AttachmentPoint attachmentPoint); - - @Mock protected AttachmentPoint attachmentPoint; - @Mock protected AttachmentPoint v3AttachmentPoint; - @Mock protected CodecRegistry codecRegistry; - protected PrimitiveIntCodec intCodec; - protected TypeCodec doubleCodec; - protected TypeCodec textCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(attachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - when(attachmentPoint.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - - when(v3AttachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - when(v3AttachmentPoint.getProtocolVersion()).thenReturn(DefaultProtocolVersion.V3); - - intCodec = spy(TypeCodecs.INT); - doubleCodec = spy(TypeCodecs.DOUBLE); - textCodec = spy(TypeCodecs.TEXT); - - when(codecRegistry.codecFor(DataTypes.INT, Integer.class)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE, Double.class)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT, String.class)).thenAnswer(i -> textCodec); - - when(codecRegistry.codecFor(DataTypes.INT)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.TEXT)).thenAnswer(t -> textCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE)).thenAnswer(d -> doubleCodec); - } - - @Test - public void should_set_primitive_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setInt(0, 1); - - // Then - 
verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_object_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - - // When - t = t.setString(0, "a"); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x61")); - } - - @Test - public void should_set_bytes_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_to_null_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - t = t.setToNull(0); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(t.getBytesUnsafe(0)).isNull(); - } - - @Test - public void should_set_with_explicit_class_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(0, "1", String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_type_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = 
spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(0, "1", GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_with_explicit_codec_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - - // When - t = t.set(0, "1", intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).encode("1", ProtocolVersion.DEFAULT); - assertThat(t.getBytesUnsafe(0)).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_set_values_in_bulk() { - // Given - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - when(codecRegistry.codecFor(DataTypes.INT, 1)).thenReturn(TypeCodecs.INT); - - // When - T t = - newInstance( - ImmutableList.of(DataTypes.TEXT, DataTypes.INT), - ImmutableList.of("foo", 1), - attachmentPoint); - - // Then - assertThat(t.getString(0)).isEqualTo("foo"); - assertThat(t.getInt(1)).isEqualTo(1); - verify(codecRegistry).codecFor(DataTypes.TEXT, "foo"); - verify(codecRegistry).codecFor(DataTypes.INT, 1); - } - - @Test - public void should_set_values_in_bulk_when_not_enough_values() { - // Given - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - - // When - T t = - newInstance( - ImmutableList.of(DataTypes.TEXT, DataTypes.INT), - ImmutableList.of("foo"), - attachmentPoint); - - // Then - assertThat(t.getString(0)).isEqualTo("foo"); - assertThat(t.isNull(1)).isTrue(); - verify(codecRegistry).codecFor(DataTypes.TEXT, "foo"); - } 
- - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_set_values_in_bulk_when_too_many_values() { - newInstance( - ImmutableList.of(DataTypes.TEXT, DataTypes.INT), - ImmutableList.of("foo", 1, "bar"), - attachmentPoint); - } - - @Test - public void should_get_primitive_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - int i = t.getInt(0); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, Integer.class); - verify(intCodec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(i).isEqualTo(1); - } - - @Test - public void should_get_object_value_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.TEXT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x61")); - - // When - String s = t.getString(0); - - // Then - verify(codecRegistry).codecFor(DataTypes.TEXT, String.class); - verify(textCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("a"); - } - - @Test - public void should_get_bytes_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - ByteBuffer bytes = t.getBytesUnsafe(0); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(bytes).isEqualTo(Bytes.fromHexString("0x00000001")); - } - - @Test - public void should_test_if_null_by_index() { - // Given - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, null); - - // When - boolean isNull = t.isNull(0); - - // Then - verifyZeroInteractions(codecRegistry); - assertThat(isNull).isTrue(); - } - - @Test - public void should_get_with_explicit_class_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - 
when(codecRegistry.codecFor(DataTypes.INT, String.class)).thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(0, String.class); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, String.class); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_type_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - when(codecRegistry.codecFor(DataTypes.INT, GenericType.STRING)) - .thenAnswer(i -> intToStringCodec); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(0, GenericType.STRING); - - // Then - verify(codecRegistry).codecFor(DataTypes.INT, GenericType.STRING); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } - - @Test - public void should_get_with_explicit_codec_by_index() { - // Given - CqlIntToStringCodec intToStringCodec = spy(new CqlIntToStringCodec()); - T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); - t = t.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - // When - String s = t.get(0, intToStringCodec); - - // Then - verifyZeroInteractions(codecRegistry); - verify(intToStringCodec).decode(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT)); - assertThat(s).isEqualTo("1"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java deleted file mode 100644 index aed357cb1cd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java +++ /dev/null @@ 
-1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.List; -import org.junit.Test; - -public class DefaultTupleValueTest extends AccessibleByIndexTestBase { - - @Override - protected TupleValue newInstance(List dataTypes, AttachmentPoint attachmentPoint) { - DefaultTupleType type = new DefaultTupleType(dataTypes, attachmentPoint); - return type.newValue(); - } - - @Override - protected TupleValue newInstance( - List 
dataTypes, List values, AttachmentPoint attachmentPoint) { - DefaultTupleType type = new DefaultTupleType(dataTypes, attachmentPoint); - return type.newValue(values.toArray()); - } - - @Test - public void should_serialize_and_deserialize() { - DefaultTupleType type = - new DefaultTupleType(ImmutableList.of(DataTypes.INT, DataTypes.TEXT), attachmentPoint); - TupleValue in = type.newValue(); - in = in.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - in = in.setBytesUnsafe(1, Bytes.fromHexString("0x61")); - - TupleValue out = SerializationHelper.serializeAndDeserialize(in); - - assertThat(out.getType()).isEqualTo(in.getType()); - assertThat(out.getType().isDetached()).isTrue(); - assertThat(Bytes.toHexString(out.getBytesUnsafe(0))).isEqualTo("0x00000001"); - assertThat(Bytes.toHexString(out.getBytesUnsafe(1))).isEqualTo("0x61"); - } - - @Test - public void should_support_null_items_when_setting_in_bulk() { - DefaultTupleType type = - new DefaultTupleType(ImmutableList.of(DataTypes.INT, DataTypes.TEXT), attachmentPoint); - when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(TypeCodecs.INT); - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - TupleValue value = type.newValue(null, "foo"); - - assertThat(value.isNull(0)).isTrue(); - assertThat(value.getString(1)).isEqualTo("foo"); - } - - @Test - public void should_equate_instances_with_same_values_but_different_binary_representations() { - TupleType tupleType = DataTypes.tupleOf(DataTypes.VARINT); - - TupleValue tuple1 = tupleType.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - TupleValue tuple2 = tupleType.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x0001")); - - assertThat(tuple1).isEqualTo(tuple2); - assertThat(tuple1.hashCode()).isEqualTo(tuple2.hashCode()); - } - - @Test - public void should_not_equate_instances_with_same_binary_representation_but_different_types() { - TupleType tupleType1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tupleType2 = 
DataTypes.tupleOf(DataTypes.VARINT); - - TupleValue tuple1 = tupleType1.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - TupleValue tuple2 = tupleType2.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - - assertThat(tuple1).isNotEqualTo(tuple2); - } - - @Test - public void should_equate_instances_with_different_protocol_versions() { - TupleType tupleType1 = DataTypes.tupleOf(DataTypes.TEXT); - tupleType1.attach(attachmentPoint); - - // use the V3 attachmentPoint for type2 - TupleType tupleType2 = DataTypes.tupleOf(DataTypes.TEXT); - tupleType2.attach(v3AttachmentPoint); - - TupleValue tuple1 = tupleType1.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - TupleValue tuple2 = tupleType2.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - - assertThat(tuple1).isEqualTo(tuple2); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java deleted file mode 100644 index 6a9f2886783..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.io.UnsupportedEncodingException; -import java.util.List; -import org.junit.Test; - -public class DefaultUdtValueTest extends AccessibleByIdTestBase { - - @Override - protected UdtValue newInstance(List dataTypes, AttachmentPoint attachmentPoint) { - UserDefinedTypeBuilder builder = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")); - for (int i = 0; i < dataTypes.size(); i++) { - builder.withField(CqlIdentifier.fromInternal("field" + i), dataTypes.get(i)); - } - UserDefinedType userDefinedType = builder.build(); - userDefinedType.attach(attachmentPoint); - return userDefinedType.newValue(); - } - - @Override - protected UdtValue newInstance( - List dataTypes, List values, AttachmentPoint attachmentPoint) { - UserDefinedTypeBuilder builder = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")); - for (int i = 0; i < dataTypes.size(); i++) { - builder.withField(CqlIdentifier.fromInternal("field" + i), dataTypes.get(i)); - } - UserDefinedType userDefinedType = builder.build(); - 
userDefinedType.attach(attachmentPoint); - return userDefinedType.newValue(values.toArray()); - } - - @Test - public void should_serialize_and_deserialize() { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - UdtValue in = type.newValue(); - in = in.setBytesUnsafe(0, Bytes.fromHexString("0x00000001")); - in = in.setBytesUnsafe(1, Bytes.fromHexString("0x61")); - - UdtValue out = SerializationHelper.serializeAndDeserialize(in); - - assertThat(out.getType()).isEqualTo(in.getType()); - assertThat(out.getType().isDetached()).isTrue(); - assertThat(Bytes.toHexString(out.getBytesUnsafe(0))).isEqualTo("0x00000001"); - assertThat(Bytes.toHexString(out.getBytesUnsafe(1))).isEqualTo("0x61"); - } - - @Test - public void should_support_null_items_when_setting_in_bulk() throws UnsupportedEncodingException { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(TypeCodecs.INT); - when(codecRegistry.codecFor(DataTypes.TEXT, "foo")).thenReturn(TypeCodecs.TEXT); - UdtValue value = type.newValue(null, "foo"); - - assertThat(value.isNull(0)).isTrue(); - assertThat(value.getString(1)).isEqualTo("foo"); - } - - @Test - public void should_equate_instances_with_same_values_but_different_binary_representations() { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("f"), DataTypes.VARINT) - .build(); - - UdtValue udt1 = type.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x01")); - 
UdtValue udt2 = type.newValue().setBytesUnsafe(0, Bytes.fromHexString("0x0001")); - - assertThat(udt1).isEqualTo(udt2); - } - - @Test - public void should_format_to_string() { - UserDefinedType type = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("t"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("d"), DataTypes.DOUBLE) - .build(); - - UdtValue udt = type.newValue().setString("t", "foobar").setDouble("d", 3.14); - - assertThat(udt.getFormattedContents()).isEqualTo("{t:'foobar',i:NULL,d:3.14}"); - } - - @Test - public void should_equate_instances_with_different_protocol_versions() { - - UserDefinedType type1 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("t"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("d"), DataTypes.DOUBLE) - .build(); - type1.attach(attachmentPoint); - - // create an idential type, but with a different attachment point - UserDefinedType type2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("t"), DataTypes.TEXT) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("d"), DataTypes.DOUBLE) - .build(); - type2.attach(v3AttachmentPoint); - UdtValue udt1 = - type1.newValue().setString("t", "some text string").setInt("i", 42).setDouble("d", 3.14); - UdtValue udt2 = - type2.newValue().setString("t", "some text string").setInt("i", 42).setDouble("d", 3.14); - assertThat(udt1).isEqualTo(udt2); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java deleted file mode 100644 index 697a32fb029..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.data; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class IdentifierIndexTest { - private static final CqlIdentifier Foo = CqlIdentifier.fromInternal("Foo"); - private static final CqlIdentifier foo = CqlIdentifier.fromInternal("foo"); - private static final CqlIdentifier fOO = CqlIdentifier.fromInternal("fOO"); - private IdentifierIndex index = - new IdentifierIndex(ImmutableList.of(Foo, foo, fOO, Foo, foo, fOO)); - - @Test - public void should_find_first_index_of_existing_identifier() { - assertThat(index.firstIndexOf(Foo)).isEqualTo(0); - assertThat(index.firstIndexOf(foo)).isEqualTo(1); - assertThat(index.firstIndexOf(fOO)).isEqualTo(2); - } - - @Test - public void should_not_find_index_of_nonexistent_identifier() { - assertThat(index.firstIndexOf(CqlIdentifier.fromInternal("FOO"))).isEqualTo(-1); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_find_first_index_of_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(index.firstIndexOf("foo")).isEqualTo(0); - assertThat(index.firstIndexOf("FOO")).isEqualTo(0); - assertThat(index.firstIndexOf("fOO")).isEqualTo(0); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_not_find_first_index_of_nonexistent_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - 
Locale.setDefault(locale); - assertThat(index.firstIndexOf("bar")).isEqualTo(-1); - assertThat(index.firstIndexOf("BAR")).isEqualTo(-1); - assertThat(index.firstIndexOf("bAR")).isEqualTo(-1); - } finally { - Locale.setDefault(def); - } - } - - @Test - public void should_find_first_index_of_case_sensitive_name() { - assertThat(index.firstIndexOf("\"Foo\"")).isEqualTo(0); - assertThat(index.firstIndexOf("\"foo\"")).isEqualTo(1); - assertThat(index.firstIndexOf("\"fOO\"")).isEqualTo(2); - } - - @Test - public void should_not_find_index_of_nonexistent_case_sensitive_name() { - assertThat(index.firstIndexOf("\"FOO\"")).isEqualTo(-1); - } - - @Test - public void should_find_all_indices_of_existing_identifier() { - assertThat(index.allIndicesOf(Foo)).containsExactly(0, 3); - assertThat(index.allIndicesOf(foo)).containsExactly(1, 4); - assertThat(index.allIndicesOf(fOO)).containsExactly(2, 5); - } - - @Test - public void should_not_find_indices_of_nonexistent_identifier() { - assertThat(index.allIndicesOf(CqlIdentifier.fromInternal("FOO"))).isEmpty(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_find_all_indices_of_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(index.allIndicesOf("foo")).containsExactly(0, 1, 2, 3, 4, 5); - assertThat(index.allIndicesOf("FOO")).containsExactly(0, 1, 2, 3, 4, 5); - assertThat(index.allIndicesOf("fOO")).containsExactly(0, 1, 2, 3, 4, 5); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_not_find_indices_of_nonexistent_case_insensitive_name(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(index.allIndicesOf("bar")).isEmpty(); - assertThat(index.allIndicesOf("BAR")).isEmpty(); - assertThat(index.allIndicesOf("bAR")).isEmpty(); - } finally { - 
Locale.setDefault(def); - } - } - - @Test - public void should_find_all_indices_of_case_sensitive_name() { - assertThat(index.allIndicesOf("\"Foo\"")).containsExactly(0, 3); - assertThat(index.allIndicesOf("\"foo\"")).containsExactly(1, 4); - assertThat(index.allIndicesOf("\"fOO\"")).containsExactly(2, 5); - } - - @Test - public void should_not_find_indices_of_nonexistent_case_sensitive_name() { - assertThat(index.allIndicesOf("\"FOO\"")).isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java deleted file mode 100644 index 3c832812662..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Optional; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyDcAgnosticTest extends BasicLoadBalancingPolicyQueryPlanTest { - - @Before - @Override - public void setup() { - super.setup(); - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); - - // since there is no local datacenter defined, the policy should behave with DC awareness - // disabled and pick nodes regardless of their datacenters; we therefore expect all tests of - // BasicLoadBalancingPolicyQueryPlanTest to pass even with the below DC distribution. 
- when(node1.getDatacenter()).thenReturn("dc1"); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(node4.getDatacenter()).thenReturn("dc4"); - when(node5.getDatacenter()).thenReturn(null); - - policy = createAndInitPolicy(); - - assertThat(policy.getLiveNodes().dc(null)).containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.getLiveNodes().dcs()).isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java deleted file mode 100644 index dc955c6e5de..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyDcFailoverTest extends BasicLoadBalancingPolicyQueryPlanTest { - - @Mock protected DefaultNode node6; - @Mock protected DefaultNode node7; - @Mock protected DefaultNode node8; - @Mock protected DefaultNode node9; - - @Test - @Override - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest, then remote nodes - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - 
.containsExactly(node3, node2, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5, node7, node8); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), eq(1)); - // But should shuffle remote nodes - verify(policy, times(3)).shuffleHead(any(), eq(4)); - } - - @Test - @Override - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .thenReturn(ImmutableSet.of(node2, node3, node5, node8)); - - // node 5 and 8 being in a remote DC, they don't get a boost for being a replica - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - - // should shuffle replicas - verify(policy, times(3)).shuffleHead(any(), eq(2)); - // should shuffle remote nodes - verify(policy, times(3)).shuffleHead(any(), eq(4)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); - } - - @Override - protected void assertRoundRobinQueryPlans() { - // nodes 4 to 9 being in a remote DC, they always appear after nodes 1, 2, 3 - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node1, node2, node3, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node1, node4, node5, node7, node8); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5, node7, node8); - } - // should shuffle remote nodes - 
verify(policy, atLeast(1)).shuffleHead(any(), eq(4)); - } - - @Override - protected BasicLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc2"); - when(node5.getDatacenter()).thenReturn("dc2"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc3"); - when(node8.getDatacenter()).thenReturn("dc3"); - when(node9.getDatacenter()).thenReturn("dc3"); - // Accept 2 nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - BasicLoadBalancingPolicy policy = - spy( - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] currentNodes, int headLength) { - // nothing (keep in same order) - } - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - .put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java deleted file mode 100644 index 5b2b6bf864d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyDistanceTest extends LoadBalancingPolicyTestBase { - - @Mock private NodeDistanceEvaluator nodeDistanceEvaluator; - - private ImmutableMap nodes; - - @Before - @Override - public void setup() { - super.setup(); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(nodeDistanceEvaluator); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - nodes = - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3); - } - - @Test - public void should_report_distance_reported_by_user_distance_reporter() { - // Given - given(node2.getDatacenter()).willReturn("dc2"); - given(nodeDistanceEvaluator.evaluateDistance(node1, "dc1")).willReturn(NodeDistance.LOCAL); - given(nodeDistanceEvaluator.evaluateDistance(node2, 
"dc1")).willReturn(NodeDistance.REMOTE); - given(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).willReturn(NodeDistance.IGNORED); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - } - - @Test - public void should_report_LOCAL_when_dc_agnostic() { - // Given - given(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .willReturn(false); - given(node1.getDatacenter()).willReturn(null); - given(node2.getDatacenter()).willReturn("dc1"); - given(node3.getDatacenter()).willReturn("dc2"); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc(null)).containsExactly(node1, node2, node3); - } - - @Test - public void should_report_LOCAL_when_node_in_local_dc() { - // Given - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - } - - @Test - public void should_report_IGNORED_when_node_not_in_local_dc() { - // Given - given(node1.getDatacenter()).willReturn(null); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc3"); - 
BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - // Note: driver 3 would have reported LOCAL for node1 since its datacenter is null - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc(null)).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc3")).isEmpty(); - } - - @Test - public void should_report_REMOTE_when_node_not_in_local_dc_and_dc_failover_enabled() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc3"); - given(node3.getDatacenter()).willReturn("dc4"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .willReturn(1); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1); - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node2); - assertThat(policy.getLiveNodes().dc("dc4")).containsExactly(node3); - } - - @Test - public void should_report_IGNORED_when_node_not_in_local_dc_and_too_many_nodes_for_dc_failover() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc2"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - 
.willReturn(2); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2); - } - - @Test - public void should_report_REMOTE_when_remote_node_up_and_dc_failover() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc2"); - given(node4.getDatacenter()).willReturn("dc2"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .willReturn(4); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - policy.onUp(node4); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node4, NodeDistance.REMOTE); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2, node3, node4); - } - - @Test - public void should_report_IGNORED_when_remote_node_up_and_too_many_nodes_for_dc_failover() { - // Given - given(node1.getDatacenter()).willReturn("dc2"); - given(node2.getDatacenter()).willReturn("dc2"); - given(node3.getDatacenter()).willReturn("dc2"); - given(node4.getDatacenter()).willReturn("dc2"); - given( - defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .willReturn(3); - BasicLoadBalancingPolicy policy = createPolicy(); - // When - policy.init(nodes, distanceReporter); - 
policy.onUp(node4); - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); - verify(distanceReporter).setDistance(node4, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2, node3); - } - - @NonNull - protected BasicLoadBalancingPolicy createPolicy() { - return new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java deleted file mode 100644 index 9959ddbd1bc..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyEventsTest extends LoadBalancingPolicyTestBase { - - @Mock private NodeDistanceEvaluator nodeDistanceEvaluator; - - private BasicLoadBalancingPolicy policy; - - @Before - @Override - public void setup() { - super.setup(); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(nodeDistanceEvaluator); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - policy = createAndInitPolicy(); - reset(distanceReporter); - } - - @Test - public void should_remove_down_node_from_live_set() { - // When - policy.onDown(node2); - - // Then - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); - // should have been called only once, during initialization, but not 
during onDown - verify(nodeDistanceEvaluator).evaluateDistance(node2, "dc1"); - } - - @Test - public void should_remove_removed_node_from_live_set() { - // When - policy.onRemove(node2); - - // Then - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); - // should have been called only once, during initialization, but not during onRemove - verify(nodeDistanceEvaluator).evaluateDistance(node2, "dc1"); - } - - @Test - public void should_set_added_node_to_local() { - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); - // Not added to the live set yet, we're waiting for the pool to open - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - } - - @Test - public void should_ignore_added_node_when_filtered() { - // Given - when(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).thenReturn(NodeDistance.IGNORED); - - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - } - - @Test - public void should_ignore_added_node_when_remote_dc() { - // Given - when(node3.getDatacenter()).thenReturn("dc2"); - - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - } - - @Test - public void should_add_up_node_to_live_set() { - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - } - - @Test - public void 
should_ignore_up_node_when_filtered() { - // Given - when(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).thenReturn(NodeDistance.IGNORED); - - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - } - - @Test - public void should_ignore_up_node_when_remote_dc() { - // Given - when(node3.getDatacenter()).thenReturn("dc2"); - - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - } - - @NonNull - protected BasicLoadBalancingPolicy createAndInitPolicy() { - BasicLoadBalancingPolicy policy = - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java deleted file mode 100644 index 1863e7357e1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { - - @Test - public void should_use_local_dc_if_provided_via_config() { - // Given - 
when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // the parent class sets the config option to "dc1" - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_use_local_dc_if_provided_via_context() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // note: programmatic takes priority, the config won't even be inspected so no need to stub the - // option to null - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - verify(defaultProfile, never()) - .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - } - - @Test - public void should_not_infer_local_dc_if_not_provided() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(node1.getDatacenter()).thenReturn("dc1"); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - BasicLoadBalancingPolicy policy = - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) {}; - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isNull(); - // should not warn about contact points not being in the same DC - 
verify(appender, never()).doAppend(loggingEventCaptor.capture()); - } - - @Test - public void should_warn_if_contact_points_not_in_local_dc() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable warnLogs = - filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); - assertThat(warnLogs).hasSize(1); - assertThat(warnLogs.iterator().next().getFormattedMessage()) - .contains( - "You specified dc1 as the local DC, but some contact points are from a different DC") - .contains("node2=dc2") - .contains("node3=dc3"); - } - - @Test - public void should_include_nodes_from_local_dc_if_local_dc_set() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(node1.getState()).thenReturn(NodeState.UP); - when(node2.getState()).thenReturn(NodeState.DOWN); - when(node3.getState()).thenReturn(NodeState.UNKNOWN); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - // Set distance for all nodes in the local DC - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - // But only include UP or UNKNOWN nodes in the live set - 
assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); - } - - @Test - public void should_ignore_nodes_from_remote_dcs_if_local_dc_set() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - } - - @Test - public void should_not_ignore_nodes_from_remote_dcs_if_local_dc_not_set() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - } - - @Test - public void should_ignore_nodes_excluded_by_distance_reporter() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - 
.thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); - } - - @NonNull - protected BasicLoadBalancingPolicy createPolicy() { - return new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java deleted file mode 100644 index cefdfd31189..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Map; -import java.util.UUID; -import org.junit.Test; -import org.mockito.Mock; - -public class BasicLoadBalancingPolicyPreferredRemoteDcsTest - extends BasicLoadBalancingPolicyDcFailoverTest { - @Mock protected DefaultNode node10; - @Mock protected DefaultNode node11; - @Mock protected DefaultNode node12; - @Mock protected DefaultNode node13; - @Mock protected DefaultNode node14; - - @Override - @Test - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node1, node2, node4, node5, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, 
session)) - .containsExactly( - node3, node2, node4, node5, node1, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node4, node5, node1, node2, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node5, node1, node2, node4, node9, node10, node6, node7, node12, node13); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), eq(1)); - // But should shuffle remote nodes - verify(policy, times(12)).shuffleHead(any(), eq(2)); - } - - @Override - @Test - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .thenReturn(ImmutableSet.of(node1, node2, node3, node6, node9)); - - // node 6 and 9 being in a remote DC, they don't get a boost for being a replica - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node1, node2, node3, node4, node5, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node1, node2, node3, node5, node4, node9, node10, node6, node7, node12, node13); - - // should shuffle replicas - verify(policy, times(2)).shuffleHead(any(), eq(3)); - // should shuffle remote nodes - verify(policy, times(6)).shuffleHead(any(), eq(2)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); - } - - @Override - protected void assertRoundRobinQueryPlans() { - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node1, node2, node3, node4, node5, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node2, node3, node4, node5, node1, node9, node10, node6, node7, 
node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node3, node4, node5, node1, node2, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node4, node5, node1, node2, node3, node9, node10, node6, node7, node12, node13); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly( - node5, node1, node2, node3, node4, node9, node10, node6, node7, node12, node13); - } - - verify(policy, atLeast(15)).shuffleHead(any(), eq(2)); - } - - @Override - protected BasicLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc1"); - when(node5.getDatacenter()).thenReturn("dc1"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc2"); - when(node8.getDatacenter()).thenReturn("dc2"); - when(node9.getDatacenter()).thenReturn("dc3"); - when(node10.getDatacenter()).thenReturn("dc3"); - when(node11.getDatacenter()).thenReturn("dc3"); - when(node12.getDatacenter()).thenReturn("dc4"); - when(node13.getDatacenter()).thenReturn("dc4"); - when(node14.getDatacenter()).thenReturn("dc4"); - - // Accept 2 nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - - when(defaultProfile.getStringList( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)) - .thenReturn(ImmutableList.of("dc3", "dc2")); - - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - BasicLoadBalancingPolicy policy = - spy( - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] currentNodes, int headLength) { - // nothing (keep in same order) - 
} - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - .put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .put(UUID.randomUUID(), node10) - .put(UUID.randomUUID(), node11) - .put(UUID.randomUUID(), node12) - .put(UUID.randomUUID(), node13) - .put(UUID.randomUUID(), node14) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node6, node7); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node9, node10); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc4")).containsExactly(node12, node13); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java deleted file mode 100644 index 50670ab317a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static java.util.Collections.emptySet; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Optional; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - 
-// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class BasicLoadBalancingPolicyQueryPlanTest extends LoadBalancingPolicyTestBase { - - protected static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); - protected static final ByteBuffer ROUTING_KEY = Bytes.fromHexString("0xdeadbeef"); - - @Mock protected Request request; - @Mock protected DefaultSession session; - @Mock protected Metadata metadata; - @Mock protected TokenMap tokenMap; - @Mock protected Token routingToken; - - protected BasicLoadBalancingPolicy policy; - - @Before - @Override - public void setup() { - super.setup(); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); - - policy = createAndInitPolicy(); - } - - @Test - public void should_use_round_robin_when_no_request() { - // Given - request = null; - - // When - assertRoundRobinQueryPlans(); - - // Then - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_no_session() { - // Given - session = null; - - // When - assertRoundRobinQueryPlans(); - - // Then - then(request).should(never()).getRoutingKey(); - then(request).should(never()).getRoutingToken(); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_request_has_no_routing_keyspace() { - // By default from Mockito: - assertThat(request.getKeyspace()).isNull(); - assertThat(request.getRoutingKeyspace()).isNull(); - - 
assertRoundRobinQueryPlans(); - - then(request).should(never()).getRoutingKey(); - then(request).should(never()).getRoutingToken(); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_request_has_no_routing_key_or_token() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - assertThat(request.getRoutingKey()).isNull(); - assertThat(request.getRoutingToken()).isNull(); - - assertRoundRobinQueryPlans(); - - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void should_use_round_robin_when_token_map_absent() { - when(metadata.getTokenMap()).thenReturn(Optional.empty()); - - assertRoundRobinQueryPlans(); - - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); - then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); - } - - @Test - public void - should_use_round_robin_when_token_map_returns_no_replicas_using_request_keyspace_and_routing_key() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(Collections.emptySet()); - - assertRoundRobinQueryPlans(); - - then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); - } - - @Test - public void - should_use_round_robin_when_token_map_returns_no_replicas_using_session_keyspace_and_routing_key() { - // Given - given(request.getKeyspace()).willReturn(null); - given(request.getRoutingKeyspace()).willReturn(null); - given(session.getKeyspace()).willReturn(Optional.of(KEYSPACE)); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - 
given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).willReturn(emptySet()); - // When - assertRoundRobinQueryPlans(); - // Then - then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); - } - - @Test - public void - should_use_round_robin_when_token_map_returns_no_replicas_using_request_keyspace_and_routing_token() { - // Given - given(request.getKeyspace()).willReturn(null); - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingToken()).willReturn(routingToken); - given(tokenMap.getReplicas(KEYSPACE, routingToken)).willReturn(emptySet()); - // When - assertRoundRobinQueryPlans(); - // Then - then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, routingToken); - } - - @Test - public void should_use_round_robin_and_log_error_when_request_throws() { - // Given - given(request.getKeyspace()).willThrow(new NullPointerException()); - // When - policy.newQueryPlan(request, session); - // Then - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .contains("Unexpected error while trying to compute query plan"); - } - - protected void assertRoundRobinQueryPlans() { - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node4, node5, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node4, node5, node1, node2); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node4, node5, node1, node2, node3); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node5, node1, node2, node3, node4); - } - } - - @Test - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, 
ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node2, node4, node5, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node4, node5, node1, node2); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node1, node2, node4); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), anyInt()); - } - - @Test - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3, node5)); - - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node1, node2, node4); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node2, node4, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node4, node1, node2); - - verify(policy, times(3)).shuffleHead(any(), eq(2)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); - } - - protected BasicLoadBalancingPolicy createAndInitPolicy() { - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - BasicLoadBalancingPolicy policy = - spy( - new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] currentNodes, int headLength) { - // nothing (keep in same order) - } - }); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - 
UUID.randomUUID(), node5), - distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3, node4, node5); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java deleted file mode 100644 index dd9b74158f1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import java.util.UUID; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DcInferringLoadBalancingPolicyDcFailoverTest - extends BasicLoadBalancingPolicyDcFailoverTest { - - @Override - protected DcInferringLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc2"); - when(node5.getDatacenter()).thenReturn("dc2"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc3"); - when(node8.getDatacenter()).thenReturn("dc3"); - when(node9.getDatacenter()).thenReturn("dc3"); - // Accept 2 nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - DcInferringLoadBalancingPolicy policy = - spy( - new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - 
.put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java deleted file mode 100644 index 80c414aa8f2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DcInferringLoadBalancingPolicyDistanceTest - extends BasicLoadBalancingPolicyDistanceTest { - - @Override - public void should_report_LOCAL_when_dc_agnostic() { - // This policy cannot operate when contact points are from different DCs - Throwable error = catchThrowable(super::should_report_LOCAL_when_dc_agnostic); - assertThat(error) - .isInstanceOfSatisfying( - IllegalStateException.class, - ise -> - assertThat(ise) - .hasMessageContaining( - "No local DC was provided, but the contact points are from different DCs") - .hasMessageContaining("node1=null") - .hasMessageContaining("node2=dc1") - .hasMessageContaining("node3=dc2")); - } - - @NonNull - @Override - protected BasicLoadBalancingPolicy createPolicy() { - return new DcInferringLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java deleted file mode 100644 index 218d6338df9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.Mockito.reset; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; - -public class DcInferringLoadBalancingPolicyEventsTest extends BasicLoadBalancingPolicyEventsTest { - - @Override - @NonNull - protected BasicLoadBalancingPolicy createAndInitPolicy() { - DcInferringLoadBalancingPolicy policy = - new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME); - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsOnly(node1, node2); - reset(distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java deleted file mode 100644 index 20de3afe9c3..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Test; - -public class 
DcInferringLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { - - @Test - public void should_use_local_dc_if_provided_via_config() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // the parent class sets the config option to "dc1" - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_use_local_dc_if_provided_via_context() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // note: programmatic takes priority, the config won't even be inspected so no need to stub the - // option to null - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - verify(defaultProfile, never()) - .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - } - - @Test - public void should_infer_local_dc_from_contact_points() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_require_local_dc_if_contact_points_from_different_dcs() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - 
when(node2.getDatacenter()).thenReturn("dc2"); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - Throwable t = - catchThrowable( - () -> - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), - distanceReporter)); - - // Then - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "No local DC was provided, but the contact points are from different DCs: node1=dc1, node2=dc2"); - } - - @Test - public void should_require_local_dc_if_contact_points_have_null_dcs() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(node1.getDatacenter()).thenReturn(null); - when(node2.getDatacenter()).thenReturn(null); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - Throwable t = - catchThrowable( - () -> - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), - distanceReporter)); - - // Then - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "The local DC could not be inferred from contact points, please set it explicitly"); - } - - @Test - public void should_warn_if_contact_points_not_in_local_dc() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable warnLogs = - filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); - assertThat(warnLogs).hasSize(1); - 
assertThat(warnLogs.iterator().next().getFormattedMessage()) - .contains( - "You specified dc1 as the local DC, but some contact points are from a different DC") - .contains("node2=dc2") - .contains("node3=dc3"); - } - - @Test - public void should_include_nodes_from_local_dc() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(node1.getState()).thenReturn(NodeState.UP); - when(node2.getState()).thenReturn(NodeState.DOWN); - when(node3.getState()).thenReturn(NodeState.UNKNOWN); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - // Set distance for all nodes in the local DC - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - // But only include UP or UNKNOWN nodes in the live set - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); - } - - @Test - public void should_ignore_nodes_from_remote_dcs() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - } - - @Test - public void should_ignore_nodes_excluded_by_distance_reporter() { - // Given - 
when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); - } - - @NonNull - protected DcInferringLoadBalancingPolicy createPolicy() { - return new DcInferringLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java deleted file mode 100644 index 23d4636a615..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.Mockito.spy; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.UUID; - -public class DcInferringLoadBalancingPolicyQueryPlanTest - extends DefaultLoadBalancingPolicyQueryPlanTest { - - @Override - protected DcInferringLoadBalancingPolicy createAndInitPolicy() { - DcInferringLoadBalancingPolicy policy = - spy( - new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - - @Override - protected long nanoTime() { - return nanoTime; - } - - @Override - protected int diceRoll1d4() { - return diceRoll; - } - }); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - UUID.randomUUID(), node5), - distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java deleted file mode 100644 index f2e741fd756..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import java.util.UUID; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DefaultLoadBalancingPolicyDcFailoverTest - extends BasicLoadBalancingPolicyDcFailoverTest { - - @Override - protected DefaultLoadBalancingPolicy createAndInitPolicy() { - when(node4.getDatacenter()).thenReturn("dc2"); - when(node5.getDatacenter()).thenReturn("dc2"); - when(node6.getDatacenter()).thenReturn("dc2"); - when(node7.getDatacenter()).thenReturn("dc3"); - when(node8.getDatacenter()).thenReturn("dc3"); - when(node9.getDatacenter()).thenReturn("dc3"); - // Accept 2 
nodes per remote DC - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(2); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - DefaultLoadBalancingPolicy policy = - spy( - new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - }); - Map nodes = - ImmutableMap.builder() - .put(UUID.randomUUID(), node1) - .put(UUID.randomUUID(), node2) - .put(UUID.randomUUID(), node3) - .put(UUID.randomUUID(), node4) - .put(UUID.randomUUID(), node5) - .put(UUID.randomUUID(), node6) - .put(UUID.randomUUID(), node7) - .put(UUID.randomUUID(), node8) - .put(UUID.randomUUID(), node9) - .build(); - policy.init(nodes, distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); - assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed - assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java deleted file mode 100644 index 9cf30e048e9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) -@RunWith(MockitoJUnitRunner.Silent.class) -public class DefaultLoadBalancingPolicyDistanceTest extends BasicLoadBalancingPolicyDistanceTest { - - @Override - public void should_report_LOCAL_when_dc_agnostic() { - // This policy cannot operate in dc-agnostic mode - Throwable error = catchThrowable(super::should_report_LOCAL_when_dc_agnostic); - assertThat(error) - .isInstanceOfSatisfying( - IllegalStateException.class, - ise -> - assertThat(ise) - .hasMessageContaining("the local DC must be explicitly set") - .hasMessageContaining("node1=null") - .hasMessageContaining("node2=dc1") - .hasMessageContaining("node3=dc2") - .hasMessageContaining("Current DCs in this cluster are: dc1, dc2")); - } - - @NonNull - @Override - protected BasicLoadBalancingPolicy createPolicy() { - return new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java deleted file mode 100644 index 17e926a29e0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.Mockito.reset; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; - -public class DefaultLoadBalancingPolicyEventsTest extends BasicLoadBalancingPolicyEventsTest { - - @Override - @NonNull - protected DefaultLoadBalancingPolicy createAndInitPolicy() { - DefaultLoadBalancingPolicy policy = new DefaultLoadBalancingPolicy(context, DEFAULT_NAME); - policy.init( - ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.getLiveNodes().dc("dc1")).containsOnly(node1, node2); - reset(distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java deleted file mode 100644 index 7b875209743..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.UUID; -import org.junit.Test; - -public class DefaultLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { - - @Test - public void should_use_local_dc_if_provided_via_config() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // the parent class sets the config option to "dc1" - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void 
should_use_local_dc_if_provided_via_context() { - // Given - when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - // note: programmatic takes priority, the config won't even be inspected so no need to stub the - // option to null - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - verify(defaultProfile, never()) - .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - } - - @Test - public void should_infer_local_dc_if_no_explicit_contact_points() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - when(metadataManager.wasImplicitContactPoint()).thenReturn(true); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); - - // Then - assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_require_local_dc_if_explicit_contact_points() { - // Given - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(false); - when(metadataManager.wasImplicitContactPoint()).thenReturn(false); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - assertThatThrownBy( - () -> policy.init(ImmutableMap.of(UUID.randomUUID(), node2), distanceReporter)) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "Since you provided explicit contact points, the local DC must be explicitly set"); - } - - @Test - public void should_warn_if_contact_points_not_in_local_dc() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - 
when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable warnLogs = - filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); - assertThat(warnLogs).hasSize(1); - assertThat(warnLogs.iterator().next().getFormattedMessage()) - .contains( - "You specified dc1 as the local DC, but some contact points are from a different DC") - .contains("node2=dc2") - .contains("node3=dc3"); - } - - @Test - public void should_include_nodes_from_local_dc() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(node1.getState()).thenReturn(NodeState.UP); - when(node2.getState()).thenReturn(NodeState.DOWN); - when(node3.getState()).thenReturn(NodeState.UNKNOWN); - DefaultLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - // Set distance for all nodes in the local DC - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - // But only include UP or UNKNOWN nodes in the live set - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); - } - - @Test - public void should_ignore_nodes_from_remote_dcs() { - // Given - when(node2.getDatacenter()).thenReturn("dc2"); - when(node3.getDatacenter()).thenReturn("dc3"); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - DefaultLoadBalancingPolicy policy = 
createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); - assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); - assertThat(policy.getLiveNodes().dc("dc3")).isEmpty(); - } - - @Test - public void should_ignore_nodes_excluded_by_distance_reporter() { - // Given - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - - BasicLoadBalancingPolicy policy = createPolicy(); - - // When - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), - distanceReporter); - - // Then - verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); - } - - @NonNull - protected DefaultLoadBalancingPolicy createPolicy() { - return new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java deleted file mode 100644 index fff86a1b750..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java +++ 
/dev/null @@ -1,362 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.then; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Optional; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLongArray; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class DefaultLoadBalancingPolicyQueryPlanTest extends BasicLoadBalancingPolicyQueryPlanTest { - - private static final long T0 = 
Long.MIN_VALUE; - private static final long T1 = 100; - private static final long T2 = 200; - private static final long T3 = 300; - - @Mock protected ChannelPool pool1; - @Mock protected ChannelPool pool2; - @Mock protected ChannelPool pool3; - @Mock protected ChannelPool pool4; - @Mock protected ChannelPool pool5; - - long nanoTime; - int diceRoll; - - private DefaultLoadBalancingPolicy dsePolicy; - - @Before - @Override - public void setup() { - nanoTime = T1; - diceRoll = 4; - given(node4.getDatacenter()).willReturn("dc1"); - given(node5.getDatacenter()).willReturn("dc1"); - given(session.getPools()) - .willReturn( - ImmutableMap.of( - node1, pool1, - node2, pool2, - node3, pool3, - node4, pool4, - node5, pool5)); - given(context.getMetadataManager()).willReturn(metadataManager); - given(metadataManager.getMetadata()).willReturn(metadata); - given(metadataManager.getContactPoints()).willReturn(ImmutableSet.of(node1)); - given(metadata.getTokenMap()).willAnswer(invocation -> Optional.of(tokenMap)); - super.setup(); - dsePolicy = (DefaultLoadBalancingPolicy) policy; - // Note: this assertion relies on the fact that policy.getLiveNodes() implementation preserves - // insertion order. 
- assertThat(dsePolicy.getLiveNodes().dc("dc1")) - .containsExactly(node1, node2, node3, node4, node5); - } - - @Test - public void should_prioritize_and_shuffle_2_replicas() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).willReturn(ImmutableSet.of(node3, node5)); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - Queue plan3 = dsePolicy.newQueryPlan(request, session); - - // Then - // node3 and node5 always first, round-robin on the rest - assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); - assertThat(plan2).containsExactly(node3, node5, node2, node4, node1); - assertThat(plan3).containsExactly(node3, node5, node4, node1, node2); - - then(dsePolicy).should(times(3)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(never()).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_all_newly_up() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - dsePolicy.upTimes.put(node1, T1); - dsePolicy.upTimes.put(node3, T2); - dsePolicy.upTimes.put(node5, T3); // newest up replica - given(pool1.getInFlight()).willReturn(0); - given(pool3.getInFlight()).willReturn(0); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // newest up replica is 5, not in first or second position - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, 
node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void - should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_some_newly_up_and_dice_roll_4() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - dsePolicy.upTimes.put(node1, T2); // newest up replica - dsePolicy.upTimes.put(node3, T1); - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(0); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // newest up replica is node1 in first position and diceRoll = 4 -> bubbles down - assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); - assertThat(plan2).containsExactly(node3, node5, node1, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(times(2)).diceRoll1d4(); - } - - @Test - public void - should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_some_newly_up_and_dice_roll_1() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - dsePolicy.upTimes.put(node1, T2); // newest up replica - dsePolicy.upTimes.put(node3, T1); - given(pool1.getInFlight()).willReturn(0); - given(pool3.getInFlight()).willReturn(0); - diceRoll = 1; - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = 
dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // newest up replica is node1 in first position and diceRoll = 1 -> does not bubble down - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(times(2)).diceRoll1d4(); - } - - @Test - public void should_prioritize_and_shuffle_3_or_more_replicas_when_first_unhealthy() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(100); // unhealthy - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(0); - - dsePolicy.responseTimes.put( - node1, - dsePolicy - .new NodeResponseRateSample(new AtomicLongArray(new long[] {T0, T0}))); // unhealthy - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node1 is unhealthy = 1 -> bubbles down - assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); - assertThat(plan2).containsExactly(node3, node5, node1, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void - should_not_treat_node_as_unhealthy_if_has_in_flight_exceeded_but_response_times_normal() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - 
.willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(100); // unhealthy - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(0); - - dsePolicy.responseTimes.put( - node1, - dsePolicy.new NodeResponseRateSample(new AtomicLongArray(new long[] {T1, T1}))); // healthy - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node1 has more in-flight than node3 -> swap - assertThat(plan1).containsExactly(node3, node1, node5, node2, node4); - assertThat(plan2).containsExactly(node3, node1, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void should_prioritize_and_shuffle_3_or_more_replicas_when_last_unhealthy() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(0); - given(pool3.getInFlight()).willReturn(0); - given(pool5.getInFlight()).willReturn(100); // unhealthy - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node5 is unhealthy -> noop - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void 
should_prioritize_and_shuffle_3_or_more_replicas_when_majority_unhealthy() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(100); - given(pool3.getInFlight()).willReturn(100); - given(pool5.getInFlight()).willReturn(0); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // majority of nodes unhealthy -> noop - assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); - assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - @Test - public void should_reorder_first_two_replicas_when_first_has_more_in_flight_than_second() { - // Given - given(request.getRoutingKeyspace()).willReturn(KEYSPACE); - given(request.getRoutingKey()).willReturn(ROUTING_KEY); - given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) - .willReturn(ImmutableSet.of(node1, node3, node5)); - given(pool1.getInFlight()).willReturn(200); - given(pool3.getInFlight()).willReturn(100); - - // When - Queue plan1 = dsePolicy.newQueryPlan(request, session); - Queue plan2 = dsePolicy.newQueryPlan(request, session); - - // Then - // nodes 1, 3 and 5 always first, round-robin on the rest - // node1 has more in-flight than node3 -> swap - assertThat(plan1).containsExactly(node3, node1, node5, node2, node4); - assertThat(plan2).containsExactly(node3, node1, node5, node4, node2); - - then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); - then(dsePolicy).should(times(2)).nanoTime(); - then(dsePolicy).should(never()).diceRoll1d4(); - } - - 
@Override - protected DefaultLoadBalancingPolicy createAndInitPolicy() { - DefaultLoadBalancingPolicy policy = - spy( - new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected void shuffleHead(Object[] array, int n) {} - - @Override - protected long nanoTime() { - return nanoTime; - } - - @Override - protected int diceRoll1d4() { - return diceRoll; - } - }); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - UUID.randomUUID(), node5), - distanceReporter); - return policy; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java deleted file mode 100644 index 757af43ef67..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; -import static org.mockito.BDDMockito.given; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class DefaultLoadBalancingPolicyRequestTrackerTest extends LoadBalancingPolicyTestBase { - - @Mock Request request; - @Mock DriverExecutionProfile profile; - final String logPrefix = "lbp-test-log-prefix"; - - private DefaultLoadBalancingPolicy policy; - private long nextNanoTime; - - @Before - @Override - public void setup() { - super.setup(); - given(metadataManager.getContactPoints()).willReturn(ImmutableSet.of(node1)); - policy = - new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { - @Override - protected long nanoTime() { - return nextNanoTime; - } - }; - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3), - distanceReporter); - } - - @Test - public void should_record_first_response_time_on_node_success() { - // Given - nextNanoTime = 123; - - // When - policy.onNodeSuccess(request, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying(node1, value -> assertThat(value.oldest).isEqualTo(123L)) - .doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void 
should_record_second_response_time_on_node_success() { - // Given - should_record_first_response_time_on_node_success(); - nextNanoTime = 456; - - // When - policy.onNodeSuccess(request, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // oldest value first - assertThat(value.oldest).isEqualTo(123); - assertThat(value.newest.getAsLong()).isEqualTo(456); - }) - .doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_further_response_times_on_node_success() { - // Given - should_record_second_response_time_on_node_success(); - nextNanoTime = 789; - - // When - policy.onNodeSuccess(request, 0, profile, node1, logPrefix); - policy.onNodeSuccess(request, 0, profile, node2, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // values should rotate left (bubble up) - assertThat(value.oldest).isEqualTo(456); - assertThat(value.newest.getAsLong()).isEqualTo(789); - }) - .hasEntrySatisfying(node2, value -> assertThat(value.oldest).isEqualTo(789)) - .doesNotContainKey(node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_first_response_time_on_node_error() { - // Given - nextNanoTime = 123; - Throwable iae = new IllegalArgumentException(); - - // When - policy.onNodeError(request, iae, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying(node1, value -> assertThat(value.oldest).isEqualTo(123L)) - 
.doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_second_response_time_on_node_error() { - // Given - should_record_first_response_time_on_node_error(); - nextNanoTime = 456; - Throwable iae = new IllegalArgumentException(); - - // When - policy.onNodeError(request, iae, 0, profile, node1, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // oldest value first - assertThat(value.oldest).isEqualTo(123); - assertThat(value.newest.getAsLong()).isEqualTo(456); - }) - .doesNotContainKeys(node2, node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); - } - - @Test - public void should_record_further_response_times_on_node_error() { - // Given - should_record_second_response_time_on_node_error(); - nextNanoTime = 789; - Throwable iae = new IllegalArgumentException(); - - // When - policy.onNodeError(request, iae, 0, profile, node1, logPrefix); - policy.onNodeError(request, iae, 0, profile, node2, logPrefix); - - // Then - assertThat(policy.responseTimes) - .hasEntrySatisfying( - node1, - value -> { - // values should rotate left (bubble up) - assertThat(value.oldest).isEqualTo(456); - assertThat(value.newest.getAsLong()).isEqualTo(789); - }) - .hasEntrySatisfying(node2, value -> assertThat(value.oldest).isEqualTo(789)) - .doesNotContainKey(node3); - assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); - assertThat(policy.isResponseRateInsufficient(node3, 
nextNanoTime)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java deleted file mode 100644 index c9149efa69f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing; - -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import org.junit.After; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public abstract class LoadBalancingPolicyTestBase { - - @Mock protected DefaultNode node1; - @Mock protected DefaultNode node2; - @Mock protected DefaultNode node3; - @Mock protected DefaultNode node4; - @Mock protected DefaultNode node5; - @Mock protected InternalDriverContext context; - @Mock protected DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock protected LoadBalancingPolicy.DistanceReporter distanceReporter; - @Mock protected Appender appender; - @Mock protected MetadataManager metadataManager; - - @Captor protected ArgumentCaptor loggingEventCaptor; - - protected Logger logger; - - @Before - public void setup() { - 
when(context.getSessionName()).thenReturn("test"); - when(context.getConfig()).thenReturn(config); - when(config.getProfile(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(defaultProfile); - - when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); - when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn(true); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) - .thenReturn("dc1"); - when(defaultProfile.getBoolean(DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true)) - .thenReturn(true); - when(defaultProfile.getInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) - .thenReturn(0); - when(defaultProfile.getBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) - .thenReturn(false); - when(defaultProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).thenReturn("ONE"); - - when(context.getMetadataManager()).thenReturn(metadataManager); - - logger = - (Logger) LoggerFactory.getLogger("com.datastax.oss.driver.internal.core.loadbalancing"); - logger.addAppender(appender); - - for (Node node : ImmutableList.of(node1, node2, node3, node4, node5)) { - when(node.getDatacenter()).thenReturn("dc1"); - } - - when(context.getLocalDatacenter(anyString())).thenReturn(null); - when(context.getConsistencyLevelRegistry()).thenReturn(new DefaultConsistencyLevelRegistry()); - } - - @After - public void teardown() { - logger.detachAppender(appender); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java deleted file mode 100644 index 0730bcd346c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.Test; - -public class DcAgnosticNodeSetTest { - - @Test - public void should_add_node() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - Node node = mock(Node.class); - assertThat(set.add(node)).isTrue(); - assertThat(set.add(node)).isFalse(); - } - - @Test - public void should_remove_node() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - Node node = mock(Node.class); - set.add(node); - assertThat(set.remove(node)).isTrue(); - assertThat(set.remove(node)).isFalse(); - } - - @Test - public void should_return_all_nodes() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - Node node1 = mock(Node.class); - set.add(node1); - Node node2 = mock(Node.class); - set.add(node2); - assertThat(set.dc(null)).contains(node1, node2); - assertThat(set.dc("irrelevant")).contains(node1, node2); - } - - @Test - public void should_return_empty_dcs() { - DcAgnosticNodeSet set = new DcAgnosticNodeSet(); - assertThat(set.dcs()).isEmpty(); - } -} diff 
--git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java deleted file mode 100644 index 21c58cbb829..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.Test; - -public class MultiDcNodeSetTest { - - @Test - public void should_add_node() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - assertThat(set.add(node1)).isTrue(); - assertThat(set.add(node1)).isFalse(); - Node node2 = mockNode("dc2"); - assertThat(set.add(node2)).isTrue(); - assertThat(set.add(node2)).isFalse(); - } - - @Test - public void should_remove_node() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - set.add(node1); - assertThat(set.remove(node1)).isTrue(); - assertThat(set.remove(node1)).isFalse(); - Node node2 = mockNode("dc2"); - set.add(node2); - assertThat(set.remove(node2)).isTrue(); - assertThat(set.remove(node2)).isFalse(); - } - - @Test - public void should_return_all_nodes_in_dc() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - set.add(node1); - Node node2 = mockNode("dc1"); - set.add(node2); - Node node3 = mockNode("dc2"); - set.add(node3); - assertThat(set.dc("dc1")).contains(node1, node2); - assertThat(set.dc("dc2")).contains(node3); - assertThat(set.dc("dc3")).isEmpty(); - assertThat(set.dc(null)).isEmpty(); - } - - @Test - public void should_return_all_dcs() { - MultiDcNodeSet set = new MultiDcNodeSet(); - Node node1 = mockNode("dc1"); - set.add(node1); - Node node2 = mockNode("dc2"); - set.add(node2); - assertThat(set.dcs()).contains("dc1", "dc2"); - } - - private Node mockNode(String dc) { - Node node = mock(Node.class); - when(node.getDatacenter()).thenReturn(dc); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java deleted file mode 100644 index 063c13c9386..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.Test; - -public class SingleDcNodeSetTest { - - @Test - public void should_add_node() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - Node node1 = mockNode("dc1"); - assertThat(set.add(node1)).isTrue(); - assertThat(set.add(node1)).isFalse(); - Node node2 = mockNode("dc2"); - assertThat(set.add(node2)).isFalse(); - } - - @Test - public void should_remove_node() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - Node node = mockNode("dc1"); - set.add(node); - assertThat(set.remove(node)).isTrue(); - assertThat(set.remove(node)).isFalse(); - } - - @Test - public void should_return_all_nodes_if_local_dc() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - Node node1 = mockNode("dc1"); - set.add(node1); - Node node2 = mockNode("dc1"); - set.add(node2); - Node node3 = mockNode("dc2"); - set.add(node3); - assertThat(set.dc("dc1")).contains(node1, node2); - assertThat(set.dc("dc2")).isEmpty(); - assertThat(set.dc(null)).isEmpty(); - } - - @Test - public void should_return_only_local_dc() { - SingleDcNodeSet set = new SingleDcNodeSet("dc1"); - assertThat(set.dcs()).contains("dc1"); - } - - private Node mockNode(String dc) { - Node node = mock(Node.class); - when(node.getDatacenter()).thenReturn(dc); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java deleted file mode 100644 index 8d337bcc7e3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or 
more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.net.InetSocketAddress; -import java.util.Collections; -import java.util.Map; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class AddNodeRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode node1; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - node1 = 
TestNodeFactory.newNode(1, context); - } - - @Test - public void should_add_new_node() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - UUID newHostId = Uuids.random(); - DefaultEndPoint newEndPoint = TestNodeFactory.newEndPoint(2); - UUID newSchemaVersion = Uuids.random(); - DefaultNodeInfo newNodeInfo = - DefaultNodeInfo.builder() - .withHostId(newHostId) - .withEndPoint(newEndPoint) - .withDatacenter("dc1") - .withRack("rack2") - .withSchemaVersion(newSchemaVersion) - .build(); - AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(node1.getHostId(), newHostId); - Node node2 = newNodes.get(newHostId); - assertThat(node2.getEndPoint()).isEqualTo(newEndPoint); - assertThat(node2.getDatacenter()).isEqualTo("dc1"); - assertThat(node2.getRack()).isEqualTo("rack2"); - assertThat(node2.getHostId()).isEqualTo(newHostId); - assertThat(node2.getSchemaVersion()).isEqualTo(newSchemaVersion); - assertThat(result.events).containsExactly(NodeStateEvent.added((DefaultNode) node2)); - } - - @Test - public void should_not_add_existing_node_with_same_id_and_endpoint() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - DefaultNodeInfo newNodeInfo = - DefaultNodeInfo.builder() - .withHostId(node1.getHostId()) - .withEndPoint(node1.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack2") - .build(); - AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node1.getHostId()); - // Info is not copied over: - 
assertThat(node1.getDatacenter()).isNull(); - assertThat(node1.getRack()).isNull(); - assertThat(result.events).isEmpty(); - } - - @Test - public void should_add_existing_node_with_same_id_but_different_endpoint() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - DefaultEndPoint newEndPoint = TestNodeFactory.newEndPoint(2); - InetSocketAddress newBroadcastRpcAddress = newEndPoint.resolve(); - UUID newSchemaVersion = Uuids.random(); - DefaultNodeInfo newNodeInfo = - DefaultNodeInfo.builder() - .withHostId(node1.getHostId()) - .withEndPoint(newEndPoint) - .withDatacenter("dc1") - .withRack("rack2") - .withSchemaVersion(newSchemaVersion) - .withBroadcastRpcAddress(newBroadcastRpcAddress) - .build(); - AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).hasSize(1).containsEntry(node1.getHostId(), node1); - assertThat(node1.getEndPoint()).isEqualTo(newEndPoint); - assertThat(node1.getDatacenter()).isEqualTo("dc1"); - assertThat(node1.getRack()).isEqualTo("rack2"); - assertThat(node1.getSchemaVersion()).isEqualTo(newSchemaVersion); - assertThat(result.events).containsExactly(TopologyEvent.suggestUp(newBroadcastRpcAddress)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java deleted file mode 100644 index 7da8fb39415..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import java.net.InetSocketAddress; -import org.junit.Test; - -public class DefaultEndPointTest { - - @Test - public void should_create_from_host_name() { - DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("localhost", 9042)); - assertThat(endPoint.asMetricPrefix()).isEqualTo("localhost:9042"); - } - - @Test - public void should_create_from_literal_ipv4_address() { - DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("127.0.0.1", 9042)); - assertThat(endPoint.asMetricPrefix()).isEqualTo("127_0_0_1:9042"); - } - - @Test - public void should_create_from_literal_ipv6_address() { - DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("::1", 9042)); - assertThat(endPoint.asMetricPrefix()).isEqualTo("0:0:0:0:0:0:0:1:9042"); - } - - @Test - public void should_create_from_unresolved_address() { - InetSocketAddress address = InetSocketAddress.createUnresolved("test.com", 9042); - DefaultEndPoint endPoint = new DefaultEndPoint(address); - assertThat(endPoint.asMetricPrefix()).isEqualTo("test_com:9042"); - 
assertThat(address.isUnresolved()).isTrue(); - } - - @Test - public void should_reject_null_address() { - assertThatThrownBy(() -> new DefaultEndPoint(null)) - .isInstanceOf(NullPointerException.class) - .hasMessage("address can't be null"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java deleted file mode 100644 index b463f9caa7b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultReplicationStrategyFactory; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Collections; -import java.util.Map; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class DefaultMetadataTokenMapTest { - - // Simulate the simplest setup possible for a functional token map. We're not testing the token - // map itself, only how the metadata interacts with it. 
- private static final String TOKEN1 = "-9000000000000000000"; - private static final String TOKEN2 = "9000000000000000000"; - private static final Node NODE1 = mockNode(TOKEN1); - private static final Node NODE2 = mockNode(TOKEN2); - private static final CqlIdentifier KEYSPACE_NAME = CqlIdentifier.fromInternal("ks"); - private static final KeyspaceMetadata KEYSPACE = - mockKeyspace( - KEYSPACE_NAME, - ImmutableMap.of( - "class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")); - - @Mock private InternalDriverContext context; - @Mock private ChannelFactory channelFactory; - - @Before - public void setup() { - when(context.getChannelFactory()).thenReturn(channelFactory); - DefaultReplicationStrategyFactory replicationStrategyFactory = - new DefaultReplicationStrategyFactory(context); - when(context.getReplicationStrategyFactory()).thenReturn(replicationStrategyFactory); - } - - @Test - public void should_not_build_token_map_when_initializing_with_contact_points() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - assertThat(contactPointsMetadata.getTokenMap()).isNotPresent(); - } - - @Test - public void should_build_minimal_token_map_on_first_refresh() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - true, - true, - new Murmur3TokenFactory(), - context); - assertThat(firstRefreshMetadata.getTokenMap().get().getTokenRanges()).hasSize(1); - } - - @Test - public void should_not_build_token_map_when_disabled() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - 
contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - false, - true, - new Murmur3TokenFactory(), - context); - assertThat(firstRefreshMetadata.getTokenMap()).isNotPresent(); - } - - @Test - public void should_stay_empty_on_first_refresh_if_partitioner_missing() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), true, true, null, context); - assertThat(firstRefreshMetadata.getTokenMap()).isNotPresent(); - } - - @Test - public void should_update_minimal_token_map_if_new_node_and_still_no_schema() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - true, - true, - new Murmur3TokenFactory(), - context); - DefaultMetadata secondRefreshMetadata = - firstRefreshMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1, NODE2.getHostId(), NODE2), - true, - false, - null, - context); - assertThat(secondRefreshMetadata.getTokenMap().get().getTokenRanges()).hasSize(2); - } - - @Test - public void should_update_token_map_when_schema_changes() { - DefaultMetadata contactPointsMetadata = - new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); - DefaultMetadata firstRefreshMetadata = - contactPointsMetadata.withNodes( - ImmutableMap.of(NODE1.getHostId(), NODE1), - true, - true, - new Murmur3TokenFactory(), - context); - DefaultMetadata schemaRefreshMetadata = - firstRefreshMetadata.withSchema(ImmutableMap.of(KEYSPACE_NAME, KEYSPACE), true, context); - assertThat(schemaRefreshMetadata.getTokenMap().get().getTokenRanges(KEYSPACE_NAME, NODE1)) - .isNotEmpty(); - } - - 
private static DefaultNode mockNode(String token) { - DefaultNode node = mock(DefaultNode.class); - when(node.getHostId()).thenReturn(UUID.randomUUID()); - when(node.getRawTokens()).thenReturn(ImmutableSet.of(token)); - return node; - } - - private static KeyspaceMetadata mockKeyspace( - CqlIdentifier name, Map replicationConfig) { - KeyspaceMetadata keyspace = mock(KeyspaceMetadata.class); - when(keyspace.getName()).thenReturn(name); - when(keyspace.getReplication()).thenReturn(replicationConfig); - return keyspace; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java deleted file mode 100644 index 6a53fe3e433..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; -import java.net.InetSocketAddress; -import java.util.UUID; -import org.junit.Test; - -public class DefaultNodeTest { - - private final String uuidStr = "1e4687e6-f94e-432e-a792-216f89ef265f"; - private final UUID hostId = UUID.fromString(uuidStr); - private final EndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("localhost", 9042)); - - @Test - public void should_have_expected_string_representation() { - - DefaultNode node = new DefaultNode(endPoint, MockedDriverContextFactory.defaultDriverContext()); - node.hostId = hostId; - - String expected = - String.format( - "Node(endPoint=localhost/127.0.0.1:9042, hostId=1e4687e6-f94e-432e-a792-216f89ef265f, hashCode=%x)", - node.hashCode()); - assertThat(node.toString()).isEqualTo(expected); - } - - @Test - public void should_have_expected_string_representation_if_hostid_is_null() { - - DefaultNode node = new DefaultNode(endPoint, MockedDriverContextFactory.defaultDriverContext()); - node.hostId = null; - - String expected = - String.format( - "Node(endPoint=localhost/127.0.0.1:9042, hostId=null, hashCode=%x)", node.hashCode()); - assertThat(node.toString()).isEqualTo(expected); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java deleted file mode 100644 index dd40f233518..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java +++ /dev/null @@ -1,805 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.fail; -import static org.assertj.core.api.Assertions.filter; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator; -import 
com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.response.Error; -import com.google.common.collect.Streams; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.slf4j.LoggerFactory; - 
-@RunWith(DataProviderRunner.class) -public class DefaultTopologyMonitorTest { - - private static final InetSocketAddress ADDRESS2 = new InetSocketAddress("127.0.0.2", 9042); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultConfig; - @Mock private ControlConnection controlConnection; - @Mock private DriverChannel channel; - @Mock protected MetricsFactory metricsFactory; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - @Mock private SslEngineFactory sslEngineFactory; - - private DefaultNode node1; - private DefaultNode node2; - - private TestTopologyMonitor topologyMonitor; - - private Logger logger; - private Level initialLogLevel; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT)) - .thenReturn(Duration.ofSeconds(1)); - when(config.getDefaultProfile()).thenReturn(defaultConfig); - when(context.getConfig()).thenReturn(config); - - AddressTranslator addressTranslator = spy(new PassThroughAddressTranslator(context)); - when(context.getAddressTranslator()).thenReturn(addressTranslator); - - when(channel.getEndPoint()).thenReturn(node1.getEndPoint()); - when(controlConnection.channel()).thenReturn(channel); - when(context.getControlConnection()).thenReturn(controlConnection); - - topologyMonitor = new TestTopologyMonitor(context); - - logger = (Logger) LoggerFactory.getLogger(DefaultTopologyMonitor.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.INFO); - logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - public void 
should_initialize_control_connection() { - // When - topologyMonitor.init(); - - // Then - verify(controlConnection).init(true, false, true); - } - - @Test - public void should_not_refresh_control_node() { - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node1); - - // Then - assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo.isPresent()).isFalse()); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_present() { - // Given - node2.broadcastAddress = ADDRESS2; - topologyMonitor.isSchemaV2 = false; - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers WHERE peer = :address", - ImmutableMap.of("address", ADDRESS2.getAddress()), - mockResult(mockPeersRow(2, node2.getHostId())))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_present_v2() { - // Given - node2.broadcastAddress = ADDRESS2; - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port", - ImmutableMap.of("address", ADDRESS2.getAddress(), "port", 9042), - mockResult(mockPeersV2Row(2, node2.getHostId())))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - assertThat(info.getBroadcastAddress().get().getPort()).isEqualTo(7002); - }); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_not_present() { - // Given - 
topologyMonitor.isSchemaV2 = false; - node2.broadcastAddress = null; - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The rpc_address in each row should have been tried, only the last row should have been - // converted - verify(peer3).getUuid("host_id"); - verify(peer3, never()).getString(anyString()); - - verify(peer2, times(2)).getUuid("host_id"); - verify(peer2).getString("data_center"); - } - - @Test - public void should_refresh_node_from_peers_if_broadcast_address_is_not_present_V2() { - // Given - topologyMonitor.isSchemaV2 = true; - node2.broadcastAddress = null; - AdminRow peer3 = mockPeersV2Row(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersV2Row(2, node2.getHostId()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The host_id in each row should have been tried, only the last row should have been - // converted - verify(peer3).getUuid("host_id"); - verify(peer3, never()).getString(anyString()); - - verify(peer2, times(2)).getUuid("host_id"); - verify(peer2).getString("data_center"); - } - - @Test - public void should_get_new_node_from_peers() { - // Given - AdminRow peer3 = mockPeersRow(4, UUID.randomUUID()); - AdminRow peer2 = 
mockPeersRow(3, node2.getHostId()); - AdminRow peer1 = mockPeersRow(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = false; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The rpc_address in each row should have been tried, only the last row should have been - // converted - verify(peer3).getInetAddress("rpc_address"); - verify(peer3, never()).getString(anyString()); - - verify(peer2).getInetAddress("rpc_address"); - verify(peer2, never()).getString(anyString()); - - verify(peer1).getInetAddress("rpc_address"); - verify(peer1).getString("data_center"); - } - - @Test - public void should_get_new_node_from_peers_v2() { - // Given - AdminRow peer3 = mockPeersV2Row(4, UUID.randomUUID()); - AdminRow peer2 = mockPeersV2Row(3, node2.getHostId()); - AdminRow peer1 = mockPeersV2Row(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS2); - - // Then - assertThatStage(futureInfo) - .isSuccess( - maybeInfo -> { - assertThat(maybeInfo.isPresent()).isTrue(); - NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc2"); - }); - // The natove in each row should have been tried, only the last row should have been - // converted - verify(peer3).getInetAddress("native_address"); - verify(peer3, never()).getString(anyString()); - - verify(peer2).getInetAddress("native_address"); - verify(peer2, never()).getString(anyString()); - - verify(peer1).getInetAddress("native_address"); - 
verify(peer1).getString("data_center"); - } - - @Test - public void should_refresh_node_list_from_local_and_peers() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Collections.emptyMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2))); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - NodeInfo info1 = iterator.next(); - assertThat(info1.getEndPoint()).isEqualTo(node1.getEndPoint()); - assertThat(info1.getDatacenter()).isEqualTo("dc1"); - NodeInfo info3 = iterator.next(); - assertThat(info3.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); - assertThat(info3.getDatacenter()).isEqualTo("dc3"); - NodeInfo info2 = iterator.next(); - assertThat(info2.getEndPoint()).isEqualTo(node2.getEndPoint()); - assertThat(info2.getDatacenter()).isEqualTo("dc2"); - }); - } - - @Test - @UseDataProvider("columnsToCheckV1") - public void should_skip_invalid_peers_row(String columnToCheck) { - // Given - topologyMonitor.isSchemaV2 = false; - node2.broadcastAddress = ADDRESS2; - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - when(peer2.isNull(columnToCheck)).thenReturn(true); - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers WHERE peer = :address", - ImmutableMap.of("address", ADDRESS2.getAddress()), - mockResult(peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo).isEmpty()); - 
assertThat(node2.broadcastAddress).isNotNull().isEqualTo(ADDRESS2); - assertLog( - Level.WARN, - "[null] Found invalid row in system.peers for peer: /127.0.0.2. " - + "This is likely a gossip or snitch issue, this node will be ignored."); - } - - @Test - @UseDataProvider("columnsToCheckV2") - public void should_skip_invalid_peers_row_v2(String columnToCheck) { - // Given - topologyMonitor.isSchemaV2 = true; - node2.broadcastAddress = ADDRESS2; - AdminRow peer2 = mockPeersV2Row(2, node2.getHostId()); - when(peer2.isNull(columnToCheck)).thenReturn(true); - topologyMonitor.stubQueries( - new StubbedQuery( - "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port", - ImmutableMap.of("address", ADDRESS2.getAddress(), "port", 9042), - mockResult(peer2))); - - // When - CompletionStage> futureInfo = topologyMonitor.refreshNode(node2); - - // Then - assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo).isEmpty()); - assertThat(node2.broadcastAddress).isNotNull().isEqualTo(ADDRESS2); - assertLog( - Level.WARN, - "[null] Found invalid row in system.peers_v2 for peer: /127.0.0.2. 
" - + "This is likely a gossip or snitch issue, this node will be ignored."); - } - - @Test - public void should_stop_executing_queries_once_closed() { - // Given - topologyMonitor.close(); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isFailed(error -> assertThat(error).isInstanceOf(IllegalStateException.class)); - } - - @Test - public void should_warn_when_control_host_found_in_system_peers() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer1 = mockPeersRow(1, node2.getHostId()); // invalid - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Collections.emptyMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> - assertThat(infos) - .hasSize(3) - .extractingResultOf("getEndPoint") - .containsOnlyOnce(node1.getEndPoint())); - assertLogContains( - Level.WARN, - "[null] Control node /127.0.0.1:9042 has an entry for itself in system.peers: " - + "this entry will be ignored. 
This is likely due to a misconfiguration; " - + "please verify your rpc_address configuration in cassandra.yaml on " - + "all nodes in your cluster."); - } - - @Test - public void should_warn_when_control_host_found_in_system_peers_v2() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - AdminRow peer1 = mockPeersRow(1, node2.getHostId()); // invalid - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2, peer1))); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> - assertThat(infos) - .hasSize(3) - .extractingResultOf("getEndPoint") - .containsOnlyOnce(node1.getEndPoint())); - assertLogContains( - Level.WARN, - "[null] Control node /127.0.0.1:9042 has an entry for itself in system.peers_v2: " - + "this entry will be ignored. 
This is likely due to a misconfiguration; " - + "please verify your rpc_address configuration in cassandra.yaml on " - + "all nodes in your cluster."); - } - - // Confirm the base case of extracting peer info from peers_v2, no SSL involved - @Test - public void should_get_peer_address_info_peers_v2() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer2 = mockPeersV2Row(3, node2.getHostId()); - AdminRow peer1 = mockPeersV2Row(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer2, peer1))); - when(context.getSslEngineFactory()).thenReturn(Optional.empty()); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - // First NodeInfo is for local, skip past that - iterator.next(); - NodeInfo peer2nodeInfo = iterator.next(); - assertThat(peer2nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); - NodeInfo peer1nodeInfo = iterator.next(); - assertThat(peer1nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.2", 9042)); - }); - } - - // Confirm the base case of extracting peer info from DSE peers table, no SSL involved - @Test - public void should_get_peer_address_info_peers_dse() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer2 = mockPeersRowDse(3, node2.getHostId()); - AdminRow peer1 = mockPeersRowDse(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Maps.newHashMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer2, peer1))); - 
when(context.getSslEngineFactory()).thenReturn(Optional.empty()); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - // First NodeInfo is for local, skip past that - iterator.next(); - NodeInfo peer2nodeInfo = iterator.next(); - assertThat(peer2nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); - NodeInfo peer1nodeInfo = iterator.next(); - assertThat(peer1nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.2", 9042)); - }); - } - - // Confirm the base case of extracting peer info from DSE peers table, this time with SSL - @Test - public void should_get_peer_address_info_peers_dse_with_ssl() { - // Given - AdminRow local = mockLocalRow(1, node1.getHostId()); - AdminRow peer2 = mockPeersRowDseWithSsl(3, node2.getHostId()); - AdminRow peer1 = mockPeersRowDseWithSsl(2, node1.getHostId()); - topologyMonitor.isSchemaV2 = true; - topologyMonitor.stubQueries( - new StubbedQuery("SELECT * FROM system.local", mockResult(local)), - new StubbedQuery("SELECT * FROM system.peers_v2", Maps.newHashMap(), null, true), - new StubbedQuery("SELECT * FROM system.peers", mockResult(peer2, peer1))); - when(context.getSslEngineFactory()).thenReturn(Optional.of(sslEngineFactory)); - - // When - CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); - - // Then - assertThatStage(futureInfos) - .isSuccess( - infos -> { - Iterator iterator = infos.iterator(); - // First NodeInfo is for local, skip past that - iterator.next(); - NodeInfo peer2nodeInfo = iterator.next(); - assertThat(peer2nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.3", 9043)); - NodeInfo peer1nodeInfo = iterator.next(); - assertThat(peer1nodeInfo.getEndPoint().resolve()) - .isEqualTo(new InetSocketAddress("127.0.0.2", 9043)); - }); - } - - @DataProvider - public static Object[][] 
columnsToCheckV1() { - return new Object[][] {{"rpc_address"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"}}; - } - - @DataProvider - public static Object[][] columnsToCheckV2() { - return new Object[][] { - {"native_address"}, {"native_port"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"} - }; - } - - /** Mocks the query execution logic. */ - private static class TestTopologyMonitor extends DefaultTopologyMonitor { - - private final Queue queries = new ArrayDeque<>(); - - private TestTopologyMonitor(InternalDriverContext context) { - super(context); - port = 9042; - } - - private void stubQueries(StubbedQuery... queries) { - this.queries.addAll(Arrays.asList(queries)); - } - - @Override - protected CompletionStage query( - DriverChannel channel, String queryString, Map parameters) { - StubbedQuery nextQuery = queries.poll(); - assertThat(nextQuery).isNotNull(); - assertThat(nextQuery.queryString).isEqualTo(queryString); - assertThat(nextQuery.parameters).isEqualTo(parameters); - if (nextQuery.error) { - Message error = - new Error( - ProtocolConstants.ErrorCode.SERVER_ERROR, - "Unknown keyspace/cf pair (system.peers_v2)"); - return CompletableFutures.failedFuture(new UnexpectedResponseException(queryString, error)); - } - return CompletableFuture.completedFuture(nextQuery.result); - } - } - - private static class StubbedQuery { - private final String queryString; - private final Map parameters; - private final AdminResult result; - private final boolean error; - - private StubbedQuery( - String queryString, Map parameters, AdminResult result, boolean error) { - this.queryString = queryString; - this.parameters = parameters; - this.result = result; - this.error = error; - } - - private StubbedQuery(String queryString, Map parameters, AdminResult result) { - this(queryString, parameters, result, false); - } - - private StubbedQuery(String queryString, AdminResult result) { - this(queryString, Collections.emptyMap(), result); - } - } - - private AdminRow 
mockLocalRow(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("broadcast_address")) - .thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.getInetAddress("listen_address")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.getString("release_version")).thenReturn("release_version" + i); - - // The driver should not use this column for the local row, because it can contain the - // non-broadcast RPC address. Simulate the bug to ensure it's handled correctly. - when(row.isNull("rpc_address")).thenReturn(false); - when(row.getInetAddress("rpc_address")).thenReturn(InetAddress.getByName("0.0.0.0")); - - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.contains("peer")).thenReturn(false); - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - private AdminRow mockPeersRow(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.getString("release_version")).thenReturn("release_version" + i); - when(row.isNull("rpc_address")).thenReturn(false); - when(row.getInetAddress("rpc_address")).thenReturn(InetAddress.getByName("127.0.0." 
+ i)); - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.contains("peer")).thenReturn(true); - - when(row.isNull("native_address")).thenReturn(true); - when(row.isNull("native_port")).thenReturn(true); - - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - private AdminRow mockPeersV2Row(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.getInteger("peer_port")).thenReturn(7000 + i); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.getString("release_version")).thenReturn("release_version" + i); - when(row.isNull("native_address")).thenReturn(false); - when(row.getInetAddress("native_address")).thenReturn(InetAddress.getByName("127.0.0." 
+ i)); - when(row.isNull("native_port")).thenReturn(false); - when(row.getInteger("native_port")).thenReturn(9042); - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.contains("peer")).thenReturn(true); - when(row.contains("peer_port")).thenReturn(true); - when(row.contains("native_port")).thenReturn(true); - - when(row.isNull("rpc_address")).thenReturn(true); - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - // Mock row for DSE ~6.8 - private AdminRow mockPeersRowDse(int i, UUID hostId) { - try { - AdminRow row = mock(AdminRow.class); - when(row.contains("peer")).thenReturn(true); - when(row.isNull("data_center")).thenReturn(false); - when(row.getString("data_center")).thenReturn("dc" + i); - when(row.getString("dse_version")).thenReturn("6.8.30"); - when(row.contains("graph")).thenReturn(true); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); - when(row.isNull("rack")).thenReturn(false); - when(row.getString("rack")).thenReturn("rack" + i); - when(row.isNull("native_transport_address")).thenReturn(false); - when(row.getInetAddress("native_transport_address")) - .thenReturn(InetAddress.getByName("127.0.0." 
+ i)); - when(row.isNull("native_transport_port")).thenReturn(false); - when(row.getInteger("native_transport_port")).thenReturn(9042); - when(row.isNull("tokens")).thenReturn(false); - when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); - when(row.isNull("rpc_address")).thenReturn(false); - - return row; - } catch (UnknownHostException e) { - fail("unexpected", e); - return null; - } - } - - private AdminRow mockPeersRowDseWithSsl(int i, UUID hostId) { - AdminRow row = mockPeersRowDse(i, hostId); - when(row.isNull("native_transport_port_ssl")).thenReturn(false); - when(row.getInteger("native_transport_port_ssl")).thenReturn(9043); - return row; - } - - private AdminResult mockResult(AdminRow... rows) { - AdminResult result = mock(AdminResult.class); - when(result.iterator()).thenReturn(Iterators.forArray(rows)); - return result; - } - - private void assertLog(Level level, String message) { - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable logs = - filter(loggingEventCaptor.getAllValues()).with("level", level).get(); - assertThat(logs).hasSize(1); - assertThat(logs.iterator().next().getFormattedMessage()).contains(message); - } - - private void assertLogContains(Level level, String message) { - verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); - Iterable logs = - filter(loggingEventCaptor.getAllValues()).with("level", level).get(); - assertThat( - Streams.stream(logs).map(ILoggingEvent::getFormattedMessage).anyMatch(message::contains)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java deleted file mode 100644 index 679ec1be037..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class FullNodeListRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode node1; - private DefaultNode node2; - private EndPoint endPoint3; - private UUID hostId3; - - @Before - public void setup() { - 
when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - - endPoint3 = TestNodeFactory.newEndPoint(3); - hostId3 = UUID.randomUUID(); - } - - @Test - public void should_add_and_remove_nodes() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withHostId(node2.getHostId()) - .build(), - DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build()); - FullNodeListRefresh refresh = new FullNodeListRefresh(newInfos); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node2.getHostId(), hostId3); - DefaultNode node3 = (DefaultNode) result.newMetadata.getNodes().get(hostId3); - assertThat(result.events) - .containsOnly(NodeStateEvent.removed(node1), NodeStateEvent.added(node3)); - } - - @Test - public void should_update_existing_nodes() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - - UUID schemaVersion1 = Uuids.random(); - UUID schemaVersion2 = Uuids.random(); - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(node1.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack1") - .withHostId(node1.getHostId()) - .withSchemaVersion(schemaVersion1) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack2") - .withHostId(node2.getHostId()) - .withSchemaVersion(schemaVersion2) - .build()); - FullNodeListRefresh 
refresh = new FullNodeListRefresh(newInfos); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()) - .containsOnlyKeys(node1.getHostId(), node2.getHostId()); - assertThat(node1.getDatacenter()).isEqualTo("dc1"); - assertThat(node1.getRack()).isEqualTo("rack1"); - assertThat(node1.getSchemaVersion()).isEqualTo(schemaVersion1); - assertThat(node2.getDatacenter()).isEqualTo("dc1"); - assertThat(node2.getRack()).isEqualTo("rack2"); - assertThat(node2.getSchemaVersion()).isEqualTo(schemaVersion2); - assertThat(result.events).isEmpty(); - } - - @Test - public void should_ignore_duplicate_host_ids() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(node1.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack1") - .withHostId(node1.getHostId()) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack2") - .withHostId(node2.getHostId()) - .build(), - // Duplicate host id for node 2, should be ignored: - DefaultNodeInfo.builder() - .withEndPoint(node2.getEndPoint()) - .withDatacenter("dc1") - .withRack("rack3") - .withHostId(node2.getHostId()) - .build()); - FullNodeListRefresh refresh = new FullNodeListRefresh(newInfos); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()) - .containsOnlyKeys(node1.getHostId(), node2.getHostId()); - assertThat(node1.getDatacenter()).isEqualTo("dc1"); - assertThat(node1.getRack()).isEqualTo("rack1"); - assertThat(node2.getDatacenter()).isEqualTo("dc1"); - assertThat(node2.getRack()).isEqualTo("rack2"); - assertThat(result.events).isEmpty(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java deleted file mode 100644 index 3787bf8fe10..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Map; -import java.util.UUID; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class InitialNodeListRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode contactPoint1; - private DefaultNode contactPoint2; - private EndPoint endPoint3; - private UUID hostId1; - private UUID hostId2; - private UUID hostId3; - private UUID hostId4; - private UUID hostId5; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - - contactPoint1 = TestNodeFactory.newContactPoint(1, context); - contactPoint2 = TestNodeFactory.newContactPoint(2, context); - - endPoint3 = TestNodeFactory.newEndPoint(3); - hostId1 = UUID.randomUUID(); - hostId2 = UUID.randomUUID(); - hostId3 = UUID.randomUUID(); - hostId4 = UUID.randomUUID(); - hostId5 = UUID.randomUUID(); - } - - @Test - public void should_copy_contact_points_on_first_endpoint_match_only() { - // Given - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - 
.withEndPoint(contactPoint1.getEndPoint()) - // in practice there are more fields, but hostId is enough to validate the logic - .withHostId(hostId1) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(contactPoint2.getEndPoint()) - .withHostId(hostId2) - .build(), - DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build(), - DefaultNodeInfo.builder() - // address translator can translate node addresses to the same endpoints - .withEndPoint(contactPoint2.getEndPoint()) - .withHostId(hostId4) - .build(), - DefaultNodeInfo.builder() - // address translator can translate node addresses to the same endpoints - .withEndPoint(endPoint3) - .withHostId(hostId5) - .build()); - InitialNodeListRefresh refresh = - new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1, contactPoint2)); - - // When - MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); - - // Then - // contact points have been copied to the metadata, and completed with missing information - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(hostId1, hostId2, hostId3, hostId4, hostId5); - assertThat(newNodes.get(hostId1)).isEqualTo(contactPoint1); - assertThat(contactPoint1.getHostId()).isEqualTo(hostId1); - assertThat(newNodes.get(hostId2)).isEqualTo(contactPoint2); - assertThat(contactPoint2.getHostId()).isEqualTo(hostId2); - // And - // node has been added for the new endpoint - assertThat(newNodes.get(hostId3).getEndPoint()).isEqualTo(endPoint3); - assertThat(newNodes.get(hostId3).getHostId()).isEqualTo(hostId3); - // And - // nodes have been added for duplicated endpoints - assertThat(newNodes.get(hostId4).getEndPoint()).isEqualTo(contactPoint2.getEndPoint()); - assertThat(newNodes.get(hostId4).getHostId()).isEqualTo(hostId4); - assertThat(newNodes.get(hostId5).getEndPoint()).isEqualTo(endPoint3); - assertThat(newNodes.get(hostId5).getHostId()).isEqualTo(hostId5); - assertThat(result.events) - 
.containsExactlyInAnyOrder( - NodeStateEvent.added((DefaultNode) newNodes.get(hostId3)), - NodeStateEvent.added((DefaultNode) newNodes.get(hostId4)), - NodeStateEvent.added((DefaultNode) newNodes.get(hostId5))); - } - - @Test - public void should_add_other_nodes() { - // Given - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(contactPoint1.getEndPoint()) - // in practice there are more fields, but hostId is enough to validate the logic - .withHostId(hostId1) - .build(), - DefaultNodeInfo.builder() - .withEndPoint(contactPoint2.getEndPoint()) - .withHostId(hostId2) - .build(), - DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build()); - InitialNodeListRefresh refresh = - new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1, contactPoint2)); - - // When - MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); - - // Then - // new node created in addition to the contact points - Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(hostId1, hostId2, hostId3); - Node node3 = newNodes.get(hostId3); - assertThat(node3.getEndPoint()).isEqualTo(endPoint3); - assertThat(node3.getHostId()).isEqualTo(hostId3); - } - - @Test - public void should_ignore_duplicate_host_ids() { - // Given - Iterable newInfos = - ImmutableList.of( - DefaultNodeInfo.builder() - .withEndPoint(contactPoint1.getEndPoint()) - // in practice there are more fields, but hostId is enough to validate the logic - .withHostId(hostId1) - .withDatacenter("dc1") - .build(), - DefaultNodeInfo.builder() - .withEndPoint(contactPoint1.getEndPoint()) - .withDatacenter("dc2") - .withHostId(hostId1) - .build()); - InitialNodeListRefresh refresh = - new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1)); - - // When - MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); - - // Then - // only the first nodeInfo should have been copied - 
Map newNodes = result.newMetadata.getNodes(); - assertThat(newNodes).containsOnlyKeys(hostId1); - assertThat(newNodes.get(hostId1)).isEqualTo(contactPoint1); - assertThat(contactPoint1.getHostId()).isEqualTo(hostId1); - assertThat(contactPoint1.getDatacenter()).isEqualTo("dc1"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java deleted file mode 100644 index 1a0292e3947..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy.DistanceReporter; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Map; -import java.util.Objects; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class 
LoadBalancingPolicyWrapperTest { - - private DefaultNode node1; - private DefaultNode node2; - private DefaultNode node3; - - private Set contactPoints; - private Queue defaultPolicyQueryPlan; - - @Mock private InternalDriverContext context; - @Mock private LoadBalancingPolicy policy1; - @Mock private LoadBalancingPolicy policy2; - @Mock private LoadBalancingPolicy policy3; - private EventBus eventBus; - @Mock private MetadataManager metadataManager; - @Mock private Metadata metadata; - @Mock protected MetricsFactory metricsFactory; - @Captor private ArgumentCaptor> initNodesCaptor; - - private LoadBalancingPolicyWrapper wrapper; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - node3 = TestNodeFactory.newNode(3, context); - - contactPoints = ImmutableSet.of(node1, node2); - Map allNodes = - ImmutableMap.of( - Objects.requireNonNull(node1.getHostId()), node1, - Objects.requireNonNull(node2.getHostId()), node2, - Objects.requireNonNull(node3.getHostId()), node3); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(allNodes); - when(metadataManager.getContactPoints()).thenReturn(contactPoints); - when(context.getMetadataManager()).thenReturn(metadataManager); - - defaultPolicyQueryPlan = Lists.newLinkedList(ImmutableList.of(node3, node2, node1)); - when(policy1.newQueryPlan(null, null)).thenReturn(defaultPolicyQueryPlan); - - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - wrapper = - new LoadBalancingPolicyWrapper( - context, - ImmutableMap.of( - DriverExecutionProfile.DEFAULT_NAME, - policy1, - "profile1", - policy1, - "profile2", - policy2, - "profile3", - policy3)); - } - - @Test - public void should_build_query_plan_from_contact_points_before_init() { - // When - Queue queryPlan = wrapper.newQueryPlan(); - - // Then - for 
(LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy, never()).newQueryPlan(null, null); - } - assertThat(queryPlan).hasSameElementsAs(contactPoints); - } - - @Test - public void should_fetch_query_plan_from_policy_after_init() { - // Given - wrapper.init(); - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).init(anyMap(), any(DistanceReporter.class)); - } - - // When - Queue queryPlan = wrapper.newQueryPlan(); - - // Then - // no-arg newQueryPlan() uses the default profile - verify(policy1).newQueryPlan(null, null); - assertThat(queryPlan).isEqualTo(defaultPolicyQueryPlan); - } - - @Test - public void should_init_policies_with_all_nodes() { - // Given - node1.state = NodeState.UP; - node2.state = NodeState.UNKNOWN; - node3.state = NodeState.DOWN; - - // When - wrapper.init(); - - // Then - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).init(initNodesCaptor.capture(), any(DistanceReporter.class)); - Map initNodes = initNodesCaptor.getValue(); - assertThat(initNodes.values()).containsOnly(node1, node2, node3); - } - } - - @Test - public void should_propagate_distances_from_policies() { - // Given - wrapper.init(); - ArgumentCaptor captor1 = ArgumentCaptor.forClass(DistanceReporter.class); - verify(policy1).init(anyMap(), captor1.capture()); - DistanceReporter distanceReporter1 = captor1.getValue(); - ArgumentCaptor captor2 = ArgumentCaptor.forClass(DistanceReporter.class); - verify(policy2).init(anyMap(), captor2.capture()); - DistanceReporter distanceReporter2 = captor1.getValue(); - ArgumentCaptor captor3 = ArgumentCaptor.forClass(DistanceReporter.class); - verify(policy3).init(anyMap(), captor3.capture()); - DistanceReporter distanceReporter3 = captor3.getValue(); - - InOrder inOrder = inOrder(eventBus); - - // When - distanceReporter1.setDistance(node1, NodeDistance.REMOTE); - - // Then - // first event defines the distance 
- inOrder.verify(eventBus).fire(new DistanceEvent(NodeDistance.REMOTE, node1)); - - // When - distanceReporter2.setDistance(node1, NodeDistance.REMOTE); - - // Then - // event is ignored if the node is already at this distance - inOrder.verify(eventBus, times(0)).fire(any(DistanceEvent.class)); - - // When - distanceReporter2.setDistance(node1, NodeDistance.LOCAL); - - // Then - // event is applied if it sets a smaller distance - inOrder.verify(eventBus).fire(new DistanceEvent(NodeDistance.LOCAL, node1)); - - // When - distanceReporter3.setDistance(node1, NodeDistance.IGNORED); - - // Then - // event is ignored if the node is already at a closer distance - inOrder.verify(eventBus, times(0)).fire(any(DistanceEvent.class)); - } - - @Test - public void should_not_propagate_node_states_to_policies_until_init() { - // When - eventBus.fire(NodeStateEvent.changed(NodeState.UNKNOWN, NodeState.UP, node1)); - - // Then - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy, never()).onUp(node1); - } - } - - @Test - public void should_propagate_node_states_to_policies_after_init() { - // Given - wrapper.init(); - - // When - eventBus.fire(NodeStateEvent.changed(NodeState.UNKNOWN, NodeState.UP, node1)); - - // Then - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).onUp(node1); - } - } - - @Test - public void should_accumulate_events_during_init_and_replay() throws InterruptedException { - // Given - // Hack to obtain concurrency: the main thread releases another thread and blocks; then the - // other thread fires an event on the bus and unblocks the main thread. 
- CountDownLatch eventLatch = new CountDownLatch(1); - CountDownLatch initLatch = new CountDownLatch(1); - - // When - Runnable runnable = - () -> { - try { - eventLatch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - eventBus.fire(NodeStateEvent.changed(NodeState.UNKNOWN, NodeState.DOWN, node1)); - initLatch.countDown(); - }; - Thread thread = new Thread(runnable); - thread.start(); - wrapper.init(); - - // Then - // unblock the thread that will fire the event, and waits until it finishes - eventLatch.countDown(); - boolean ok = initLatch.await(500, TimeUnit.MILLISECONDS); - assertThat(ok).isTrue(); - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - verify(policy).onDown(node1); - } - thread.join(500); - assertThat(thread.isAlive()).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java deleted file mode 100644 index f9a909400f9..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import io.netty.channel.DefaultEventLoopGroup; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import 
java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class MetadataManagerTest { - - // Don't use 1 because that's the default when no contact points are provided - private static final EndPoint END_POINT2 = TestNodeFactory.newEndPoint(2); - private static final EndPoint END_POINT3 = TestNodeFactory.newEndPoint(3); - - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private ControlConnection controlConnection; - @Mock private TopologyMonitor topologyMonitor; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private EventBus eventBus; - @Mock private SchemaQueriesFactory schemaQueriesFactory; - @Mock private SchemaParserFactory schemaParserFactory; - @Mock protected MetricsFactory metricsFactory; - - private DefaultEventLoopGroup adminEventLoopGroup; - - private TestMetadataManager metadataManager; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - adminEventLoopGroup = new DefaultEventLoopGroup(1); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - when(context.getTopologyMonitor()).thenReturn(topologyMonitor); - when(context.getControlConnection()).thenReturn(controlConnection); - - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW)) - .thenReturn(Duration.ZERO); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS)).thenReturn(1); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - when(context.getEventBus()).thenReturn(eventBus); - when(context.getSchemaQueriesFactory()).thenReturn(schemaQueriesFactory); - when(context.getSchemaParserFactory()).thenReturn(schemaParserFactory); - - 
when(context.getMetricsFactory()).thenReturn(metricsFactory); - - metadataManager = new TestMetadataManager(context); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - @Test - public void should_add_contact_points() { - // When - metadataManager.addContactPoints(ImmutableSet.of(END_POINT2)); - - // Then - assertThat(metadataManager.getContactPoints()) - .extracting(Node::getEndPoint) - .containsOnly(END_POINT2); - assertThat(metadataManager.wasImplicitContactPoint()).isFalse(); - } - - @Test - public void should_use_default_if_no_contact_points_provided() { - // When - metadataManager.addContactPoints(Collections.emptySet()); - - // Then - assertThat(metadataManager.getContactPoints()) - .extracting(Node::getEndPoint) - .containsOnly(MetadataManager.DEFAULT_CONTACT_POINT); - assertThat(metadataManager.wasImplicitContactPoint()).isTrue(); - } - - @Test - public void should_copy_contact_points_on_refresh_of_all_nodes() { - // Given - // Run previous scenario to trigger the addition of the default contact point: - should_use_default_if_no_contact_points_provided(); - - NodeInfo info1 = mock(NodeInfo.class); - NodeInfo info2 = mock(NodeInfo.class); - List infos = ImmutableList.of(info1, info2); - when(topologyMonitor.refreshNodeList()).thenReturn(CompletableFuture.completedFuture(infos)); - - // When - CompletionStage refreshNodesFuture = metadataManager.refreshNodes(); - waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); - - // Then - assertThatStage(refreshNodesFuture).isSuccess(); - assertThat(metadataManager.refreshes).hasSize(1); - InitialNodeListRefresh refresh = (InitialNodeListRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.contactPoints) - .extracting(Node::getEndPoint) - .containsOnly(MetadataManager.DEFAULT_CONTACT_POINT); - assertThat(refresh.nodeInfos).containsExactlyInAnyOrder(info1, info2); - } - - @Test - public void should_refresh_all_nodes() 
{ - // Given - // Run previous scenario to trigger the addition of the default contact point and a first - // refresh: - should_copy_contact_points_on_refresh_of_all_nodes(); - // Discard that first refresh, we don't really care about it in the context of this test, only - // that the next one won't be the first - metadataManager.refreshes.clear(); - - NodeInfo info1 = mock(NodeInfo.class); - NodeInfo info2 = mock(NodeInfo.class); - List infos = ImmutableList.of(info1, info2); - when(topologyMonitor.refreshNodeList()).thenReturn(CompletableFuture.completedFuture(infos)); - - // When - CompletionStage refreshNodesFuture = metadataManager.refreshNodes(); - waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); - - // Then - assertThatStage(refreshNodesFuture).isSuccess(); - assertThat(metadataManager.refreshes).hasSize(1); - FullNodeListRefresh refresh = (FullNodeListRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.nodeInfos).containsExactlyInAnyOrder(info1, info2); - } - - @Test - public void should_refresh_single_node() { - // Given - Node node = TestNodeFactory.newNode(2, context); - NodeInfo info = mock(NodeInfo.class); - when(info.getDatacenter()).thenReturn("dc1"); - when(info.getHostId()).thenReturn(UUID.randomUUID()); - when(info.getEndPoint()).thenReturn(node.getEndPoint()); - when(topologyMonitor.refreshNode(node)) - .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); - - // When - CompletionStage refreshNodeFuture = metadataManager.refreshNode(node); - - // Then - // the info should have been copied to the node - assertThatStage(refreshNodeFuture).isSuccess(); - verify(info, timeout(500)).getDatacenter(); - assertThat(node.getDatacenter()).isEqualTo("dc1"); - } - - @Test - public void should_ignore_node_refresh_if_topology_monitor_does_not_have_info() { - // Given - Node node = mock(Node.class); - when(topologyMonitor.refreshNode(node)) - .thenReturn(CompletableFuture.completedFuture(Optional.empty())); - - // 
When - CompletionStage refreshNodeFuture = metadataManager.refreshNode(node); - - // Then - assertThatStage(refreshNodeFuture).isSuccess(); - } - - @Test - public void should_add_node() { - // Given - InetSocketAddress broadcastRpcAddress = ((InetSocketAddress) END_POINT2.resolve()); - NodeInfo info = mock(NodeInfo.class); - when(info.getBroadcastRpcAddress()).thenReturn(Optional.of(broadcastRpcAddress)); - when(topologyMonitor.getNewNodeInfo(broadcastRpcAddress)) - .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); - - // When - metadataManager.addNode(broadcastRpcAddress); - waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).hasSize(1); - AddNodeRefresh refresh = (AddNodeRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.newNodeInfo).isEqualTo(info); - } - - @Test - public void should_not_add_node_if_broadcast_rpc_address_does_not_match() { - // Given - InetSocketAddress broadcastRpcAddress2 = ((InetSocketAddress) END_POINT2.resolve()); - InetSocketAddress broadcastRpcAddress3 = ((InetSocketAddress) END_POINT3.resolve()); - NodeInfo info = mock(NodeInfo.class); - when(topologyMonitor.getNewNodeInfo(broadcastRpcAddress2)) - .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); - when(info.getBroadcastRpcAddress()) - .thenReturn( - Optional.of(broadcastRpcAddress3) // Does not match the address we got the info with - ); - - // When - metadataManager.addNode(broadcastRpcAddress2); - waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).isEmpty(); - } - - @Test - public void should_not_add_node_if_topology_monitor_does_not_have_info() { - // Given - InetSocketAddress broadcastRpcAddress2 = ((InetSocketAddress) END_POINT2.resolve()); - when(topologyMonitor.getNewNodeInfo(broadcastRpcAddress2)) - .thenReturn(CompletableFuture.completedFuture(Optional.empty())); - - // When - 
metadataManager.addNode(broadcastRpcAddress2); - waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).isEmpty(); - } - - @Test - public void should_remove_node() { - // Given - InetSocketAddress broadcastRpcAddress2 = ((InetSocketAddress) END_POINT2.resolve()); - - // When - metadataManager.removeNode(broadcastRpcAddress2); - waitForPendingAdminTasks(() -> metadataManager.removeNodeCount == 1); - - // Then - assertThat(metadataManager.refreshes).hasSize(1); - RemoveNodeRefresh refresh = (RemoveNodeRefresh) metadataManager.refreshes.get(0); - assertThat(refresh.broadcastRpcAddressToRemove).isEqualTo(broadcastRpcAddress2); - } - - @Test - public void refreshSchema_should_work() { - // Given - IllegalStateException expectedException = new IllegalStateException("Error we're testing"); - when(schemaQueriesFactory.newInstance()).thenThrow(expectedException); - when(topologyMonitor.refreshNodeList()) - .thenReturn(CompletableFuture.completedFuture(ImmutableList.of(mock(NodeInfo.class)))); - when(topologyMonitor.checkSchemaAgreement()) - .thenReturn(CompletableFuture.completedFuture(Boolean.TRUE)); - when(controlConnection.init(anyBoolean(), anyBoolean(), anyBoolean())) - .thenReturn(CompletableFuture.completedFuture(null)); - metadataManager.refreshNodes(); // required internal state setup for this - waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); // sanity check - - // When - CompletionStage result = - metadataManager.refreshSchema("foo", true, true); - - // Then - waitForPendingAdminTasks(() -> result.toCompletableFuture().isDone()); - assertThatStage(result).isFailed(t -> assertThat(t).isEqualTo(expectedException)); - } - - private static class TestMetadataManager extends MetadataManager { - - private List refreshes = new CopyOnWriteArrayList<>(); - private volatile int addNodeCount = 0; - private volatile int removeNodeCount = 0; - - public 
TestMetadataManager(InternalDriverContext context) { - super(context); - } - - @Override - Void apply(MetadataRefresh refresh) { - // Do not execute refreshes, just store them for inspection in the test - refreshes.add(refresh); - return null; - } - - @Override - public void addNode(InetSocketAddress broadcastRpcAddress) { - // Keep track of addNode calls for condition checking - synchronized (this) { - ++addNodeCount; - } - super.addNode(broadcastRpcAddress); - } - - @Override - public void removeNode(InetSocketAddress broadcastRpcAddress) { - // Keep track of removeNode calls for condition checking - synchronized (this) { - ++removeNodeCount; - } - super.removeNode(broadcastRpcAddress); - } - } - - // Wait for all the tasks on the pool's admin executor to complete. - private void waitForPendingAdminTasks(Callable condition) { - await().atMost(500, TimeUnit.MILLISECONDS).until(condition); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java deleted file mode 100644 index 8e9f591510a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.session.Session; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class MultiplexingNodeStateListenerTest { - - @Mock private NodeStateListener child1; - @Mock private NodeStateListener child2; - @Mock private Node node; - @Mock private Session session; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - private Level initialLogLevel; - - @Before - public void addAppenders() { - logger = (Logger) LoggerFactory.getLogger(MultiplexingNodeStateListener.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void removeAppenders() { - logger.detachAppender(appender); - 
logger.setLevel(initialLogLevel); - } - - @Test - public void should_register() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(); - // when - listener.register(child1); - listener.register(child2); - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_constructor() { - // given - MultiplexingNodeStateListener listener = - new MultiplexingNodeStateListener(new MultiplexingNodeStateListener(child1, child2)); - // when - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_register() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(); - // when - listener.register(new MultiplexingNodeStateListener(child1, child2)); - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_notify_onUp() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onUp(node); - // when - listener.onUp(node); - // then - verify(child1).onUp(node); - verify(child2).onUp(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onUp event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onDown() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onDown(node); - // when - listener.onDown(node); - // then - verify(child1).onDown(node); - verify(child2).onDown(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onDown event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onAdd() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAdd(node); - // when - listener.onAdd(node); - // then - verify(child1).onAdd(node); - verify(child2).onAdd(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onAdd event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onRemove() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onRemove(node); - // when - listener.onRemove(node); - // then - verify(child1).onRemove(node); - verify(child2).onRemove(node); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onRemove event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onSessionReady() { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onSessionReady(session); - // when - listener.onSessionReady(session); - // then - verify(child1).onSessionReady(session); - verify(child2).onSessionReady(session); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying node state listener child1 of an onSessionReady event. (NullPointerException: null)"); - } - - @Test - public void should_notify_close() throws Exception { - // given - MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); - Exception child1Error = new NullPointerException(); - willThrow(child1Error).given(child1).close(); - // when - listener.close(); - // then - verify(child1).close(); - verify(child2).close(); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while closing node state listener child1. (NullPointerException: null)"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java deleted file mode 100644 index d99b06a33ae..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java +++ /dev/null @@ -1,546 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import 
com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.util.concurrent.Future; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class NodeStateManagerTest { - private static final InetSocketAddress NEW_ADDRESS = new InetSocketAddress("127.0.0.3", 9042); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private NettyOptions nettyOptions; - @Mock private MetadataManager metadataManager; - @Mock protected MetricsFactory metricsFactory; - private DefaultNode node1, node2; - private EventBus eventBus; - private DefaultEventLoopGroup adminEventLoopGroup; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - // Disable debouncing by default, tests that need it will override - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW)) - .thenReturn(Duration.ofSeconds(0)); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)).thenReturn(1); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - this.eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - adminEventLoopGroup = new DefaultEventLoopGroup(1, new BlockingOperation.SafeThreadFactory()); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - 
when(context.getMetricsFactory()).thenReturn(metricsFactory); - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - ImmutableMap nodes = - ImmutableMap.builder() - .put(node1.getHostId(), node1) - .put(node2.getHostId(), node2) - .build(); - Metadata metadata = new DefaultMetadata(nodes, Collections.emptyMap(), null, null); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadataManager.refreshNode(any(Node.class))) - .thenReturn(CompletableFuture.completedFuture(null)); - when(context.getMetadataManager()).thenReturn(metadataManager); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - @Test - public void should_ignore_up_event_if_node_is_already_up_or_forced_down() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.UP, NodeState.FORCED_DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.suggestUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(oldState); - } - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_up_event_if_node_is_unknown_or_down() { - new NodeStateManager(context); - - int i = 0; - for (NodeState oldState : ImmutableList.of(NodeState.UNKNOWN, NodeState.DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.suggestUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - if (oldState != NodeState.UNKNOWN) { - verify(metadataManager, times(++i)).refreshNode(node1); - } - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.UP, node1)); - } - } - - @Test - public void should_refresh_node_list_if_up_event_and_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - 
eventBus.fire(TopologyEvent.suggestUp(NEW_ADDRESS)); - waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager).refreshNodes(); - } - - @Test - public void should_ignore_down_event_if_node_is_down_or_forced_down() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.DOWN, NodeState.FORCED_DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(oldState); - } - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_ignore_down_event_if_node_has_active_connections() { - new NodeStateManager(context); - node1.state = NodeState.UP; - eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - assertThat(node1.openConnections).isEqualTo(1); - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_down_event_if_node_has_no_active_connections() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.UP, NodeState.UNKNOWN)) { - // Given - node1.state = oldState; - assertThat(node1.openConnections).isEqualTo(0); - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.DOWN); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.DOWN, node1)); - } - } - - @Test - public void should_ignore_down_event_if_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.suggestDown(NEW_ADDRESS)); - 
waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager, never()).addNode(NEW_ADDRESS); - } - - @Test - public void should_ignore_force_down_event_if_already_forced_down() { - // Given - new NodeStateManager(context); - node1.state = NodeState.FORCED_DOWN; - - // When - eventBus.fire(TopologyEvent.forceDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.FORCED_DOWN); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_force_down_event_over_any_other_state() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.UNKNOWN, NodeState.DOWN, NodeState.UP)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.forceDown(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.FORCED_DOWN); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.FORCED_DOWN, node1)); - } - } - - @Test - public void should_ignore_force_down_event_if_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.forceDown(NEW_ADDRESS)); - waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager, never()).addNode(NEW_ADDRESS); - } - - @Test - public void should_ignore_force_up_event_if_node_is_already_up() { - // Given - new NodeStateManager(context); - node1.state = NodeState.UP; - - // When - eventBus.fire(TopologyEvent.forceUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_apply_force_up_event_if_node_is_not_up() { - new NodeStateManager(context); - - int i = 
0; - for (NodeState oldState : - ImmutableList.of(NodeState.UNKNOWN, NodeState.DOWN, NodeState.FORCED_DOWN)) { - // Given - node1.state = oldState; - - // When - eventBus.fire(TopologyEvent.forceUp(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.UP, node1)); - if (oldState != NodeState.UNKNOWN) { - verify(metadataManager, times(++i)).refreshNode(node1); - } - } - } - - @Test - public void should_add_node_if_force_up_and_not_in_metadata() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.forceUp(NEW_ADDRESS)); - waitForPendingAdminTasks(); - - // Then - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager).addNode(NEW_ADDRESS); - } - - @Test - public void should_notify_metadata_of_node_addition() { - // Given - new NodeStateManager(context); - InetSocketAddress newAddress = NEW_ADDRESS; - - // When - eventBus.fire(TopologyEvent.suggestAdded(newAddress)); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager).addNode(newAddress); - } - - @Test - public void should_ignore_addition_of_existing_node() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.suggestAdded(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager, never()).addNode(any(InetSocketAddress.class)); - } - - @Test - public void should_notify_metadata_of_node_removal() { - // Given - new NodeStateManager(context); - - // When - eventBus.fire(TopologyEvent.suggestRemoved(node1.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager).removeNode(node1.getBroadcastRpcAddress().get()); - } - - @Test - public void should_ignore_removal_of_nonexistent_node() { - // Given - new NodeStateManager(context); - InetSocketAddress newAddress = NEW_ADDRESS; - 
- // When - eventBus.fire(TopologyEvent.suggestRemoved(newAddress)); - waitForPendingAdminTasks(); - - // Then - verify(metadataManager, never()).removeNode(any(InetSocketAddress.class)); - } - - @Test - public void should_coalesce_topology_events() { - // Given - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW)) - .thenReturn(Duration.ofDays(1)); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)).thenReturn(5); - new NodeStateManager(context); - node1.state = NodeState.FORCED_DOWN; - node2.state = NodeState.DOWN; - - // When - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.forceUp(node1.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.suggestDown(node2.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.suggestDown(node1.getBroadcastRpcAddress().get())); - eventBus.fire(TopologyEvent.suggestUp(node2.getBroadcastRpcAddress().get())); - waitForPendingAdminTasks(); - - // Then - // down / forceUp / down => keep the last forced event => forceUp - assertThat(node1.state).isEqualTo(NodeState.UP); - // down / up => keep the last => up - assertThat(node2.state).isEqualTo(NodeState.UP); - } - - @Test - public void should_track_open_connections() { - new NodeStateManager(context); - - assertThat(node1.openConnections).isEqualTo(0); - - eventBus.fire(ChannelEvent.channelOpened(node1)); - eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - assertThat(node1.openConnections).isEqualTo(2); - - eventBus.fire(ChannelEvent.channelClosed(node1)); - waitForPendingAdminTasks(); - assertThat(node1.openConnections).isEqualTo(1); - } - - @Test - public void should_mark_node_up_if_down_or_unknown_and_connection_opened() { - new NodeStateManager(context); - - for (NodeState oldState : ImmutableList.of(NodeState.DOWN, NodeState.UNKNOWN)) { - // Given - node1.state = oldState; - - // When - 
eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus).fire(NodeStateEvent.changed(oldState, NodeState.UP, node1)); - } - } - - @Test - public void should_not_mark_node_up_if_forced_down_and_connection_opened() { - // Given - new NodeStateManager(context); - node1.state = NodeState.FORCED_DOWN; - - // When - eventBus.fire(ChannelEvent.channelOpened(node1)); - waitForPendingAdminTasks(); - - // Then - assertThat(node1.state).isEqualTo(NodeState.FORCED_DOWN); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_track_reconnections() { - new NodeStateManager(context); - - assertThat(node1.reconnections).isEqualTo(0); - - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - assertThat(node1.reconnections).isEqualTo(2); - - eventBus.fire(ChannelEvent.reconnectionStopped(node1)); - waitForPendingAdminTasks(); - assertThat(node1.reconnections).isEqualTo(1); - } - - @Test - public void should_mark_node_down_if_reconnection_starts_with_no_connections() { - new NodeStateManager(context); - - node1.state = NodeState.UP; - node1.openConnections = 1; - - eventBus.fire(ChannelEvent.channelClosed(node1)); - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.state).isEqualTo(NodeState.DOWN); - verify(eventBus).fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); - } - - @Test - public void should_mark_node_down_if_no_connections_and_reconnection_already_started() { - new NodeStateManager(context); - - node1.state = NodeState.UP; - node1.openConnections = 1; - - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - eventBus.fire(ChannelEvent.channelClosed(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.state).isEqualTo(NodeState.DOWN); - 
verify(eventBus).fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); - } - - @Test - public void should_keep_node_up_if_reconnection_starts_with_some_connections() { - new NodeStateManager(context); - - node1.state = NodeState.UP; - node1.openConnections = 2; - - eventBus.fire(ChannelEvent.channelClosed(node1)); - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.state).isEqualTo(NodeState.UP); - verify(eventBus, never()).fire(any(NodeStateEvent.class)); - } - - @Test - public void should_ignore_events_when_closed() throws Exception { - NodeStateManager manager = new NodeStateManager(context); - assertThat(node1.reconnections).isEqualTo(0); - - manager.close(); - - eventBus.fire(ChannelEvent.reconnectionStarted(node1)); - waitForPendingAdminTasks(); - - assertThat(node1.reconnections).isEqualTo(0); - } - - // Wait for all the tasks on the pool's admin executor to complete. - private void waitForPendingAdminTasks() { - // This works because the event loop group is single-threaded - Future f = adminEventLoopGroup.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 100, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java deleted file mode 100644 index c1a189259d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PeerRowValidatorTest { - - @DataProvider - public static Object[][] nullColumnsV1() { - return new Object[][] { - {"rpc_address"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"}, {"schema_version"} - }; - } - - @DataProvider - public static Object[][] nullColumnsV2() { - return new Object[][] { - {"native_address"}, - {"native_port"}, - {"host_id"}, - {"data_center"}, - {"rack"}, - {"tokens"}, - {"schema_version"} - }; - } - - @Test - @UseDataProvider("nullColumnsV1") - public void should_fail_for_invalid_peer_v1(String nullColumn) { - assertThat(PeerRowValidator.isValid(mockRowV1(nullColumn))).isFalse(); - } - - @Test - @UseDataProvider("nullColumnsV2") - public void should_fail_for_invalid_peer_v2(String 
nullColumn) { - assertThat(PeerRowValidator.isValid(mockRowV2(nullColumn))).isFalse(); - } - - @Test - public void should_succeed_for_valid_peer_v1() { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(false); - when(peerRow.isNull("rpc_address")).thenReturn(false); - when(peerRow.isNull("native_address")).thenReturn(true); - when(peerRow.isNull("native_port")).thenReturn(true); - when(peerRow.isNull("data_center")).thenReturn(false); - when(peerRow.isNull("rack")).thenReturn(false); - when(peerRow.isNull("tokens")).thenReturn(false); - when(peerRow.isNull("schema_version")).thenReturn(false); - - assertThat(PeerRowValidator.isValid(peerRow)).isTrue(); - } - - @Test - public void should_succeed_for_valid_peer_v2() { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(false); - when(peerRow.isNull("rpc_address")).thenReturn(true); - when(peerRow.isNull("native_address")).thenReturn(false); - when(peerRow.isNull("native_port")).thenReturn(false); - when(peerRow.isNull("data_center")).thenReturn(false); - when(peerRow.isNull("rack")).thenReturn(false); - when(peerRow.isNull("tokens")).thenReturn(false); - when(peerRow.isNull("schema_version")).thenReturn(false); - - assertThat(PeerRowValidator.isValid(peerRow)).isTrue(); - } - - private AdminRow mockRowV1(String nullColumn) { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(nullColumn.equals("host_id")); - when(peerRow.isNull("rpc_address")).thenReturn(nullColumn.equals("rpc_address")); - when(peerRow.isNull("native_address")).thenReturn(true); - when(peerRow.isNull("native_port")).thenReturn(true); - when(peerRow.isNull("data_center")).thenReturn(nullColumn.equals("data_center")); - when(peerRow.isNull("rack")).thenReturn(nullColumn.equals("rack")); - when(peerRow.isNull("tokens")).thenReturn(nullColumn.equals("tokens")); - 
when(peerRow.isNull("schema_version")).thenReturn(nullColumn.equals("schema_version")); - - return peerRow; - } - - private AdminRow mockRowV2(String nullColumn) { - AdminRow peerRow = mock(AdminRow.class); - when(peerRow.isNull("host_id")).thenReturn(nullColumn.equals("host_id")); - when(peerRow.isNull("native_address")).thenReturn(nullColumn.equals("native_address")); - when(peerRow.isNull("native_port")).thenReturn(nullColumn.equals("native_port")); - when(peerRow.isNull("rpc_address")).thenReturn(true); - when(peerRow.isNull("data_center")).thenReturn(nullColumn.equals("data_center")); - when(peerRow.isNull("rack")).thenReturn(nullColumn.equals("rack")); - when(peerRow.isNull("tokens")).thenReturn(nullColumn.equals("tokens")); - when(peerRow.isNull("schema_version")).thenReturn(nullColumn.equals("schema_version")); - - return peerRow; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java deleted file mode 100644 index f2a4b36a3c3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class RemoveNodeRefreshTest { - - @Mock private InternalDriverContext context; - @Mock protected MetricsFactory metricsFactory; - @Mock private ChannelFactory channelFactory; - - private DefaultNode node1; - private DefaultNode node2; - - @Before - public void setup() { - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(context.getChannelFactory()).thenReturn(channelFactory); - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); - } - - @Test - public void should_remove_existing_node() { - // Given - DefaultMetadata oldMetadata = - new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), - Collections.emptyMap(), - null, - null); - RemoveNodeRefresh refresh = new RemoveNodeRefresh(node2.getBroadcastRpcAddress().get()); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node1.getHostId()); - assertThat(result.events).containsExactly(NodeStateEvent.removed(node2)); - } - - @Test - public void should_not_remove_nonexistent_node() { - // Given - DefaultMetadata oldMetadata = - 
new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); - RemoveNodeRefresh refresh = new RemoveNodeRefresh(node2.getBroadcastRpcAddress().get()); - - // When - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - - // Then - assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node1.getHostId()); - assertThat(result.events).isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java deleted file mode 100644 index 5e0dfbd7802..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.channel.EventLoop; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Map; -import java.util.Objects; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class 
SchemaAgreementCheckerTest { - - private static final UUID VERSION1 = UUID.randomUUID(); - private static final UUID VERSION2 = UUID.randomUUID(); - - private static final UUID NODE_2_HOST_ID = UUID.randomUUID(); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultConfig; - @Mock private DriverChannel channel; - @Mock private EventLoop eventLoop; - @Mock private MetadataManager metadataManager; - @Mock private MetricsFactory metricsFactory; - @Mock private Metadata metadata; - @Mock private DefaultNode node1; - @Mock private DefaultNode node2; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, NODE_2_HOST_ID, context); - - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT)) - .thenReturn(Duration.ofSeconds(1)); - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL)) - .thenReturn(Duration.ofMillis(200)); - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT)) - .thenReturn(Duration.ofSeconds(10)); - when(defaultConfig.getBoolean(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN)) - .thenReturn(true); - when(config.getDefaultProfile()).thenReturn(defaultConfig); - when(context.getConfig()).thenReturn(config); - - Map nodes = - ImmutableMap.of( - Objects.requireNonNull(node1.getHostId()), - node1, - Objects.requireNonNull(node2.getHostId()), - node2); - when(metadata.getNodes()).thenReturn(nodes); - when(metadataManager.getMetadata()).thenReturn(metadata); - when(context.getMetadataManager()).thenReturn(metadataManager); - - node2.state = NodeState.UP; - - when(eventLoop.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))) - .thenAnswer( - invocation -> { // Ignore delay and run immediately: - Runnable 
task = invocation.getArgument(0); - task.run(); - return null; - }); - when(channel.eventLoop()).thenReturn(eventLoop); - } - - @Test - public void should_skip_if_timeout_is_zero() { - // Given - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT)) - .thenReturn(Duration.ZERO); - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isFalse()); - } - - @Test - public void should_succeed_if_only_one_node() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(/*empty*/ ))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_succeed_if_versions_match_on_first_try() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_ignore_down_peers() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - node2.state = NodeState.DOWN; - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", 
mockResult(mockValidPeerRow(VERSION2)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @DataProvider - public static Object[][] malformedPeer() { - return new Object[][] { - // missing host id - {mockPeerRow(null, VERSION2, true, true, true, true)}, - // missing schema version - {mockPeerRow(NODE_2_HOST_ID, null, true, true, true, true)}, - // missing datacenter - {mockPeerRow(NODE_2_HOST_ID, VERSION2, false, true, true, true)}, - // missing rack - {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, false, true, true)}, - // missing RPC address - {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, true, false, true)}, - // missing tokens - {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, true, true, false)}, - }; - } - - @Test - @UseDataProvider("malformedPeer") - public void should_ignore_malformed_rows(AdminRow malformedPeer) { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(malformedPeer))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_reschedule_if_versions_do_not_match_on_first_try() { - // Given - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - // First round - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION2))), - - // Second round - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM 
system.peers", mockResult(mockValidPeerRow(VERSION1)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); - } - - @Test - public void should_fail_if_versions_do_not_match_after_timeout() { - // Given - when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT)) - .thenReturn(Duration.ofNanos(10)); - TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); - checker.stubQueries( - new StubbedQuery( - "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockLocalRow(VERSION1))), - new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); - - // When - CompletionStage future = checker.run(); - - // Then - assertThatStage(future).isSuccess(b -> assertThat(b).isFalse()); - } - - /** Extend to mock the query execution logic. */ - private static class TestSchemaAgreementChecker extends SchemaAgreementChecker { - - private final Queue queries = new ArrayDeque<>(); - - TestSchemaAgreementChecker(DriverChannel channel, InternalDriverContext context) { - super(channel, context, "test"); - } - - private void stubQueries(StubbedQuery... 
queries) { - this.queries.addAll(Arrays.asList(queries)); - } - - @Override - protected CompletionStage query(String queryString) { - StubbedQuery nextQuery = queries.poll(); - assertThat(nextQuery).isNotNull(); - assertThat(queryString).isEqualTo(nextQuery.queryString); - return CompletableFuture.completedFuture(nextQuery.result); - } - } - - private static class StubbedQuery { - private final String queryString; - private final AdminResult result; - - private StubbedQuery(String queryString, AdminResult result) { - this.queryString = queryString; - this.result = result; - } - } - - private AdminRow mockLocalRow(@SuppressWarnings("SameParameterValue") UUID schemaVersion) { - AdminRow row = mock(AdminRow.class); - when(row.getUuid("host_id")).thenReturn(node1.getHostId()); - when(row.getUuid("schema_version")).thenReturn(schemaVersion); - return row; - } - - private AdminRow mockValidPeerRow(UUID schemaVersion) { - return mockPeerRow(node2.getHostId(), schemaVersion, true, true, true, true); - } - - private static AdminRow mockPeerRow( - UUID hostId, - UUID schemaVersion, - boolean hasDatacenter, - boolean hasRack, - boolean hasRpcAddress, - boolean hasTokens) { - AdminRow row = mock(AdminRow.class); - when(row.getUuid("host_id")).thenReturn(hostId); - when(row.isNull("host_id")).thenReturn(hostId == null); - when(row.getUuid("schema_version")).thenReturn(schemaVersion); - when(row.isNull("schema_version")).thenReturn(schemaVersion == null); - when(row.isNull("data_center")).thenReturn(!hasDatacenter); - when(row.isNull("rack")).thenReturn(!hasRack); - when(row.isNull("tokens")).thenReturn(!hasTokens); - when(row.isNull("rpc_address")).thenReturn(!hasRpcAddress); - when(row.isNull("native_address")).thenReturn(true); - when(row.isNull("native_port")).thenReturn(true); - return row; - } - - private AdminResult mockResult(AdminRow... 
rows) { - AdminResult result = mock(AdminResult.class); - when(result.iterator()).thenReturn(Iterators.forArray(rows)); - return result; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java deleted file mode 100644 index 7986834bca2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.net.InetSocketAddress; -import java.util.UUID; - -public class TestNodeFactory { - - public static DefaultNode newNode(int lastIpByte, InternalDriverContext context) { - DefaultNode node = newContactPoint(lastIpByte, context); - node.hostId = UUID.randomUUID(); - node.broadcastRpcAddress = ((InetSocketAddress) node.getEndPoint().resolve()); - return node; - } - - public static DefaultNode newNode(int lastIpByte, UUID hostId, InternalDriverContext context) { - DefaultNode node = newContactPoint(lastIpByte, context); - node.hostId = hostId; - node.broadcastRpcAddress = ((InetSocketAddress) node.getEndPoint().resolve()); - return node; - } - - public static DefaultNode newContactPoint(int lastIpByte, InternalDriverContext context) { - DefaultEndPoint endPoint = newEndPoint(lastIpByte); - return new DefaultNode(endPoint, context); - } - - public static DefaultEndPoint newEndPoint(int lastByteOfIp) { - return new DefaultEndPoint(new InetSocketAddress("127.0.0." + lastByteOfIp, 9042)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java deleted file mode 100644 index b772d243976..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import org.junit.Test; - -public class IndexMetadataTest { - - @Test - public void should_describe_custom_index_class_correctly() { - IndexMetadata indexMetadata = - new DefaultIndexMetadata( - CqlIdentifier.fromCql("ks1"), - CqlIdentifier.fromCql("myTable"), - CqlIdentifier.fromCql("myName"), - IndexKind.CUSTOM, - "myTarget", - ImmutableMap.of("class_name", "com.datastax.MyClass")); - String describe = indexMetadata.describe(true); - assertThat(describe) - .contains( - "CREATE CUSTOM INDEX myname ON ks1.mytable (myTarget)\n" - + "USING 'com.datastax.MyClass'"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java deleted file mode 100644 index a7dee02f5e3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class 
MultiplexingSchemaChangeListenerTest { - - @Mock private SchemaChangeListener child1; - @Mock private SchemaChangeListener child2; - @Mock private Session session; - @Mock private KeyspaceMetadata keyspace1, keyspace2; - @Mock private TableMetadata table1, table2; - @Mock private UserDefinedType userDefinedType1, userDefinedType2; - @Mock private FunctionMetadata function1, function2; - @Mock private AggregateMetadata aggregate1, aggregate2; - @Mock private ViewMetadata view1, view2; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - private Level initialLogLevel; - - @Before - public void addAppenders() { - logger = (Logger) LoggerFactory.getLogger(MultiplexingSchemaChangeListener.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void removeAppenders() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - public void should_register() { - // given - MultiplexingSchemaChangeListener listener = new MultiplexingSchemaChangeListener(); - // when - listener.register(child1); - listener.register(child2); - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_constructor() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(new MultiplexingSchemaChangeListener(child1, child2)); - // when - // then - assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_listener_via_register() { - // given - MultiplexingSchemaChangeListener listener = new MultiplexingSchemaChangeListener(); - // when - listener.register(new MultiplexingSchemaChangeListener(child1, child2)); - // then - 
assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_notify_onKeyspaceCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onKeyspaceCreated(keyspace1); - // when - listener.onKeyspaceCreated(keyspace1); - // then - verify(child1).onKeyspaceCreated(keyspace1); - verify(child2).onKeyspaceCreated(keyspace1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onKeyspaceCreated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onKeyspaceDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onKeyspaceDropped(keyspace1); - // when - listener.onKeyspaceDropped(keyspace1); - // then - verify(child1).onKeyspaceDropped(keyspace1); - verify(child2).onKeyspaceDropped(keyspace1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onKeyspaceDropped event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onKeyspaceUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onKeyspaceUpdated(keyspace1, keyspace2); - // when - listener.onKeyspaceUpdated(keyspace1, keyspace2); - // then - verify(child1).onKeyspaceUpdated(keyspace1, keyspace2); - verify(child2).onKeyspaceUpdated(keyspace1, keyspace2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onKeyspaceUpdated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onTableCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onTableCreated(table1); - // when - listener.onTableCreated(table1); - // then - verify(child1).onTableCreated(table1); - verify(child2).onTableCreated(table1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onTableCreated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onTableDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onTableDropped(table1); - // when - listener.onTableDropped(table1); - // then - verify(child1).onTableDropped(table1); - verify(child2).onTableDropped(table1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onTableDropped event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onTableUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onTableUpdated(table1, table2); - // when - listener.onTableUpdated(table1, table2); - // then - verify(child1).onTableUpdated(table1, table2); - verify(child2).onTableUpdated(table1, table2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onTableUpdated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onUserDefinedTypeCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onUserDefinedTypeCreated(userDefinedType1); - // when - listener.onUserDefinedTypeCreated(userDefinedType1); - // then - verify(child1).onUserDefinedTypeCreated(userDefinedType1); - verify(child2).onUserDefinedTypeCreated(userDefinedType1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeCreated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onUserDefinedTypeDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onUserDefinedTypeDropped(userDefinedType1); - // when - listener.onUserDefinedTypeDropped(userDefinedType1); - // then - verify(child1).onUserDefinedTypeDropped(userDefinedType1); - verify(child2).onUserDefinedTypeDropped(userDefinedType1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeDropped event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onUserDefinedTypeUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - // when - listener.onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - // then - verify(child1).onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - verify(child2).onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeUpdated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onFunctionCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onFunctionCreated(function1); - // when - listener.onFunctionCreated(function1); - // then - verify(child1).onFunctionCreated(function1); - verify(child2).onFunctionCreated(function1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onFunctionCreated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onFunctionDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onFunctionDropped(function1); - // when - listener.onFunctionDropped(function1); - // then - verify(child1).onFunctionDropped(function1); - verify(child2).onFunctionDropped(function1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onFunctionDropped event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onFunctionUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onFunctionUpdated(function1, function2); - // when - listener.onFunctionUpdated(function1, function2); - // then - verify(child1).onFunctionUpdated(function1, function2); - verify(child2).onFunctionUpdated(function1, function2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onFunctionUpdated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onAggregateCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAggregateCreated(aggregate1); - // when - listener.onAggregateCreated(aggregate1); - // then - verify(child1).onAggregateCreated(aggregate1); - verify(child2).onAggregateCreated(aggregate1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onAggregateCreated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onAggregateDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAggregateDropped(aggregate1); - // when - listener.onAggregateDropped(aggregate1); - // then - verify(child1).onAggregateDropped(aggregate1); - verify(child2).onAggregateDropped(aggregate1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onAggregateDropped event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onAggregateUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onAggregateUpdated(aggregate1, aggregate2); - // when - listener.onAggregateUpdated(aggregate1, aggregate2); - // then - verify(child1).onAggregateUpdated(aggregate1, aggregate2); - verify(child2).onAggregateUpdated(aggregate1, aggregate2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onAggregateUpdated event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onViewCreated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onViewCreated(view1); - // when - listener.onViewCreated(view1); - // then - verify(child1).onViewCreated(view1); - verify(child2).onViewCreated(view1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onViewCreated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onViewDropped() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onViewDropped(view1); - // when - listener.onViewDropped(view1); - // then - verify(child1).onViewDropped(view1); - verify(child2).onViewDropped(view1); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onViewDropped event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onViewUpdated() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onViewUpdated(view1, view2); - // when - listener.onViewUpdated(view1, view2); - // then - verify(child1).onViewUpdated(view1, view2); - verify(child2).onViewUpdated(view1, view2); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onViewUpdated event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onSessionReady() { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - willThrow(new NullPointerException()).given(child1).onSessionReady(session); - // when - listener.onSessionReady(session); - // then - verify(child1).onSessionReady(session); - verify(child2).onSessionReady(session); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while notifying schema change listener child1 of an onSessionReady event. (NullPointerException: null)"); - } - - @Test - public void should_notify_close() throws Exception { - // given - MultiplexingSchemaChangeListener listener = - new MultiplexingSchemaChangeListener(child1, child2); - Exception child1Error = new NullPointerException(); - willThrow(child1Error).given(child1).close(); - // when - listener.close(); - // then - verify(child1).close(); - verify(child2).close(); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while closing schema change listener child1. (NullPointerException: null)"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java deleted file mode 100644 index 03d63230992..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema; - -import static com.datastax.oss.driver.api.core.CqlIdentifier.fromCql; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.driver.internal.core.type.PrimitiveType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.ProtocolConstants.DataType; -import com.google.common.collect.ImmutableList; -import java.util.UUID; -import org.junit.Test; - -public class TableMetadataTest { - - /** Tests CASSJAVA-2 */ - @Test - public void should_describe_table_with_vector_correctly() { - TableMetadata tableMetadata = - new DefaultTableMetadata( - fromCql("ks"), - fromCql("tb"), - UUID.randomUUID(), - false, - false, - ImmutableList.of( - new DefaultColumnMetadata( - fromCql("ks"), - fromCql("ks"), - fromCql("tb"), - new PrimitiveType(DataType.ASCII), - false)), - ImmutableMap.of(), - ImmutableMap.of( - fromCql("a"), - new DefaultColumnMetadata( - fromCql("ks"), - fromCql("ks"), - fromCql("tb"), - new DefaultVectorType(new PrimitiveType(DataType.INT), 3), - false)), - ImmutableMap.of(), - ImmutableMap.of()); - - String describe1 = tableMetadata.describe(true); - - 
assertThat(describe1).contains("vector,"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java deleted file mode 100644 index 9cf5ba60983..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.Collections; -import java.util.Optional; -import org.junit.Before; -import org.junit.Test; - -public class AggregateParserTest extends SchemaParserTestBase { - - private static final AdminRow SUM_AND_TO_STRING_ROW_2_2 = - mockAggregateRow( - "ks", - "sum_and_to_string", - ImmutableList.of("org.apache.cassandra.db.marshal.Int32Type"), - "plus", - "org.apache.cassandra.db.marshal.Int32Type", - "to_string", - "org.apache.cassandra.db.marshal.UTF8Type", - Bytes.fromHexString("0x00000000")); - - static final AdminRow SUM_AND_TO_STRING_ROW_3_0 = - mockAggregateRow( - "ks", - "sum_and_to_string", - ImmutableList.of("int"), - "plus", - "int", - "to_string", - "text", - "0"); - - @Before - @Override - public void setup() { - super.setup(); - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(context.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - } - - @Test - public void should_parse_modern_table() { - AggregateParser parser = new AggregateParser(new DataTypeCqlNameParser(), context); - AggregateMetadata aggregate = - parser.parseAggregate(SUM_AND_TO_STRING_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(aggregate.getKeyspace().asInternal()).isEqualTo("ks"); - 
assertThat(aggregate.getSignature().getName().asInternal()).isEqualTo("sum_and_to_string"); - assertThat(aggregate.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - - FunctionSignature stateFuncSignature = aggregate.getStateFuncSignature(); - assertThat(stateFuncSignature.getName().asInternal()).isEqualTo("plus"); - assertThat(stateFuncSignature.getParameterTypes()) - .containsExactly(DataTypes.INT, DataTypes.INT); - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - - Optional finalFuncSignature = aggregate.getFinalFuncSignature(); - assertThat(finalFuncSignature).isPresent(); - assertThat(finalFuncSignature) - .hasValueSatisfying( - signature -> { - assertThat(signature.getName().asInternal()).isEqualTo("to_string"); - assertThat(signature.getParameterTypes()).containsExactly(DataTypes.INT); - }); - assertThat(aggregate.getReturnType()).isEqualTo(DataTypes.TEXT); - - assertThat(aggregate.getInitCond().get()).isInstanceOf(Integer.class).isEqualTo(0); - } - - @Test - public void should_parse_legacy_table() { - AggregateParser parser = new AggregateParser(new DataTypeClassNameParser(), context); - AggregateMetadata aggregate = - parser.parseAggregate(SUM_AND_TO_STRING_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(aggregate.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(aggregate.getSignature().getName().asInternal()).isEqualTo("sum_and_to_string"); - assertThat(aggregate.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - - FunctionSignature stateFuncSignature = aggregate.getStateFuncSignature(); - assertThat(stateFuncSignature.getName().asInternal()).isEqualTo("plus"); - assertThat(stateFuncSignature.getParameterTypes()) - .containsExactly(DataTypes.INT, DataTypes.INT); - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - - Optional finalFuncSignature = aggregate.getFinalFuncSignature(); - assertThat(finalFuncSignature).isPresent(); - assertThat(finalFuncSignature) - 
.hasValueSatisfying( - signature -> { - assertThat(signature.getName().asInternal()).isEqualTo("to_string"); - assertThat(signature.getParameterTypes()).containsExactly(DataTypes.INT); - }); - assertThat(aggregate.getReturnType()).isEqualTo(DataTypes.TEXT); - - assertThat(aggregate.getInitCond().get()).isInstanceOf(Integer.class).isEqualTo(0); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java deleted file mode 100644 index 84f5c09317f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class DataTypeClassNameParserTest { - - private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); - - @Mock private InternalDriverContext context; - private DataTypeClassNameParser parser; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - parser = new DataTypeClassNameParser(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_native_types(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - for (Map.Entry entry : - DataTypeClassNameParser.NATIVE_TYPES_BY_CLASS_NAME.entrySet()) { - - String className = entry.getKey(); - DataType expectedType = entry.getValue(); - - assertThat(parse(className)).isEqualTo(expectedType); - } - } finally { - Locale.setDefault(def); - } - } - - 
@Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_collection_types(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat( - parse( - "org.apache.cassandra.db.marshal.ListType(" - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.listOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.FrozenType(" - + ("org.apache.cassandra.db.marshal.ListType(" - + "org.apache.cassandra.db.marshal.UTF8Type))"))) - .isEqualTo(DataTypes.frozenListOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.SetType(" - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.setOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.mapOf(DataTypes.TEXT, DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.FrozenType(" - + ("org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.Int32Type," - + "org.apache.cassandra.db.marshal.Int32Type)))"))) - .isEqualTo( - DataTypes.mapOf(DataTypes.TEXT, DataTypes.frozenMapOf(DataTypes.INT, DataTypes.INT))); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_user_type_when_definition_not_already_available(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - UserDefinedType addressType = - (UserDefinedType) - parse( - "org.apache.cassandra.db.marshal.UserType(" - + "foo,61646472657373," - + ("737472656574:org.apache.cassandra.db.marshal.UTF8Type," - + "7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type," - + 
("70686f6e6573:org.apache.cassandra.db.marshal.SetType(" - + "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," - + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," - + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)") - + "))")); - - assertThat(addressType.getKeyspace().asInternal()).isEqualTo("foo"); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.isFrozen()).isTrue(); - assertThat(addressType.getFieldNames().size()).isEqualTo(3); - - assertThat(addressType.getFieldNames().get(0).asInternal()).isEqualTo("street"); - assertThat(addressType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - - assertThat(addressType.getFieldNames().get(1).asInternal()).isEqualTo("zipcode"); - assertThat(addressType.getFieldTypes().get(1)).isEqualTo(DataTypes.INT); - - assertThat(addressType.getFieldNames().get(2).asInternal()).isEqualTo("phones"); - DataType phonesType = addressType.getFieldTypes().get(2); - assertThat(phonesType).isInstanceOf(SetType.class); - UserDefinedType phoneType = ((UserDefinedType) ((SetType) phonesType).getElementType()); - - assertThat(phoneType.getKeyspace().asInternal()).isEqualTo("foo"); - assertThat(phoneType.getName().asInternal()).isEqualTo("phone"); - assertThat(phoneType.isFrozen()).isTrue(); - assertThat(phoneType.getFieldNames().size()).isEqualTo(2); - - assertThat(phoneType.getFieldNames().get(0).asInternal()).isEqualTo("name"); - assertThat(phoneType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - - assertThat(phoneType.getFieldNames().get(1).asInternal()).isEqualTo("number"); - assertThat(phoneType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_make_a_frozen_copy_user_type_when_definition_already_available(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - UserDefinedType 
existing = mock(UserDefinedType.class); - - parse( - "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," - + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," - + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)", - ImmutableMap.of(CqlIdentifier.fromInternal("phone"), existing)); - - verify(existing).copy(true); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_parse_tuple(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - TupleType tupleType = - (TupleType) - parse( - "org.apache.cassandra.db.marshal.TupleType(" - + "org.apache.cassandra.db.marshal.Int32Type," - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.FloatType)"); - - assertThat(tupleType.getComponentTypes().size()).isEqualTo(3); - assertThat(tupleType.getComponentTypes().get(0)).isEqualTo(DataTypes.INT); - assertThat(tupleType.getComponentTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(tupleType.getComponentTypes().get(2)).isEqualTo(DataTypes.FLOAT); - } finally { - Locale.setDefault(def); - } - } - - private DataType parse(String toParse) { - return parse(toParse, null); - } - - private DataType parse(String toParse, Map existingTypes) { - return parser.parse(KEYSPACE_ID, toParse, existingTypes, context); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java deleted file mode 100644 index 04ebaf4d68a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; - -public class DataTypeCqlNameParserTest { - - private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); - - @Mock private InternalDriverContext context; - private DataTypeCqlNameParser parser; - - @Before - public void setUp() throws Exception { - parser = new DataTypeCqlNameParser(); - } - - @Test - public 
void should_parse_native_types() { - for (Map.Entry entry : - DataTypeCqlNameParser.NATIVE_TYPES_BY_NAME.entrySet()) { - - String className = entry.getKey(); - DataType expectedType = entry.getValue(); - - assertThat(parse(className)).isEqualTo(expectedType); - } - } - - @Test - public void should_parse_collection_types() { - assertThat(parse("list")).isEqualTo(DataTypes.listOf(DataTypes.TEXT)); - assertThat(parse("frozen>")).isEqualTo(DataTypes.frozenListOf(DataTypes.TEXT)); - assertThat(parse("set")).isEqualTo(DataTypes.setOf(DataTypes.TEXT)); - assertThat(parse("map")).isEqualTo(DataTypes.mapOf(DataTypes.TEXT, DataTypes.TEXT)); - assertThat(parse("map>>")) - .isEqualTo( - DataTypes.mapOf(DataTypes.TEXT, DataTypes.frozenMapOf(DataTypes.INT, DataTypes.INT))); - } - - @Test - public void should_parse_top_level_user_type_as_shallow() { - UserDefinedType addressType = (UserDefinedType) parse("address"); - assertThat(addressType).isInstanceOf(ShallowUserDefinedType.class); - assertThat(addressType.getKeyspace()).isEqualTo(KEYSPACE_ID); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.isFrozen()).isFalse(); - - UserDefinedType frozenAddressType = (UserDefinedType) parse("frozen
"); - assertThat(frozenAddressType).isInstanceOf(ShallowUserDefinedType.class); - assertThat(frozenAddressType.getKeyspace()).isEqualTo(KEYSPACE_ID); - assertThat(frozenAddressType.getName().asInternal()).isEqualTo("address"); - assertThat(frozenAddressType.isFrozen()).isTrue(); - } - - @Test - public void should_reuse_existing_user_type_when_not_top_level() { - UserDefinedType addressType = mock(UserDefinedType.class); - UserDefinedType frozenAddressType = mock(UserDefinedType.class); - when(addressType.copy(false)).thenReturn(addressType); - when(addressType.copy(true)).thenReturn(frozenAddressType); - - ImmutableMap existingTypes = - ImmutableMap.of(CqlIdentifier.fromInternal("address"), addressType); - - ListType listOfAddress = (ListType) parse("list
", existingTypes); - assertThat(listOfAddress.getElementType()).isEqualTo(addressType); - - ListType listOfFrozenAddress = (ListType) parse("list>", existingTypes); - assertThat(listOfFrozenAddress.getElementType()).isEqualTo(frozenAddressType); - } - - @Test - public void should_parse_tuple() { - TupleType tupleType = (TupleType) parse("tuple"); - - assertThat(tupleType.getComponentTypes().size()).isEqualTo(3); - assertThat(tupleType.getComponentTypes().get(0)).isEqualTo(DataTypes.INT); - assertThat(tupleType.getComponentTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(tupleType.getComponentTypes().get(2)).isEqualTo(DataTypes.FLOAT); - } - - @Test - public void should_parse_udt_named_like_collection_type() { - // Those are all valid UDT names! - assertThat(parse("tuple")).isInstanceOf(UserDefinedType.class); - assertThat(parse("list")).isInstanceOf(UserDefinedType.class); - assertThat(parse("map")).isInstanceOf(UserDefinedType.class); - assertThat(parse("frozen")).isInstanceOf(UserDefinedType.class); - - MapType mapType = (MapType) parse("map"); - assertThat(mapType.getKeyType()).isInstanceOf(UserDefinedType.class); - assertThat(mapType.getValueType()).isInstanceOf(UserDefinedType.class); - } - - private DataType parse(String toParse) { - return parse(toParse, null); - } - - private DataType parse(String toParse, Map existingTypes) { - return parser.parse(KEYSPACE_ID, toParse, existingTypes, context); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java deleted file mode 100644 index ab2d2e725ea..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import org.junit.Test; - -public class FunctionParserTest extends SchemaParserTestBase { - - private static final AdminRow ID_ROW_2_2 = - mockFunctionRow( - "ks", - "id", - ImmutableList.of("i"), - ImmutableList.of("org.apache.cassandra.db.marshal.Int32Type"), - "return i;", - false, - "java", - "org.apache.cassandra.db.marshal.Int32Type"); - - static final AdminRow ID_ROW_3_0 = - mockFunctionRow( - "ks", - "id", - ImmutableList.of("i"), - ImmutableList.of("int"), - "return i;", - false, - "java", - "int"); - - @Test - public void should_parse_modern_table() { - FunctionParser parser = new FunctionParser(new DataTypeCqlNameParser(), context); - FunctionMetadata function = - parser.parseFunction(ID_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - 
assertThat(function.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(function.getSignature().getName().asInternal()).isEqualTo("id"); - assertThat(function.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - assertThat(function.getParameterNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(function.getBody()).isEqualTo("return i;"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - } - - @Test - public void should_parse_legacy_table() { - FunctionParser parser = new FunctionParser(new DataTypeClassNameParser(), context); - FunctionMetadata function = - parser.parseFunction(ID_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(function.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(function.getSignature().getName().asInternal()).isEqualTo("id"); - assertThat(function.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - assertThat(function.getParameterNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(function.getBody()).isEqualTo("return i;"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java deleted file mode 100644 index a08a6cba838..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Map; -import java.util.function.Consumer; -import org.junit.Test; - -public class SchemaParserTest extends SchemaParserTestBase { - - @Test - public void should_parse_modern_keyspace_row() { - SchemaRefresh refresh = - (SchemaRefresh) - parse(rows -> rows.withKeyspaces(ImmutableList.of(mockModernKeyspaceRow("ks")))); - - 
assertThat(refresh.newKeyspaces).hasSize(1); - KeyspaceMetadata keyspace = refresh.newKeyspaces.values().iterator().next(); - checkKeyspace(keyspace); - } - - @Test - public void should_parse_legacy_keyspace_row() { - SchemaRefresh refresh = - (SchemaRefresh) - parse(rows -> rows.withKeyspaces(ImmutableList.of(mockLegacyKeyspaceRow("ks")))); - - assertThat(refresh.newKeyspaces).hasSize(1); - KeyspaceMetadata keyspace = refresh.newKeyspaces.values().iterator().next(); - checkKeyspace(keyspace); - } - - @Test - public void should_parse_keyspace_with_all_children() { - // Needed to parse the aggregate - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - - SchemaRefresh refresh = - (SchemaRefresh) - parse( - rows -> - rows.withKeyspaces(ImmutableList.of(mockModernKeyspaceRow("ks"))) - .withTypes( - ImmutableList.of( - mockTypeRow( - "ks", "t", ImmutableList.of("i"), ImmutableList.of("int")))) - .withTables(ImmutableList.of(TableParserTest.TABLE_ROW_3_0)) - .withColumns(TableParserTest.COLUMN_ROWS_3_0) - .withIndexes(TableParserTest.INDEX_ROWS_3_0) - .withViews(ImmutableList.of(ViewParserTest.VIEW_ROW_3_0)) - .withColumns(ViewParserTest.COLUMN_ROWS_3_0) - .withFunctions(ImmutableList.of(FunctionParserTest.ID_ROW_3_0)) - .withAggregates( - ImmutableList.of(AggregateParserTest.SUM_AND_TO_STRING_ROW_3_0))); - - assertThat(refresh.newKeyspaces).hasSize(1); - KeyspaceMetadata keyspace = refresh.newKeyspaces.values().iterator().next(); - checkKeyspace(keyspace); - - assertThat(keyspace.getUserDefinedTypes()) - .hasSize(1) - .containsKey(CqlIdentifier.fromInternal("t")); - assertThat(keyspace.getTables()).hasSize(1).containsKey(CqlIdentifier.fromInternal("foo")); - assertThat(keyspace.getViews()) - .hasSize(1) - .containsKey(CqlIdentifier.fromInternal("alltimehigh")); - assertThat(keyspace.getFunctions()) - .hasSize(1) - .containsKey(new FunctionSignature(CqlIdentifier.fromInternal("id"), DataTypes.INT)); - assertThat(keyspace.getAggregates()) - .hasSize(1) 
- .containsKey( - new FunctionSignature(CqlIdentifier.fromInternal("sum_and_to_string"), DataTypes.INT)); - } - - // Common assertions, the keyspace has the same info in all of our single keyspace examples - private void checkKeyspace(KeyspaceMetadata keyspace) { - assertThat(keyspace.getName().asInternal()).isEqualTo("ks"); - assertThat(keyspace.isDurableWrites()).isTrue(); - assertThat(keyspace.getReplication()) - .hasSize(2) - .containsEntry("class", "org.apache.cassandra.locator.SimpleStrategy") - .containsEntry("replication_factor", "1"); - } - - @Test - public void should_parse_multiple_keyspaces() { - SchemaRefresh refresh = - (SchemaRefresh) - parse( - rows -> - rows.withKeyspaces( - ImmutableList.of( - mockModernKeyspaceRow("ks1"), mockModernKeyspaceRow("ks2"))) - .withTypes( - ImmutableList.of( - mockTypeRow( - "ks1", "t1", ImmutableList.of("i"), ImmutableList.of("int")), - mockTypeRow( - "ks2", "t2", ImmutableList.of("i"), ImmutableList.of("int"))))); - - Map keyspaces = refresh.newKeyspaces; - assertThat(keyspaces).hasSize(2); - KeyspaceMetadata ks1 = keyspaces.get(CqlIdentifier.fromInternal("ks1")); - KeyspaceMetadata ks2 = keyspaces.get(CqlIdentifier.fromInternal("ks2")); - - assertThat(ks1.getName().asInternal()).isEqualTo("ks1"); - assertThat(ks1.getUserDefinedTypes()).hasSize(1).containsKey(CqlIdentifier.fromInternal("t1")); - assertThat(ks2.getName().asInternal()).isEqualTo("ks2"); - assertThat(ks2.getUserDefinedTypes()).hasSize(1).containsKey(CqlIdentifier.fromInternal("t2")); - } - - private MetadataRefresh parse(Consumer builderConfig) { - CassandraSchemaRows.Builder builder = - new CassandraSchemaRows.Builder(NODE_3_0, keyspaceFilter, "test"); - builderConfig.accept(builder); - SchemaRows rows = builder.build(); - return new CassandraSchemaParser(rows, context).parse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java deleted file mode 100644 index e5f0c732f7a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Silent.class) -public abstract class SchemaParserTestBase { - - protected static final Node NODE_2_2 = mockNode(Version.V2_2_0); - protected static final Node NODE_3_0 = mockNode(Version.V3_0_0); - protected static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); - @Mock protected DefaultMetadata currentMetadata; - @Mock protected InternalDriverContext context; - @Mock protected KeyspaceFilter keyspaceFilter; - - @Before - public void setup() { - when(keyspaceFilter.includes(anyString())).thenReturn(true); - } - - protected static AdminRow mockFunctionRow( - String keyspace, - String name, - List argumentNames, - List argumentTypes, - String body, - boolean calledOnNullInput, - String language, - String returnType) { - - AdminRow row = mock(AdminRow.class); - - when(row.contains("keyspace_name")).thenReturn(true); - 
when(row.contains("function_name")).thenReturn(true); - when(row.contains("argument_names")).thenReturn(true); - when(row.contains("argument_types")).thenReturn(true); - when(row.contains("body")).thenReturn(true); - when(row.contains("called_on_null_input")).thenReturn(true); - when(row.contains("language")).thenReturn(true); - when(row.contains("return_type")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("function_name")).thenReturn(name); - when(row.getListOfString("argument_names")).thenReturn(argumentNames); - when(row.getListOfString("argument_types")).thenReturn(argumentTypes); - when(row.getString("body")).thenReturn(body); - when(row.getBoolean("called_on_null_input")).thenReturn(calledOnNullInput); - when(row.getString("language")).thenReturn(language); - when(row.getString("return_type")).thenReturn(returnType); - - return row; - } - - protected static AdminRow mockAggregateRow( - String keyspace, - String name, - List argumentTypes, - String stateFunc, - String stateType, - String finalFunc, - String returnType, - Object initCond) { - - AdminRow row = mock(AdminRow.class); - - when(row.contains("keyspace_name")).thenReturn(true); - when(row.contains("aggregate_name")).thenReturn(true); - when(row.contains("argument_types")).thenReturn(true); - when(row.contains("state_func")).thenReturn(true); - when(row.contains("state_type")).thenReturn(true); - when(row.contains("final_func")).thenReturn(true); - when(row.contains("return_type")).thenReturn(true); - when(row.contains("initcond")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("aggregate_name")).thenReturn(name); - when(row.getListOfString("argument_types")).thenReturn(argumentTypes); - when(row.getString("state_func")).thenReturn(stateFunc); - when(row.getString("state_type")).thenReturn(stateType); - when(row.getString("final_func")).thenReturn(finalFunc); - 
when(row.getString("return_type")).thenReturn(returnType); - - if (initCond instanceof ByteBuffer) { - when(row.isString("initcond")).thenReturn(false); - when(row.getByteBuffer("initcond")).thenReturn(((ByteBuffer) initCond)); - } else if (initCond instanceof String) { - when(row.isString("initcond")).thenReturn(true); - when(row.getString("initcond")).thenReturn(((String) initCond)); - } else { - fail("Unsupported initcond type" + initCond.getClass()); - } - - return row; - } - - protected static AdminRow mockTypeRow( - String keyspace, String name, List fieldNames, List fieldTypes) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("type_name")).thenReturn(name); - when(row.getListOfString("field_names")).thenReturn(fieldNames); - when(row.getListOfString("field_types")).thenReturn(fieldTypes); - - return row; - } - - protected static AdminRow mockLegacyTableRow(String keyspace, String name, String comparator) { - AdminRow row = mock(AdminRow.class); - - when(row.contains("table_name")).thenReturn(false); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("columnfamily_name")).thenReturn(name); - when(row.getBoolean("is_dense")).thenReturn(false); - when(row.getString("comparator")).thenReturn(comparator); - when(row.isString("caching")).thenReturn(true); - when(row.getString("caching")) - .thenReturn("{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}"); - when(row.getString("compaction_strategy_class")) - .thenReturn("org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy"); - when(row.getString("compaction_strategy_options")).thenReturn("{\"mock_option\":\"1\"}"); - - return row; - } - - protected static AdminRow mockLegacyColumnRow( - String keyspaceName, - String tableName, - String name, - String kind, - String dataType, - Integer position) { - return mockLegacyColumnRow( - keyspaceName, tableName, name, kind, dataType, position, null, null, 
null); - } - - protected static AdminRow mockLegacyColumnRow( - String keyspaceName, - String tableName, - String name, - String kind, - String dataType, - int position, - String indexName, - String indexType, - String indexOptions) { - AdminRow row = mock(AdminRow.class); - - when(row.contains("validator")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("columnfamily_name")).thenReturn(tableName); - when(row.getString("column_name")).thenReturn(name); - when(row.getString("type")).thenReturn(kind); - when(row.getString("validator")).thenReturn(dataType); - when(row.getInteger("component_index")).thenReturn(position); - when(row.getString("index_name")).thenReturn(indexName); - when(row.getString("index_type")).thenReturn(indexType); - when(row.getString("index_options")).thenReturn(indexOptions); - - return row; - } - - protected static AdminRow mockModernTableRow(String keyspace, String name) { - AdminRow row = mock(AdminRow.class); - - when(row.contains("flags")).thenReturn(true); - when(row.contains("table_name")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspace); - when(row.getString("table_name")).thenReturn(name); - when(row.getSetOfString("flags")).thenReturn(ImmutableSet.of("compound")); - when(row.isString("caching")).thenReturn(false); - when(row.get("caching", RelationParser.MAP_OF_TEXT_TO_TEXT)) - .thenReturn(ImmutableMap.of("keys", "ALL", "rows_per_partition", "NONE")); - when(row.get("compaction", RelationParser.MAP_OF_TEXT_TO_TEXT)) - .thenReturn( - ImmutableMap.of( - "class", - "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy", - "mock_option", - "1")); - - return row; - } - - protected static AdminRow mockModernColumnRow( - String keyspaceName, - String tableName, - String name, - String kind, - String dataType, - String clusteringOrder, - Integer position) { - AdminRow row = mock(AdminRow.class); - - 
when(row.contains("kind")).thenReturn(true); - when(row.contains("position")).thenReturn(true); - when(row.contains("clustering_order")).thenReturn(true); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("table_name")).thenReturn(tableName); - when(row.getString("column_name")).thenReturn(name); - when(row.getString("kind")).thenReturn(kind); - when(row.getString("type")).thenReturn(dataType); - when(row.getInteger("position")).thenReturn(position); - when(row.getString("clustering_order")).thenReturn(clusteringOrder); - - return row; - } - - protected static AdminRow mockIndexRow( - String keyspaceName, - String tableName, - String name, - String kind, - ImmutableMap options) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("table_name")).thenReturn(tableName); - when(row.getString("index_name")).thenReturn(name); - when(row.getString("kind")).thenReturn(kind); - when(row.getMapOfStringToString("options")).thenReturn(options); - - return row; - } - - protected static AdminRow mockViewRow( - String keyspaceName, - String viewName, - String baseTableName, - boolean includeAllColumns, - String whereClause) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getString("view_name")).thenReturn(viewName); - when(row.getString("base_table_name")).thenReturn(baseTableName); - when(row.getBoolean("include_all_columns")).thenReturn(includeAllColumns); - when(row.getString("where_clause")).thenReturn(whereClause); - - return row; - } - - protected static AdminRow mockModernKeyspaceRow(String keyspaceName) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getBoolean("durable_writes")).thenReturn(true); - - when(row.contains("strategy_class")).thenReturn(false); - when(row.getMapOfStringToString("replication")) - .thenReturn( 
- ImmutableMap.of( - "class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")); - - return row; - } - - protected static AdminRow mockLegacyKeyspaceRow(String keyspaceName) { - AdminRow row = mock(AdminRow.class); - - when(row.getString("keyspace_name")).thenReturn(keyspaceName); - when(row.getBoolean("durable_writes")).thenReturn(true); - - when(row.contains("strategy_class")).thenReturn(true); - when(row.getString("strategy_class")).thenReturn("org.apache.cassandra.locator.SimpleStrategy"); - when(row.getString("strategy_options")).thenReturn("{\"replication_factor\":\"1\"}"); - - return row; - } - - private static Node mockNode(Version version) { - Node node = mock(Node.class); - when(node.getExtras()).thenReturn(Collections.emptyMap()); - when(node.getCassandraVersion()).thenReturn(version); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java deleted file mode 100644 index a316473d071..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; -import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import org.junit.Test; - -public class TableParserTest extends SchemaParserTestBase { - - private static final AdminRow TABLE_ROW_2_2 = - mockLegacyTableRow( - "ks", - "foo", - "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type)"); - private static final ImmutableList COLUMN_ROWS_2_2 = - ImmutableList.of( - mockLegacyColumnRow( - "ks", "foo", "k2", "partition_key", "org.apache.cassandra.db.marshal.UTF8Type", 1), - mockLegacyColumnRow( - "ks", "foo", "k1", "partition_key", "org.apache.cassandra.db.marshal.Int32Type", 
0), - mockLegacyColumnRow( - "ks", "foo", "cc1", "clustering_key", "org.apache.cassandra.db.marshal.Int32Type", 0), - mockLegacyColumnRow( - "ks", - "foo", - "cc2", - "clustering_key", - "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.Int32Type)", - 1), - mockLegacyColumnRow( - "ks", - "foo", - "v", - "regular", - "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.Int32Type)", - -1, - "foo_v_idx", - "COMPOSITES", - "{}")); - - static final AdminRow TABLE_ROW_3_0 = mockModernTableRow("ks", "foo"); - static final ImmutableList COLUMN_ROWS_3_0 = - ImmutableList.of( - mockModernColumnRow("ks", "foo", "k2", "partition_key", "text", "none", 1), - mockModernColumnRow("ks", "foo", "k1", "partition_key", "int", "none", 0), - mockModernColumnRow("ks", "foo", "cc1", "clustering", "int", "asc", 0), - mockModernColumnRow("ks", "foo", "cc2", "clustering", "int", "desc", 1), - mockModernColumnRow("ks", "foo", "v", "regular", "int", "none", -1)); - static final ImmutableList INDEX_ROWS_3_0 = - ImmutableList.of( - mockIndexRow("ks", "foo", "foo_v_idx", "COMPOSITES", ImmutableMap.of("target", "v"))); - - @Test - public void should_skip_when_no_column_rows() { - SchemaRows rows = legacyRows(TABLE_ROW_2_2, Collections.emptyList()); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(TABLE_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(table).isNull(); - } - - @Test - public void should_parse_legacy_tables() { - SchemaRows rows = legacyRows(TABLE_ROW_2_2, COLUMN_ROWS_2_2); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(TABLE_ROW_2_2, KEYSPACE_ID, Collections.emptyMap()); - - checkTable(table); - - assertThat(table.getOptions().get(CqlIdentifier.fromInternal("caching"))) - .isEqualTo("{\"keys\":\"ALL\", \"rows_per_partition\":\"NONE\"}"); - } - - @Test - public void should_parse_modern_tables() { - SchemaRows rows = 
modernRows(TABLE_ROW_3_0, COLUMN_ROWS_3_0, INDEX_ROWS_3_0); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(TABLE_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - checkTable(table); - - @SuppressWarnings("unchecked") - Map caching = - (Map) table.getOptions().get(CqlIdentifier.fromInternal("caching")); - assertThat(caching) - .hasSize(2) - .containsEntry("keys", "ALL") - .containsEntry("rows_per_partition", "NONE"); - } - - /** Covers two additional Cassandra 4.0 options added in JAVA-2090. */ - @Test - public void should_parse_read_repair_and_additional_write_policy() { - AdminRow tableRow40 = mockModernTableRow("ks", "foo"); - when(tableRow40.get("read_repair", TypeCodecs.TEXT)).thenReturn("NONE"); - when(tableRow40.get("additional_write_policy", TypeCodecs.TEXT)).thenReturn("40p"); - - SchemaRows rows = modernRows(tableRow40, COLUMN_ROWS_3_0, INDEX_ROWS_3_0); - TableParser parser = new TableParser(rows, context); - TableMetadata table = parser.parseTable(tableRow40, KEYSPACE_ID, Collections.emptyMap()); - - checkTable(table); - - assertThat(table.getOptions()) - .containsEntry(CqlIdentifier.fromInternal("read_repair"), "NONE") - .containsEntry(CqlIdentifier.fromInternal("additional_write_policy"), "40p"); - } - - // Shared between 2.2 and 3.0 tests, all expected values are the same except the 'caching' option - private void checkTable(TableMetadata table) { - assertThat(table.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(table.getName().asInternal()).isEqualTo("foo"); - - assertThat(table.getPartitionKey()).hasSize(2); - ColumnMetadata pk0 = table.getPartitionKey().get(0); - assertThat(pk0.getName().asInternal()).isEqualTo("k1"); - assertThat(pk0.getType()).isEqualTo(DataTypes.INT); - ColumnMetadata pk1 = table.getPartitionKey().get(1); - assertThat(pk1.getName().asInternal()).isEqualTo("k2"); - assertThat(pk1.getType()).isEqualTo(DataTypes.TEXT); - - 
assertThat(table.getClusteringColumns().entrySet()).hasSize(2); - Iterator clusteringColumnsIterator = - table.getClusteringColumns().keySet().iterator(); - ColumnMetadata clusteringColumn1 = clusteringColumnsIterator.next(); - assertThat(clusteringColumn1.getName().asInternal()).isEqualTo("cc1"); - ColumnMetadata clusteringColumn2 = clusteringColumnsIterator.next(); - assertThat(clusteringColumn2.getName().asInternal()).isEqualTo("cc2"); - assertThat(table.getClusteringColumns().values()) - .containsExactly(ClusteringOrder.ASC, ClusteringOrder.DESC); - - assertThat(table.getColumns()) - .containsOnlyKeys( - CqlIdentifier.fromInternal("k1"), - CqlIdentifier.fromInternal("k2"), - CqlIdentifier.fromInternal("cc1"), - CqlIdentifier.fromInternal("cc2"), - CqlIdentifier.fromInternal("v")); - ColumnMetadata regularColumn = table.getColumns().get(CqlIdentifier.fromInternal("v")); - assertThat(regularColumn.getName().asInternal()).isEqualTo("v"); - assertThat(regularColumn.getType()).isEqualTo(DataTypes.INT); - - assertThat(table.getIndexes()).containsOnlyKeys(CqlIdentifier.fromInternal("foo_v_idx")); - IndexMetadata index = table.getIndexes().get(CqlIdentifier.fromInternal("foo_v_idx")); - assertThat(index.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(index.getTable().asInternal()).isEqualTo("foo"); - assertThat(index.getName().asInternal()).isEqualTo("foo_v_idx"); - assertThat(index.getClassName()).isNotPresent(); - assertThat(index.getKind()).isEqualTo(IndexKind.COMPOSITES); - assertThat(index.getTarget()).isEqualTo("v"); - - assertThat(table.getIndex("foo_v_idx")).hasValue(index); - - @SuppressWarnings("unchecked") - Map compaction = - (Map) table.getOptions().get(CqlIdentifier.fromInternal("compaction")); - assertThat(compaction) - .hasSize(2) - .containsEntry("class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy") - .containsEntry("mock_option", "1"); - } - - private SchemaRows legacyRows(AdminRow tableRow, Iterable columnRows) { - 
return rows(tableRow, columnRows, null, NODE_2_2); - } - - private SchemaRows modernRows( - AdminRow tableRow, Iterable columnRows, Iterable indexesRows) { - return rows(tableRow, columnRows, indexesRows, NODE_3_0); - } - - private SchemaRows rows( - AdminRow tableRow, Iterable columnRows, Iterable indexesRows, Node node) { - CassandraSchemaRows.Builder builder = - new CassandraSchemaRows.Builder(node, keyspaceFilter, "test") - .withTables(ImmutableList.of(tableRow)) - .withColumns(columnRows); - if (indexesRows != null) { - builder.withIndexes(indexesRows); - } - return builder.build(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java deleted file mode 100644 index f90d07ebe6d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Map; -import org.junit.Test; - -public class UserDefinedTypeListParserTest extends SchemaParserTestBase { - - private static final AdminRow PERSON_ROW_2_2 = - mockTypeRow( - "ks", - "person", - ImmutableList.of("first_name", "last_name", "address"), - ImmutableList.of( - "org.apache.cassandra.db.marshal.UTF8Type", - "org.apache.cassandra.db.marshal.UTF8Type", - "org.apache.cassandra.db.marshal.UserType(" - + "ks,61646472657373," // address - + "737472656574:org.apache.cassandra.db.marshal.UTF8Type," // street - + "7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type)")); // zipcode - - private static final AdminRow PERSON_ROW_3_0 = - mockTypeRow( - "ks", - "person", - ImmutableList.of("first_name", "last_name", "address"), - ImmutableList.of("text", "text", "address")); - - private static final AdminRow ADDRESS_ROW_3_0 = - mockTypeRow( - "ks", "address", ImmutableList.of("street", "zipcode"), ImmutableList.of("text", "int")); - - @Test - public void should_parse_modern_table() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse(KEYSPACE_ID, PERSON_ROW_3_0, ADDRESS_ROW_3_0); - - assertThat(types).hasSize(2); - UserDefinedType personType = types.get(CqlIdentifier.fromInternal("person")); - 
UserDefinedType addressType = types.get(CqlIdentifier.fromInternal("address")); - - assertThat(personType.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(personType.getName().asInternal()).isEqualTo("person"); - assertThat(personType.getFieldNames()) - .containsExactly( - CqlIdentifier.fromInternal("first_name"), - CqlIdentifier.fromInternal("last_name"), - CqlIdentifier.fromInternal("address")); - assertThat(personType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - assertThat(personType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(personType.getFieldTypes().get(2)).isSameAs(addressType); - } - - @Test - public void should_parse_legacy_table() { - UserDefinedTypeParser parser = - new UserDefinedTypeParser(new DataTypeClassNameParser(), context); - // no need to add a column for the address type, because in 2.2 UDTs are always fully redefined - // in column and field types (instead of referencing an existing type) - Map types = parser.parse(KEYSPACE_ID, PERSON_ROW_2_2); - - assertThat(types).hasSize(1); - UserDefinedType personType = types.get(CqlIdentifier.fromInternal("person")); - - assertThat(personType.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(personType.getName().asInternal()).isEqualTo("person"); - assertThat(personType.getFieldNames()) - .containsExactly( - CqlIdentifier.fromInternal("first_name"), - CqlIdentifier.fromInternal("last_name"), - CqlIdentifier.fromInternal("address")); - assertThat(personType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - assertThat(personType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); - UserDefinedType addressType = ((UserDefinedType) personType.getFieldTypes().get(2)); - assertThat(addressType.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.getFieldNames()) - .containsExactly( - CqlIdentifier.fromInternal("street"), CqlIdentifier.fromInternal("zipcode")); - } - - @Test - 
public void should_parse_empty_list() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - assertThat(parser.parse(KEYSPACE_ID /* no types*/)).isEmpty(); - } - - @Test - public void should_parse_singleton_list() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, mockTypeRow("ks", "t", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(1); - UserDefinedType type = types.get(CqlIdentifier.fromInternal("t")); - assertThat(type.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(type.getName().asInternal()).isEqualTo("t"); - assertThat(type.getFieldNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(type.getFieldTypes()).containsExactly(DataTypes.INT); - } - - @Test - public void should_resolve_list_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", "a", ImmutableList.of("bs"), ImmutableList.of("frozen>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - assertThat(((ListType) aType.getFieldTypes().get(0)).getElementType()).isEqualTo(bType); - } - - @Test - public void should_resolve_set_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", "a", ImmutableList.of("bs"), ImmutableList.of("frozen>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = 
types.get(CqlIdentifier.fromInternal("b")); - assertThat(((SetType) aType.getFieldTypes().get(0)).getElementType()).isEqualTo(bType); - } - - @Test - public void should_resolve_map_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", - "a1", - ImmutableList.of("bs"), - ImmutableList.of("frozen>>")), - mockTypeRow( - "ks", - "a2", - ImmutableList.of("bs"), - ImmutableList.of("frozen, int>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(3); - UserDefinedType a1Type = types.get(CqlIdentifier.fromInternal("a1")); - UserDefinedType a2Type = types.get(CqlIdentifier.fromInternal("a2")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - assertThat(((MapType) a1Type.getFieldTypes().get(0)).getValueType()).isEqualTo(bType); - assertThat(((MapType) a2Type.getFieldTypes().get(0)).getKeyType()).isEqualTo(bType); - } - - @Test - public void should_resolve_tuple_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", - "a", - ImmutableList.of("b"), - ImmutableList.of("frozen>>")), - mockTypeRow("ks", "b", ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - assertThat(((TupleType) aType.getFieldTypes().get(0)).getComponentTypes().get(1)) - .isEqualTo(bType); - } - - @Test - public void should_resolve_nested_dependency() { - UserDefinedTypeParser parser = new UserDefinedTypeParser(new DataTypeCqlNameParser(), context); - Map types = - parser.parse( - KEYSPACE_ID, - mockTypeRow( - "ks", - "a", - ImmutableList.of("bs"), - ImmutableList.of("frozen>>>>")), - mockTypeRow("ks", "b", 
ImmutableList.of("i"), ImmutableList.of("int"))); - - assertThat(types).hasSize(2); - UserDefinedType aType = types.get(CqlIdentifier.fromInternal("a")); - UserDefinedType bType = types.get(CqlIdentifier.fromInternal("b")); - TupleType tupleType = (TupleType) aType.getFieldTypes().get(0); - ListType listType = (ListType) tupleType.getComponentTypes().get(1); - assertThat(listType.getElementType()).isEqualTo(bType); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java deleted file mode 100644 index 1ba471e08f5..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.parsing; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; -import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.Iterator; -import org.junit.Test; - -public class ViewParserTest extends SchemaParserTestBase { - - static final AdminRow VIEW_ROW_3_0 = - mockViewRow("ks", "alltimehigh", "scores", false, "game IS NOT NULL"); - static final ImmutableList COLUMN_ROWS_3_0 = - ImmutableList.of( - mockModernColumnRow("ks", "alltimehigh", "game", "partition_key", "text", "none", 0), - mockModernColumnRow("ks", "alltimehigh", "score", "clustering", "int", "desc", 0), - mockModernColumnRow("ks", "alltimehigh", "user", "clustering", "text", "asc", 1), - mockModernColumnRow("ks", "alltimehigh", "year", "clustering", "int", "asc", 2), - mockModernColumnRow("ks", "alltimehigh", "month", "clustering", "int", "asc", 3), - mockModernColumnRow("ks", "alltimehigh", "day", "clustering", "int", "asc", 4)); - - @Test - public void should_skip_when_no_column_rows() { - SchemaRows rows = rows(VIEW_ROW_3_0, Collections.emptyList()); - ViewParser parser = new ViewParser(rows, context); - ViewMetadata view = parser.parseView(VIEW_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(view).isNull(); - } - - @Test - public void should_parse_view() { - SchemaRows rows = rows(VIEW_ROW_3_0, COLUMN_ROWS_3_0); - ViewParser parser = new ViewParser(rows, 
context); - ViewMetadata view = parser.parseView(VIEW_ROW_3_0, KEYSPACE_ID, Collections.emptyMap()); - - assertThat(view.getKeyspace().asInternal()).isEqualTo("ks"); - assertThat(view.getName().asInternal()).isEqualTo("alltimehigh"); - assertThat(view.getBaseTable().asInternal()).isEqualTo("scores"); - - assertThat(view.getPartitionKey()).hasSize(1); - ColumnMetadata pk0 = view.getPartitionKey().get(0); - assertThat(pk0.getName().asInternal()).isEqualTo("game"); - assertThat(pk0.getType()).isEqualTo(DataTypes.TEXT); - - assertThat(view.getClusteringColumns().entrySet()).hasSize(5); - Iterator clusteringColumnsIterator = - view.getClusteringColumns().keySet().iterator(); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("score"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("user"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("year"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("month"); - assertThat(clusteringColumnsIterator.next().getName().asInternal()).isEqualTo("day"); - - assertThat(view.getColumns()) - .containsOnlyKeys( - CqlIdentifier.fromInternal("game"), - CqlIdentifier.fromInternal("score"), - CqlIdentifier.fromInternal("user"), - CqlIdentifier.fromInternal("year"), - CqlIdentifier.fromInternal("month"), - CqlIdentifier.fromInternal("day")); - } - - private SchemaRows rows(AdminRow viewRow, Iterable columnRows) { - return new CassandraSchemaRows.Builder(NODE_3_0, keyspaceFilter, "test") - .withViews(ImmutableList.of(viewRow)) - .withColumns(columnRows) - .build(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java deleted file mode 100644 index 2dd216474df..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.LinkedBlockingDeque; -import org.junit.Test; - -// Note: we don't repeat the other tests in Cassandra3SchemaQueriesTest because the logic is -// shared, this class just validates the query strings. 
-public class Cassandra21SchemaQueriesTest extends SchemaQueriesTest { - - @Test - public void should_query() { - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(Collections.emptyList()); - when(node.getCassandraVersion()).thenReturn(Version.V2_1_0); - - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_keyspaces"); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1"), mockRow("keyspace_name", "ks2"))); - - // Types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_usertypes"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "type_name", "type"))); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columnfamilies"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "columnfamily_name", "foo"))); - - // Columns - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columns"); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks1", "columnfamily_name", "foo", "column_name", "k"))); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.getNode()).isEqualTo(node); - - // Keyspace - assertThat(rows.keyspaces()).hasSize(2); - assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); - assertThat(rows.keyspaces().get(1).getString("keyspace_name")).isEqualTo("ks2"); - - // Types - assertThat(rows.types().keySet()).containsOnly(KS1_ID); - assertThat(rows.types().get(KS1_ID)).hasSize(1); - assertThat(rows.types().get(KS1_ID).iterator().next().getString("type_name")) - .isEqualTo("type"); - - // 
Tables - assertThat(rows.tables().keySet()).containsOnly(KS1_ID); - assertThat(rows.tables().get(KS1_ID)).hasSize(1); - assertThat(rows.tables().get(KS1_ID).iterator().next().getString("columnfamily_name")) - .isEqualTo("foo"); - - // Rows - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - - // No views, functions or aggregates in this version - assertThat(rows.views().keySet()).isEmpty(); - assertThat(rows.functions().keySet()).isEmpty(); - assertThat(rows.aggregates().keySet()).isEmpty(); - }); - } - - /** Extends the class under test to mock the query execution logic. */ - static class SchemaQueriesWithMockedChannel extends Cassandra21SchemaQueries { - - final Queue calls = new LinkedBlockingDeque<>(); - - SchemaQueriesWithMockedChannel( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected CompletionStage query(String query) { - Call call = new Call(query); - calls.add(call); - return call.result; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java deleted file mode 100644 index fd28be59120..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.LinkedBlockingDeque; -import org.junit.Test; - -// Note: we don't repeat the other tests in Cassandra3SchemaQueriesTest because the logic is -// shared, this class just validates the query strings. 
-public class Cassandra22SchemaQueriesTest extends SchemaQueriesTest { - - @Test - public void should_query() { - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(Collections.emptyList()); - when(node.getCassandraVersion()).thenReturn(Version.V2_2_0); - - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, null, config, "test"); - - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_keyspaces"); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1"), mockRow("keyspace_name", "ks2"))); - - // Types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_usertypes"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "type_name", "type"))); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columnfamilies"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "columnfamily_name", "foo"))); - - // Columns - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_columns"); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks1", "columnfamily_name", "foo", "column_name", "k"))); - - // Functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_functions"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "function_name", "add"))); - - // Aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system.schema_aggregates"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "aggregate_name", "add"))); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.getNode()).isEqualTo(node); - - // 
Keyspace - assertThat(rows.keyspaces()).hasSize(2); - assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); - assertThat(rows.keyspaces().get(1).getString("keyspace_name")).isEqualTo("ks2"); - - // Types - assertThat(rows.types().keySet()).containsOnly(KS1_ID); - assertThat(rows.types().get(KS1_ID)).hasSize(1); - assertThat(rows.types().get(KS1_ID).iterator().next().getString("type_name")) - .isEqualTo("type"); - - // Tables - assertThat(rows.tables().keySet()).containsOnly(KS1_ID); - assertThat(rows.tables().get(KS1_ID)).hasSize(1); - assertThat(rows.tables().get(KS1_ID).iterator().next().getString("columnfamily_name")) - .isEqualTo("foo"); - - // Rows - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - - // Functions - assertThat(rows.functions().keySet()).containsOnly(KS2_ID); - assertThat(rows.functions().get(KS2_ID)).hasSize(1); - assertThat(rows.functions().get(KS2_ID).iterator().next().getString("function_name")) - .isEqualTo("add"); - - // Aggregates - assertThat(rows.aggregates().keySet()).containsOnly(KS2_ID); - assertThat(rows.aggregates().get(KS2_ID)).hasSize(1); - assertThat( - rows.aggregates().get(KS2_ID).iterator().next().getString("aggregate_name")) - .isEqualTo("add"); - - // No views in this version - assertThat(rows.views().keySet()).isEmpty(); - }); - } - - /** Extends the class under test to mock the query execution logic. 
*/ - static class SchemaQueriesWithMockedChannel extends Cassandra22SchemaQueries { - - final Queue calls = new LinkedBlockingDeque<>(); - - SchemaQueriesWithMockedChannel( - DriverChannel channel, - Node node, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected CompletionStage query(String query) { - Call call = new Call(query); - calls.add(call); - return call.result; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java deleted file mode 100644 index 3b533e89ed5..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collections; -import java.util.Queue; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.LinkedBlockingDeque; -import org.junit.Before; -import org.junit.Test; - -public class Cassandra3SchemaQueriesTest extends SchemaQueriesTest { - - @Before - @Override - public void setup() { - super.setup(); - - // By default, no keyspace filter - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(Collections.emptyList()); - when(node.getCassandraVersion()).thenReturn(Version.V3_0_0); - } - - @Test - public void should_query_without_keyspace_filter() { - should_query_with_where_clause(""); - } - - @Test - public void should_query_with_keyspace_filter() { - when(config.getStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) - .thenReturn(ImmutableList.of("ks1", "ks2")); - - should_query_with_where_clause(" WHERE keyspace_name IN ('ks1','ks2')"); - } - - private void should_query_with_where_clause(String whereClause) { - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - // Keyspace - Call call = 
queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces" + whereClause); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1"), mockRow("keyspace_name", "ks2"))); - - // Types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.types" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "type_name", "type"))); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.tables" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo"))); - - // Columns - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.columns" + whereClause); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "k"))); - - // Indexes - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.indexes" + whereClause); - call.result.complete( - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "index_name", "index"))); - - // Views - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.views" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "view_name", "foo"))); - - // Functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.functions" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "function_name", "add"))); - - // Aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.aggregates" + whereClause); - call.result.complete(mockResult(mockRow("keyspace_name", "ks2", "aggregate_name", "add"))); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.getNode()).isEqualTo(node); - - // Keyspace - 
assertThat(rows.keyspaces()).hasSize(2); - assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); - assertThat(rows.keyspaces().get(1).getString("keyspace_name")).isEqualTo("ks2"); - - // Types - assertThat(rows.types().keySet()).containsOnly(KS1_ID); - assertThat(rows.types().get(KS1_ID)).hasSize(1); - assertThat(rows.types().get(KS1_ID).iterator().next().getString("type_name")) - .isEqualTo("type"); - - // Tables - assertThat(rows.tables().keySet()).containsOnly(KS1_ID); - assertThat(rows.tables().get(KS1_ID)).hasSize(1); - assertThat(rows.tables().get(KS1_ID).iterator().next().getString("table_name")) - .isEqualTo("foo"); - - // Columns - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - - // Indexes - assertThat(rows.indexes().keySet()).containsOnly(KS1_ID); - assertThat(rows.indexes().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.indexes() - .get(KS1_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("index_name")) - .isEqualTo("index"); - - // Views - assertThat(rows.views().keySet()).containsOnly(KS2_ID); - assertThat(rows.views().get(KS2_ID)).hasSize(1); - assertThat(rows.views().get(KS2_ID).iterator().next().getString("view_name")) - .isEqualTo("foo"); - - // Functions - assertThat(rows.functions().keySet()).containsOnly(KS2_ID); - assertThat(rows.functions().get(KS2_ID)).hasSize(1); - assertThat(rows.functions().get(KS2_ID).iterator().next().getString("function_name")) - .isEqualTo("add"); - - // Aggregates - assertThat(rows.aggregates().keySet()).containsOnly(KS2_ID); - assertThat(rows.aggregates().get(KS2_ID)).hasSize(1); - assertThat( - rows.aggregates().get(KS2_ID).iterator().next().getString("aggregate_name")) - .isEqualTo("add"); - }); - } - - @Test - public void should_query_with_paging() 
{ - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1"))); - - // No types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.types"); - call.result.complete(mockResult(/*empty*/ )); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.tables"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo"))); - - // Columns: paged - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.columns"); - - AdminResult page2 = - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "v")); - AdminResult page1 = - mockResult(page2, mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "k")); - call.result.complete(page1); - - // No indexes - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.indexes"); - call.result.complete(mockResult(/*empty*/ )); - - // No views - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.views"); - call.result.complete(mockResult(/*empty*/ )); - - // No functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.functions"); - call.result.complete(mockResult(/*empty*/ )); - - // No aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.aggregates"); - call.result.complete(mockResult(/*empty*/ )); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.columns().keySet()).containsOnly(KS1_ID); - 
assertThat(rows.columns().get(KS1_ID).keySet()).containsOnly(FOO_ID); - assertThat(rows.columns().get(KS1_ID).get(FOO_ID)) - .extracting(r -> r.getString("column_name")) - .containsExactly("k", "v"); - }); - } - - @Test - public void should_ignore_malformed_rows() { - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - // Keyspace - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); - call.result.complete(mockResult(mockRow("keyspace_name", "ks1"))); - - // No types - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.types"); - call.result.complete(mockResult(/*empty*/ )); - - // Tables - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.tables"); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks", "table_name", "foo"), - // Missing keyspace name: - mockRow("table_name", "foo"))); - - // Columns - call = queries.calls.poll(); - call.result.complete( - mockResult( - mockRow("keyspace_name", "ks", "table_name", "foo", "column_name", "k"), - // Missing keyspace name: - mockRow("table_name", "foo", "column_name", "k"), - // Missing table name: - mockRow("keyspace_name", "ks", "column_name", "k"))); - - AdminResult page2 = - mockResult(mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "v")); - AdminResult page1 = - mockResult(page2, mockRow("keyspace_name", "ks1", "table_name", "foo", "column_name", "k")); - call.result.complete(page1); - - // No indexes - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.indexes"); - call.result.complete(mockResult(/*empty*/ )); - - // No views - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.views"); - call.result.complete(mockResult(/*empty*/ )); - - // 
No functions - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.functions"); - call.result.complete(mockResult(/*empty*/ )); - - // No aggregates - call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.aggregates"); - call.result.complete(mockResult(/*empty*/ )); - - channel.runPendingTasks(); - - assertThatStage(result) - .isSuccess( - rows -> { - assertThat(rows.tables().keySet()).containsOnly(KS_ID); - assertThat(rows.tables().get(KS_ID)).hasSize(1); - assertThat(rows.tables().get(KS_ID).iterator().next().getString("table_name")) - .isEqualTo("foo"); - - assertThat(rows.columns().keySet()).containsOnly(KS_ID); - assertThat(rows.columns().get(KS_ID).keySet()).containsOnly(FOO_ID); - assertThat( - rows.columns() - .get(KS_ID) - .get(FOO_ID) - .iterator() - .next() - .getString("column_name")) - .isEqualTo("k"); - }); - } - - @Test - public void should_abort_if_query_fails() { - SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); - CompletionStage result = queries.execute(); - - Exception mockQueryError = new Exception("mock query error"); - - Call call = queries.calls.poll(); - assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); - call.result.completeExceptionally(mockQueryError); - - channel.runPendingTasks(); - - assertThatStage(result).isFailed(throwable -> assertThat(throwable).isEqualTo(mockQueryError)); - } - - /** Extends the class under test to mock the query execution logic. 
*/ - static class SchemaQueriesWithMockedChannel extends Cassandra3SchemaQueries { - - final Queue calls = new LinkedBlockingDeque<>(); - - SchemaQueriesWithMockedChannel( - DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { - super(channel, node, config, logPrefix); - } - - @Override - protected CompletionStage query(String query) { - Call call = new Call(query); - calls.add(call); - return call.result; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java deleted file mode 100644 index f9ac6c05576..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Optional; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DefaultSchemaQueriesFactoryTest { - - enum Expected { - CASS_21(Cassandra21SchemaQueries.class), - CASS_22(Cassandra22SchemaQueries.class), - CASS_3(Cassandra3SchemaQueries.class), - CASS_4(Cassandra4SchemaQueries.class), - DSE_6_8(Dse68SchemaQueries.class); - - final Class clz; - - Expected(Class clz) { - this.clz = clz; - } - - public Class getClz() { - return clz; - } - } - - private static ImmutableList> cassandraVersions = - ImmutableList.>builder() - .add(ImmutableList.of("2.1.0", Optional.empty(), Expected.CASS_21)) - .add(ImmutableList.of("2.2.0", Optional.empty(), Expected.CASS_22)) - .add(ImmutableList.of("2.2.1", Optional.empty(), Expected.CASS_22)) - // Not a real version, just documenting behaviour of existing impl - .add(ImmutableList.of("2.3.0", Optional.empty(), Expected.CASS_22)) - // We now return you to real versions - 
.add(ImmutableList.of("3.0.0", Optional.empty(), Expected.CASS_3)) - .add(ImmutableList.of("3.0.1", Optional.empty(), Expected.CASS_3)) - .add(ImmutableList.of("3.1.0", Optional.empty(), Expected.CASS_3)) - .add(ImmutableList.of("4.0.0", Optional.empty(), Expected.CASS_4)) - .add(ImmutableList.of("4.0.1", Optional.empty(), Expected.CASS_4)) - .add(ImmutableList.of("4.1.0", Optional.empty(), Expected.CASS_4)) - .build(); - - private static ImmutableList> dseVersions = - ImmutableList.>builder() - // DSE 6.0.0 - .add(ImmutableList.of("4.0.0.2284", Optional.of("6.0.0"), Expected.CASS_3)) - // DSE 6.0.1 - .add(ImmutableList.of("4.0.0.2349", Optional.of("6.0.1"), Expected.CASS_3)) - // DSE 6.0.2 moved to DSE version (minus dots) in an extra element - .add(ImmutableList.of("4.0.0.602", Optional.of("6.0.2"), Expected.CASS_3)) - // DSE 6.7.0 continued with the same idea - .add(ImmutableList.of("4.0.0.670", Optional.of("6.7.0"), Expected.CASS_4)) - // DSE 6.8.0 does the same - .add(ImmutableList.of("4.0.0.680", Optional.of("6.8.0"), Expected.DSE_6_8)) - .build(); - - private static ImmutableList> allVersions = - ImmutableList.>builder() - .addAll(cassandraVersions) - .addAll(dseVersions) - .build(); - - @DataProvider(format = "%m %p[1] => %p[0]") - public static Iterable expected() { - - return allVersions; - } - - @Test - @UseDataProvider("expected") - public void should_return_correct_schema_queries_impl( - String cassandraVersion, Optional dseVersion, Expected expected) { - - final Node mockNode = mock(Node.class); - when(mockNode.getCassandraVersion()).thenReturn(Version.parse(cassandraVersion)); - dseVersion.ifPresent( - versionStr -> { - when(mockNode.getExtras()) - .thenReturn( - ImmutableMap.of( - DseNodeProperties.DSE_VERSION, Version.parse(versionStr))); - }); - - DefaultSchemaQueriesFactory factory = buildFactory(); - - @SuppressWarnings("unchecked") - SchemaQueries queries = factory.newInstance(mockNode, mock(DriverChannel.class)); - - 
assertThat(queries.getClass()).isEqualTo(expected.getClz()); - } - - private DefaultSchemaQueriesFactory buildFactory() { - - final DriverExecutionProfile mockProfile = mock(DriverExecutionProfile.class); - final DriverConfig mockConfig = mock(DriverConfig.class); - when(mockConfig.getDefaultProfile()).thenReturn(mockProfile); - final InternalDriverContext mockInternalCtx = mock(InternalDriverContext.class); - when(mockInternalCtx.getConfig()).thenReturn(mockConfig); - - return new DefaultSchemaQueriesFactory(mockInternalCtx); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java deleted file mode 100644 index 7e2f6219eac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.Arrays; -import java.util.Set; -import java.util.stream.Collectors; -import org.junit.Test; - -public class KeyspaceFilterTest { - - private static final ImmutableSet KEYSPACES = - ImmutableSet.of( - "system", "inventory_test", "inventory_prod", "customers_test", "customers_prod"); - - @Test - public void should_not_filter_when_no_rules() { - KeyspaceFilter filter = KeyspaceFilter.newInstance("test", Arrays.asList()); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).isEqualTo(KEYSPACES); - } - - @Test - public void should_filter_on_server_when_only_exact_rules() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance( - "test", Arrays.asList("inventory_test", "customers_test", "!system")); - // Note that exact excludes are redundant in this case: either they match an include and will be - // ignored, or they don't and the keyspace is already ignored. - // We let it slide, but a warning is logged. 
- assertThat(filter.getWhereClause()) - .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); - } - - @Test - public void should_ignore_exact_exclude_that_collides_with_exact_include() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "!inventory_test")); - assertThat(filter.getWhereClause()).isEqualTo(" WHERE keyspace_name IN ('inventory_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test"); - - // Order does not matter - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!inventory_test", "inventory_test")); - assertThat(filter.getWhereClause()).isEqualTo(" WHERE keyspace_name IN ('inventory_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test"); - } - - @Test - public void should_apply_disjoint_exact_and_regex_rules() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "/^customers.*/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)) - .containsOnly("inventory_test", "customers_test", "customers_prod"); - - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!system", "!/^inventory.*/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("customers_test", "customers_prod"); - - // The remaining cases could be simplified, but they are supported nevertheless: - /*redundant:*/ - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!/^customers.*/", "inventory_test")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "inventory_prod", "system"); - - /*redundant:*/ - filter = KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!system")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, 
KEYSPACES)).containsOnly("customers_test", "customers_prod"); - } - - @Test - public void should_apply_intersecting_exact_and_regex_rules() { - // Include all customer keyspaces except one: - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!customers_test")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("customers_prod"); - - // Exclude all customer keyspaces except one (also implies include every other keyspace): - filter = KeyspaceFilter.newInstance("test", Arrays.asList("!/^customers.*/", "customers_test")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)) - .containsOnly("customers_test", "inventory_test", "inventory_prod", "system"); - } - - @Test - public void should_apply_intersecting_regex_rules() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!/.*test$/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("customers_prod"); - - // Throwing an exact name in the mix doesn't change the other rules - filter = - KeyspaceFilter.newInstance( - "test", Arrays.asList("inventory_prod", "/^customers.*/", "!/.*test$/")); - assertThat(filter.getWhereClause()).isEmpty(); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_prod", "customers_prod"); - } - - @Test - public void should_skip_malformed_rule() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "customers_test", "//")); - assertThat(filter.getWhereClause()) - .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); - } - - @Test - public void should_skip_invalid_regex() { - KeyspaceFilter filter = - KeyspaceFilter.newInstance( - "test", Arrays.asList("inventory_test", "customers_test", "/*/")); - 
assertThat(filter.getWhereClause()) - .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); - assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); - } - - private static Set apply(KeyspaceFilter filter, Set keyspaces) { - return keyspaces.stream().filter(filter::includes).collect(Collectors.toSet()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java deleted file mode 100644 index e0da405993b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.schema.queries; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; -import io.netty.channel.embedded.EmbeddedChannel; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public abstract class SchemaQueriesTest { - - protected static final CqlIdentifier KS_ID = CqlIdentifier.fromInternal("ks"); - protected static final CqlIdentifier KS1_ID = CqlIdentifier.fromInternal("ks1"); - protected static final CqlIdentifier KS2_ID = CqlIdentifier.fromInternal("ks2"); - protected static final CqlIdentifier FOO_ID = CqlIdentifier.fromInternal("foo"); - - @Mock protected Node node; - @Mock protected DriverExecutionProfile config; - @Mock protected DriverChannel driverChannel; - protected EmbeddedChannel channel; - - @Before - public void setup() { - // Whatever, not actually used because the requests are mocked - when(config.getDuration(DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT)) - .thenReturn(Duration.ZERO); - when(config.getInt(DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE)).thenReturn(5000); - - channel = new EmbeddedChannel(); - driverChannel = mock(DriverChannel.class); - 
when(driverChannel.eventLoop()).thenReturn(channel.eventLoop()); - } - - protected static AdminRow mockRow(String... values) { - AdminRow row = mock(AdminRow.class); - assertThat(values.length % 2).as("Expecting an even number of parameters").isZero(); - for (int i = 0; i < values.length / 2; i++) { - when(row.getString(values[i * 2])).thenReturn(values[i * 2 + 1]); - } - return row; - } - - protected static AdminResult mockResult(AdminRow... rows) { - return mockResult(null, rows); - } - - protected static AdminResult mockResult(AdminResult next, AdminRow... rows) { - AdminResult result = mock(AdminResult.class); - if (next == null) { - when(result.hasNextPage()).thenReturn(false); - } else { - when(result.hasNextPage()).thenReturn(true); - when(result.nextPage()).thenReturn(CompletableFuture.completedFuture(next)); - } - when(result.iterator()).thenReturn(Iterators.forArray(rows)); - return result; - } - - protected static class Call { - final String query; - final CompletableFuture result; - - Call(String query) { - this.query = query; - this.result = new CompletableFuture<>(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java deleted file mode 100644 index 4f124d2c4a0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.schema.refresh; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; -import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultKeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SchemaRefreshTest { - - private static final UserDefinedType OLD_T1 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t1")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - private static final 
UserDefinedType OLD_T2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t2")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - private static final DefaultKeyspaceMetadata OLD_KS1 = newKeyspace("ks1", true, OLD_T1, OLD_T2); - - @Mock private InternalDriverContext context; - @Mock private ChannelFactory channelFactory; - private DefaultMetadata oldMetadata; - - @Before - public void setup() { - when(context.getChannelFactory()).thenReturn(channelFactory); - oldMetadata = - DefaultMetadata.EMPTY.withSchema( - ImmutableMap.of(OLD_KS1.getName(), OLD_KS1), false, context); - } - - @Test - public void should_detect_dropped_keyspace() { - SchemaRefresh refresh = new SchemaRefresh(Collections.emptyMap()); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).isEmpty(); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.dropped(OLD_KS1)); - } - - @Test - public void should_detect_created_keyspace() { - DefaultKeyspaceMetadata ks2 = newKeyspace("ks2", true); - SchemaRefresh refresh = - new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), OLD_KS1, ks2.getName(), ks2)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(2); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); - } - - @Test - public void should_detect_top_level_update_in_keyspace() { - // Change only one top-level option (durable writes) - DefaultKeyspaceMetadata newKs1 = newKeyspace("ks1", false, OLD_T1, OLD_T2); - SchemaRefresh refresh = new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces()).hasSize(1); - assertThat(result.events).containsExactly(KeyspaceChangeEvent.updated(OLD_KS1, newKs1)); - } - - @Test - 
public void should_detect_updated_children_in_keyspace() { - // Drop one type, modify the other and add a third one - UserDefinedType newT2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t2")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.TEXT) - .build(); - UserDefinedType t3 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t3")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - DefaultKeyspaceMetadata newKs1 = newKeyspace("ks1", true, newT2, t3); - - SchemaRefresh refresh = new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces().get(OLD_KS1.getName())).isEqualTo(newKs1); - assertThat(result.events) - .containsExactly( - TypeChangeEvent.dropped(OLD_T1), - TypeChangeEvent.updated(OLD_T2, newT2), - TypeChangeEvent.created(t3)); - } - - @Test - public void should_detect_top_level_change_and_children_changes() { - // Drop one type, modify the other and add a third one - UserDefinedType newT2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t2")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.TEXT) - .build(); - UserDefinedType t3 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks1"), CqlIdentifier.fromInternal("t3")) - .withField(CqlIdentifier.fromInternal("i"), DataTypes.INT) - .build(); - // Also disable durable writes - DefaultKeyspaceMetadata newKs1 = newKeyspace("ks1", false, newT2, t3); - - SchemaRefresh refresh = new SchemaRefresh(ImmutableMap.of(OLD_KS1.getName(), newKs1)); - MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); - assertThat(result.newMetadata.getKeyspaces().get(OLD_KS1.getName())).isEqualTo(newKs1); - assertThat(result.events) - .containsExactly( - KeyspaceChangeEvent.updated(OLD_KS1, 
newKs1), - TypeChangeEvent.dropped(OLD_T1), - TypeChangeEvent.updated(OLD_T2, newT2), - TypeChangeEvent.created(t3)); - } - - private static DefaultKeyspaceMetadata newKeyspace( - String name, boolean durableWrites, UserDefinedType... userTypes) { - ImmutableMap.Builder typesMapBuilder = ImmutableMap.builder(); - for (UserDefinedType type : userTypes) { - typesMapBuilder.put(type.getName(), type); - } - return new DefaultKeyspaceMetadata( - CqlIdentifier.fromInternal(name), - durableWrites, - false, - Collections.emptyMap(), - typesMapBuilder.build(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java deleted file mode 100644 index 238f4e0687a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Test; - -/** @see TokenRangeTest */ -public class ByteOrderedTokenRangeTest { - - private static final String MIN = "0x"; - - @Test - public void should_split_range() { - assertThat(range("0x0a", "0x0d").splitEvenly(3)) - .containsExactly(range("0x0a", "0x0b"), range("0x0b", "0x0c"), range("0x0c", "0x0d")); - } - - @Test - public void should_split_range_producing_empty_splits_near_ring_end() { - // 0x00 is the first token following min. - // This is an edge case where we want to make sure we don't accidentally generate the ]min,min] - // range (which is the whole ring): - assertThat(range(MIN, "0x00").splitEvenly(3)) - .containsExactly(range(MIN, "0x00"), range("0x00", "0x00"), range("0x00", "0x00")); - } - - @Test - public void should_split_range_when_padding_produces_same_token() { - // To compute the ranges, we pad with trailing zeroes until the range is big enough for the - // number of splits. - // But in this case padding produces the same token 0x1100, so adding more zeroes wouldn't help. 
- assertThat(range("0x11", "0x1100").splitEvenly(3)) - .containsExactly( - range("0x11", "0x1100"), range("0x1100", "0x1100"), range("0x1100", "0x1100")); - } - - @Test - public void should_split_range_that_wraps_around_the_ring() { - assertThat(range("0x0d", "0x0a").splitEvenly(2)) - .containsExactly(range("0x0d", "0x8c"), range("0x8c", "0x0a")); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_split_whole_ring() { - range(MIN, MIN).splitEvenly(1); - } - - private ByteOrderedTokenRange range(String start, String end) { - return new ByteOrderedTokenRange( - new ByteOrderedToken(Bytes.fromHexString(start)), - new ByteOrderedToken(Bytes.fromHexString(end))); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java deleted file mode 100644 index 3170e2dd6b2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class DefaultTokenMapTest { - - private static final String DC1 = "DC1"; - private static final String DC2 = "DC2"; - private static final String RACK1 = "RACK1"; - private static final String RACK2 = "RACK2"; - - private static final CqlIdentifier KS1 = CqlIdentifier.fromInternal("ks1"); - private static final CqlIdentifier KS2 = CqlIdentifier.fromInternal("ks2"); - - private static final TokenFactory TOKEN_FACTORY = new Murmur3TokenFactory(); - - private static final String TOKEN1 = "-9000000000000000000"; - private static final String TOKEN2 = "-6000000000000000000"; - private static final String TOKEN3 = "4000000000000000000"; - private static final 
String TOKEN4 = "9000000000000000000"; - private static final TokenRange RANGE12 = range(TOKEN1, TOKEN2); - private static final TokenRange RANGE23 = range(TOKEN2, TOKEN3); - private static final TokenRange RANGE34 = range(TOKEN3, TOKEN4); - private static final TokenRange RANGE41 = range(TOKEN4, TOKEN1); - private static final TokenRange FULL_RING = - range(TOKEN_FACTORY.minToken(), TOKEN_FACTORY.minToken()); - - // Some random routing keys that land in the ranges above (they were generated manually) - private static ByteBuffer ROUTING_KEY12 = TypeCodecs.BIGINT.encode(2L, DefaultProtocolVersion.V3); - private static ByteBuffer ROUTING_KEY23 = TypeCodecs.BIGINT.encode(0L, DefaultProtocolVersion.V3); - private static ByteBuffer ROUTING_KEY34 = TypeCodecs.BIGINT.encode(1L, DefaultProtocolVersion.V3); - private static ByteBuffer ROUTING_KEY41 = - TypeCodecs.BIGINT.encode(99L, DefaultProtocolVersion.V3); - - private static final ImmutableMap REPLICATE_ON_BOTH_DCS = - ImmutableMap.of( - "class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DC1, "1", DC2, "1"); - private static final ImmutableMap REPLICATE_ON_DC1 = - ImmutableMap.of("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DC1, "1"); - - @Mock private InternalDriverContext context; - private ReplicationStrategyFactory replicationStrategyFactory; - - @Before - public void setup() { - replicationStrategyFactory = new DefaultReplicationStrategyFactory(context); - } - - @Test - public void should_build_token_map() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List keyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - - // When - DefaultTokenMap tokenMap = - 
DefaultTokenMap.build(nodes, keyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - - // Then - assertThat(tokenMap.getTokenRanges()).containsExactly(RANGE12, RANGE23, RANGE34, RANGE41); - - // For KS1, each node gets its primary range, plus the one of the previous node in the other DC - assertThat(tokenMap.getTokenRanges(KS1, node1)).containsOnly(RANGE41, RANGE34); - assertThat(tokenMap.getTokenRanges(KS1, node2)).containsOnly(RANGE12, RANGE41); - assertThat(tokenMap.getTokenRanges(KS1, node3)).containsOnly(RANGE23, RANGE12); - assertThat(tokenMap.getTokenRanges(KS1, node4)).containsOnly(RANGE34, RANGE23); - - assertThat(tokenMap.getReplicas(KS1, RANGE12)).containsOnly(node2, node3); - assertThat(tokenMap.getReplicas(KS1, RANGE23)).containsOnly(node3, node4); - assertThat(tokenMap.getReplicas(KS1, RANGE34)).containsOnly(node1, node4); - assertThat(tokenMap.getReplicas(KS1, RANGE41)).containsOnly(node1, node2); - - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY12)).containsOnly(node2, node3); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY23)).containsOnly(node3, node4); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY34)).containsOnly(node1, node4); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY41)).containsOnly(node1, node2); - - // KS2 is only replicated on DC1 - assertThat(tokenMap.getTokenRanges(KS2, node1)).containsOnly(RANGE41, RANGE34); - assertThat(tokenMap.getTokenRanges(KS2, node3)).containsOnly(RANGE23, RANGE12); - assertThat(tokenMap.getTokenRanges(KS2, node2)).isEmpty(); - assertThat(tokenMap.getTokenRanges(KS2, node4)).isEmpty(); - - assertThat(tokenMap.getReplicas(KS2, RANGE12)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, RANGE23)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, RANGE34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, RANGE41)).containsOnly(node1); - - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY12)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, 
ROUTING_KEY23)).containsOnly(node3); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY41)).containsOnly(node1); - } - - @Test - public void should_build_token_map_with_single_node() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - List nodes = ImmutableList.of(node1); - List keyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - - // When - DefaultTokenMap tokenMap = - DefaultTokenMap.build(nodes, keyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - - // Then - assertThat(tokenMap.getTokenRanges()).containsExactly(FULL_RING); - - assertThat(tokenMap.getTokenRanges(KS1, node1)).containsOnly(FULL_RING); - assertThat(tokenMap.getReplicas(KS1, FULL_RING)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY12)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY23)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS1, ROUTING_KEY41)).containsOnly(node1); - - assertThat(tokenMap.getTokenRanges(KS2, node1)).containsOnly(FULL_RING); - assertThat(tokenMap.getReplicas(KS2, FULL_RING)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY12)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY23)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY34)).containsOnly(node1); - assertThat(tokenMap.getReplicas(KS2, ROUTING_KEY41)).containsOnly(node1); - } - - @Test - public void should_refresh_when_keyspace_replication_has_not_changed() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = 
ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - - // When - // The schema gets refreshed, but no keyspaces are created or dropped, and the replication - // settings do not change (since we mock everything it looks the same here, but it could be a - // new table, etc). - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - // Nothing was recomputed - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.replicationConfigs).isSameAs(oldTokenMap.replicationConfigs); - assertThat(newTokenMap.keyspaceMaps).isSameAs(oldTokenMap.keyspaceMaps); - } - - @Test - public void should_refresh_when_new_keyspace_with_existing_replication() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - - // When - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = 
- oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).isEqualTo(oldTokenMap.keyspaceMaps); - assertThat(newTokenMap.replicationConfigs) - .hasSize(2) - .containsEntry(KS1, REPLICATE_ON_BOTH_DCS) - .containsEntry(KS2, REPLICATE_ON_BOTH_DCS); - } - - @Test - public void should_refresh_when_new_keyspace_with_new_replication() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - - // When - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS, REPLICATE_ON_DC1); - assertThat(newTokenMap.replicationConfigs) - .hasSize(2) - .containsEntry(KS1, REPLICATE_ON_BOTH_DCS) - .containsEntry(KS2, REPLICATE_ON_DC1); - } - - @Test - public void should_refresh_when_dropped_keyspace_with_replication_still_used() { - // Given - Node node1 = mockNode(DC1, RACK1, 
ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - - // When - List newKeyspaces = - ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - assertThat(newTokenMap.replicationConfigs).hasSize(1).containsEntry(KS1, REPLICATE_ON_BOTH_DCS); - } - - @Test - public void should_refresh_when_dropped_keyspace_with_replication_not_used_anymore() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS, REPLICATE_ON_DC1); - - // When - List newKeyspaces = - 
ImmutableList.of(mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - assertThat(newTokenMap.replicationConfigs).hasSize(1).containsEntry(KS1, REPLICATE_ON_BOTH_DCS); - } - - @Test - public void should_refresh_when_updated_keyspace_with_different_replication() { - // Given - Node node1 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN1)); - Node node2 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN2)); - Node node3 = mockNode(DC1, RACK1, ImmutableSet.of(TOKEN3)); - Node node4 = mockNode(DC2, RACK2, ImmutableSet.of(TOKEN4)); - List nodes = ImmutableList.of(node1, node2, node3, node4); - List oldKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_DC1)); - DefaultTokenMap oldTokenMap = - DefaultTokenMap.build( - nodes, oldKeyspaces, TOKEN_FACTORY, replicationStrategyFactory, "test"); - assertThat(oldTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS, REPLICATE_ON_DC1); - - // When - List newKeyspaces = - ImmutableList.of( - mockKeyspace(KS1, REPLICATE_ON_BOTH_DCS), mockKeyspace(KS2, REPLICATE_ON_BOTH_DCS)); - DefaultTokenMap newTokenMap = - oldTokenMap.refresh(nodes, newKeyspaces, replicationStrategyFactory); - - // Then - assertThat(newTokenMap.tokenRanges).isSameAs(oldTokenMap.tokenRanges); - assertThat(newTokenMap.tokenRangesByPrimary).isSameAs(oldTokenMap.tokenRangesByPrimary); - assertThat(newTokenMap.keyspaceMaps).containsOnlyKeys(REPLICATE_ON_BOTH_DCS); - assertThat(newTokenMap.replicationConfigs) - .hasSize(2) - .containsEntry(KS1, REPLICATE_ON_BOTH_DCS) - .containsEntry(KS2, REPLICATE_ON_BOTH_DCS); - } - - private DefaultNode mockNode(String dc, String 
rack, Set tokens) { - DefaultNode node = mock(DefaultNode.class); - when(node.getDatacenter()).thenReturn(dc); - when(node.getRack()).thenReturn(rack); - when(node.getRawTokens()).thenReturn(tokens); - return node; - } - - private KeyspaceMetadata mockKeyspace(CqlIdentifier name, Map replicationConfig) { - KeyspaceMetadata keyspace = mock(KeyspaceMetadata.class); - when(keyspace.getName()).thenReturn(name); - when(keyspace.getReplication()).thenReturn(replicationConfig); - return keyspace; - } - - private static TokenRange range(String start, String end) { - return range(TOKEN_FACTORY.parse(start), TOKEN_FACTORY.parse(end)); - } - - private static TokenRange range(Token startToken, Token endToken) { - return new Murmur3TokenRange((Murmur3Token) startToken, (Murmur3Token) endToken); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java deleted file mode 100644 index e5c1a0fc47c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -/** @see TokenRangeTest */ -public class Murmur3TokenRangeTest { - - private static final long MIN = -9223372036854775808L; - private static final long MAX = 9223372036854775807L; - - @Test - public void should_split_range() { - assertThat(range(MIN, 4611686018427387904L).splitEvenly(3)) - .containsExactly( - range(MIN, -4611686018427387904L), - range(-4611686018427387904L, 0), - range(0, 4611686018427387904L)); - } - - @Test - public void should_split_range_that_wraps_around_the_ring() { - assertThat(range(4611686018427387904L, 0).splitEvenly(3)) - .containsExactly( - range(4611686018427387904L, -9223372036854775807L), - range(-9223372036854775807L, -4611686018427387903L), - range(-4611686018427387903L, 0)); - } - - @Test - public void should_split_range_when_division_not_integral() { - assertThat(range(0, 11).splitEvenly(3)).containsExactly(range(0, 4), range(4, 8), range(8, 11)); - } - - @Test - public void should_split_range_producing_empty_splits() { - assertThat(range(0, 2).splitEvenly(5)) - .containsExactly(range(0, 1), range(1, 2), range(2, 2), range(2, 2), range(2, 2)); - } - - @Test - public void should_split_range_producing_empty_splits_near_ring_end() { - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] - // range (which is the whole ring) - assertThat(range(MAX, MIN).splitEvenly(3)) - .containsExactly(range(MAX, MAX), range(MAX, MAX), range(MAX, MIN)); - - assertThat(range(MIN, MIN + 1).splitEvenly(3)) - .containsExactly(range(MIN, MIN + 1), range(MIN + 1, MIN + 1), range(MIN + 1, MIN + 1)); - } - - @Test - public void should_split_whole_ring() { - assertThat(range(MIN, MIN).splitEvenly(3)) - .containsExactly( - range(MIN, 
-3074457345618258603L), - range(-3074457345618258603L, 3074457345618258602L), - range(3074457345618258602L, MIN)); - } - - private Murmur3TokenRange range(long start, long end) { - return new Murmur3TokenRange(new Murmur3Token(start), new Murmur3Token(end)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java deleted file mode 100644 index 42dc5e69199..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java +++ /dev/null @@ -1,674 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public class NetworkTopologyReplicationStrategyTest { - - private static final String DC1 = "DC1"; - private static final String DC2 = "DC2"; - private static final String DC3 = "DC3"; - private static final String RACK11 = "RACK11"; - private static final String RACK12 = "RACK12"; - private static final String RACK21 = "RACK21"; - private static final String RACK22 = "RACK22"; - private static final String RACK31 = "RACK31"; - - private static final Token TOKEN01 = new Murmur3Token(-9000000000000000000L); - private static final Token TOKEN02 = new Murmur3Token(-8000000000000000000L); - private static final Token TOKEN03 = new Murmur3Token(-7000000000000000000L); - private static final Token TOKEN04 = new Murmur3Token(-6000000000000000000L); - private static final 
Token TOKEN05 = new Murmur3Token(-5000000000000000000L); - private static final Token TOKEN06 = new Murmur3Token(-4000000000000000000L); - private static final Token TOKEN07 = new Murmur3Token(-3000000000000000000L); - private static final Token TOKEN08 = new Murmur3Token(-2000000000000000000L); - private static final Token TOKEN09 = new Murmur3Token(-1000000000000000000L); - private static final Token TOKEN10 = new Murmur3Token(0L); - private static final Token TOKEN11 = new Murmur3Token(1000000000000000000L); - private static final Token TOKEN12 = new Murmur3Token(2000000000000000000L); - private static final Token TOKEN13 = new Murmur3Token(3000000000000000000L); - private static final Token TOKEN14 = new Murmur3Token(4000000000000000000L); - private static final Token TOKEN15 = new Murmur3Token(5000000000000000000L); - private static final Token TOKEN16 = new Murmur3Token(6000000000000000000L); - private static final Token TOKEN17 = new Murmur3Token(7000000000000000000L); - private static final Token TOKEN18 = new Murmur3Token(8000000000000000000L); - private static final Token TOKEN19 = new Murmur3Token(9000000000000000000L); - - @Mock private Node node1, node2, node3, node4, node5, node6, node7, node8; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - /** 4 tokens, 2 nodes in 2 DCs, RF = 1 in each DC. 
*/ - @Test - public void should_compute_for_simple_layout() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN04, TOKEN14, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN04, node2, TOKEN14, node1, TOKEN19, node2); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "1", DC2, "1"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - // Note: this also asserts the iteration order of the sets (unlike containsEntry(token, set)) - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN04)); - } - - /** 8 tokens, 4 nodes in 2 DCs in the same racks, RF = 1 in each DC. 
*/ - @Test - public void should_compute_for_simple_layout_with_multiple_nodes_per_rack() { - // Given - List ring = - ImmutableList.of(TOKEN01, TOKEN03, TOKEN05, TOKEN07, TOKEN13, TOKEN15, TOKEN17, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN03, node2) - .put(TOKEN05, node3) - .put(TOKEN07, node4) - .put(TOKEN13, node1) - .put(TOKEN15, node2) - .put(TOKEN17, node3) - .put(TOKEN19, node4) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "1", DC2, "1"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node2, node3); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node3, node4); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node4, node1); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN07)); - } - - /** 6 tokens, 3 nodes in 3 DCs, RF = 1 in each DC. 
*/ - @Test - public void should_compute_for_simple_layout_with_3_dcs() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN05, TOKEN09, TOKEN11, TOKEN15, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC3, RACK31); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN05, node2) - .put(TOKEN09, node3) - .put(TOKEN11, node1) - .put(TOKEN15, node2) - .put(TOKEN19, node3) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy( - ImmutableMap.of(DC1, "1", DC2, "1", DC3, "1"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node2, node3, node1); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node1, node2); - assertThat(replicasByToken.get(TOKEN11)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN09)); - } - - /** 10 tokens, 4 nodes in 2 DCs, RF = 2 in each DC, 1 node owns 4 tokens, the others only 2. 
*/ - @Test - public void should_compute_for_unbalanced_ring() { - // Given - List ring = - ImmutableList.of( - TOKEN01, TOKEN03, TOKEN05, TOKEN07, TOKEN09, TOKEN11, TOKEN13, TOKEN15, TOKEN17, - TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN03, node1) - .put(TOKEN05, node2) - .put(TOKEN07, node3) - .put(TOKEN09, node4) - .put(TOKEN11, node1) - .put(TOKEN13, node1) - .put(TOKEN15, node2) - .put(TOKEN17, node3) - .put(TOKEN19, node4) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "2", DC2, "2"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN03)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node2, node3, node4, node1); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node3, node4, node1, node2); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node4, node1, node2, node3); - assertThat(replicasByToken.get(TOKEN11)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN09)); - ; - } - - /** 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 2 in each DC. 
*/ - @Test - public void should_compute_with_multiple_racks_per_dc() { - // Given - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN12, - TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK12); - locate(node4, DC2, RACK22); - locate(node5, DC1, RACK11); - locate(node6, DC2, RACK21); - locate(node7, DC1, RACK12); - locate(node8, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node2) - .put(TOKEN03, node3) - .put(TOKEN04, node4) - .put(TOKEN05, node5) - .put(TOKEN06, node6) - .put(TOKEN07, node7) - .put(TOKEN08, node8) - .put(TOKEN12, node1) - .put(TOKEN13, node2) - .put(TOKEN14, node3) - .put(TOKEN15, node4) - .put(TOKEN16, node5) - .put(TOKEN17, node6) - .put(TOKEN18, node7) - .put(TOKEN19, node8) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "2", DC2, "2"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN02)).containsExactly(node2, node3, node4, node5); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node3, node4, node5, node6); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node4, node5, node6, node7); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node5, node6, node7, node8); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node6, node7, node8, node1); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node7, node8, node1, node2); - assertThat(replicasByToken.get(TOKEN08)).containsExactly(node8, node1, node2, node3); - 
assertThat(replicasByToken.get(TOKEN12)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN02)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN04)); - assertThat(replicasByToken.get(TOKEN16)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN06)); - assertThat(replicasByToken.get(TOKEN18)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN08)); - } - - /** - * 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 3 in each DC. - * - *

The nodes that are in the same rack occupy consecutive positions on the ring. We want to - * reproduce the case where we hit the same rack when we look for the second replica of a DC; the - * expected behavior is to skip the node and go to the next rack, and come back to the first rack - * for the third replica. - */ - @Test - public void should_pick_dc_replicas_in_different_racks_first() { - // Given - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN12, - TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - locate(node5, DC1, RACK12); - locate(node6, DC2, RACK22); - locate(node7, DC1, RACK12); - locate(node8, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node2) - .put(TOKEN03, node3) - .put(TOKEN04, node4) - .put(TOKEN05, node5) - .put(TOKEN06, node6) - .put(TOKEN07, node7) - .put(TOKEN08, node8) - .put(TOKEN12, node1) - .put(TOKEN13, node2) - .put(TOKEN14, node3) - .put(TOKEN15, node4) - .put(TOKEN16, node5) - .put(TOKEN17, node6) - .put(TOKEN18, node7) - .put(TOKEN19, node8) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "3", DC2, "3"), "test"); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node2, node5, node3, node6, node4); - assertThat(replicasByToken.get(TOKEN02)) - .containsExactly(node2, node3, node5, node6, node4, node7); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node3, node4, node5, node6, node7, node8); - assertThat(replicasByToken.get(TOKEN04)) - .containsExactly(node4, node5, node6, node8, node1, node7); - 
assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node5, node6, node1, node7, node2, node8); - assertThat(replicasByToken.get(TOKEN06)) - .containsExactly(node6, node7, node1, node2, node8, node3); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node7, node8, node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN08)) - .containsExactly(node8, node1, node2, node4, node5, node3); - assertThat(replicasByToken.get(TOKEN12)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN02)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN04)); - assertThat(replicasByToken.get(TOKEN16)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN06)); - assertThat(replicasByToken.get(TOKEN18)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN08)); - } - - /** - * 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 3 in each DC. - * - *

This is the same scenario as {@link #should_pick_dc_replicas_in_different_racks_first()}, - * except that each node owns consecutive tokens on the ring. - */ - @Test - public void should_pick_dc_replicas_in_different_racks_first_when_nodes_own_consecutive_tokens() { - // When - Map> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(3); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(16); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node5, node3, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node3, node5, node7, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN04)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node5, node2, node6, node4, node1, node7); - assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node7, node2, node6, node4, node1, node3); - assertThat(replicasByToken.get(TOKEN08)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node2, node6, node4, node1, node5, node3); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node4, node6, node8, node1, node5, node3); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN14)); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node6, node1, node5, node3, node2, node8); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN16)); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node8, node1, node5, node3, node2, node4); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN18)); - } - - /** - * 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 4 in 
each DC. - * - *

This is the same test as {@link - * #should_pick_dc_replicas_in_different_racks_first_when_nodes_own_consecutive_tokens()}, except - * for the replication factors. - */ - @Test - public void should_pick_dc_replicas_in_different_racks_first_when_all_nodes_contain_all_data() { - // When - Map> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(4); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(16); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node5, node3, node7, node2, node6, node4, node8); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node3, node5, node7, node2, node6, node4, node8, node1); - assertThat(replicasByToken.get(TOKEN04)).isSameAs(replicasByToken.get(TOKEN03)); - assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node5, node2, node6, node4, node8, node1, node7, node3); - assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN05)); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node7, node2, node6, node4, node8, node1, node3, node5); - assertThat(replicasByToken.get(TOKEN08)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node2, node6, node4, node8, node1, node5, node3, node7); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node4, node6, node8, node1, node5, node3, node7, node2); - assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN14)); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node6, node1, node5, node3, node7, node2, node8, node4); - assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN16)); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node8, node1, node5, node3, node7, node2, node4, node6); - 
assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN18)); - } - - private Map> computeWithDifferentRacksAndConsecutiveTokens( - int replicationFactor) { - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN12, - TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - locate(node5, DC1, RACK12); - locate(node6, DC2, RACK22); - locate(node7, DC1, RACK12); - locate(node8, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node1) - .put(TOKEN03, node3) - .put(TOKEN04, node3) - .put(TOKEN05, node5) - .put(TOKEN06, node5) - .put(TOKEN07, node7) - .put(TOKEN08, node7) - .put(TOKEN12, node2) - .put(TOKEN13, node2) - .put(TOKEN14, node4) - .put(TOKEN15, node4) - .put(TOKEN16, node6) - .put(TOKEN17, node6) - .put(TOKEN18, node8) - .put(TOKEN19, node8) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy( - ImmutableMap.of( - DC1, Integer.toString(replicationFactor), DC2, Integer.toString(replicationFactor)), - "test"); - - return strategy.computeReplicasByToken(tokenToPrimary, ring); - } - - /** - * 18 tokens, 6 nodes in 2 DCs with 2 in rack 1 and 1 in rack 2, RF = 2 in each DC. - * - *

This is taken from a real-life cluster. - */ - @Test - public void should_compute_complex_layout() { - // When - Map> replicasByToken = computeComplexLayout(2); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(18); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node5, node2, node6); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node5, node3, node2, node6); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node3, node5, node2, node6); - assertThat(replicasByToken.get(TOKEN05)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node5, node2, node6, node3); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node2, node6, node3, node5); - assertThat(replicasByToken.get(TOKEN08)).containsExactly(node6, node3, node4, node5); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node4, node5, node6); - assertThat(replicasByToken.get(TOKEN10)).containsExactly(node4, node5, node6, node3); - assertThat(replicasByToken.get(TOKEN11)).containsExactly(node5, node4, node6, node3); - assertThat(replicasByToken.get(TOKEN12)).containsExactly(node4, node6, node3, node5); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node6, node3, node2, node5); - assertThat(replicasByToken.get(TOKEN16)).containsExactly(node3, node2, node6, node5); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node2, node6, node1, node5); - assertThat(replicasByToken.get(TOKEN18)).containsExactly(node6, node1, node5, node2); - } - - /** - * 18 tokens, 6 nodes in 2 DCs with 2 in rack 1 and 1 in rack 2, RF = 4 in each DC. - * - *

This is the same test as {@link #should_compute_complex_layout()}, but with RF = 4, which is - * too high for this cluster (it would require 8 nodes). - */ - @Test - public void should_compute_complex_layout_with_rf_too_high() { - // When - Map> replicasByToken = computeComplexLayout(4); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(18); - assertThat(replicasByToken.get(TOKEN01)) - .containsExactly(node1, node5, node3, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)) - .containsExactly(node5, node3, node1, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN04)) - .containsExactly(node3, node5, node1, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN05)) - .containsExactly(node1, node5, node2, node6, node3, node4); - assertThat(replicasByToken.get(TOKEN06)) - .containsExactly(node5, node2, node6, node3, node4, node1); - assertThat(replicasByToken.get(TOKEN07)) - .containsExactly(node2, node6, node3, node4, node5, node1); - assertThat(replicasByToken.get(TOKEN08)) - .containsExactly(node6, node3, node4, node5, node2, node1); - assertThat(replicasByToken.get(TOKEN09)) - .containsExactly(node3, node4, node5, node6, node2, node1); - assertThat(replicasByToken.get(TOKEN10)) - .containsExactly(node4, node5, node6, node2, node3, node1); - assertThat(replicasByToken.get(TOKEN11)) - .containsExactly(node5, node4, node6, node2, node3, node1); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node4, node6, node2, node3, node5, node1); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node2, node6, node3, node5, node1, node4); - assertThat(replicasByToken.get(TOKEN15)) - .containsExactly(node6, node3, node2, node5, node1, node4); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node3, node2, node6, node5, node1, node4); - 
assertThat(replicasByToken.get(TOKEN17)) - .containsExactly(node2, node6, node1, node5, node3, node4); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node6, node1, node5, node3, node2, node4); - } - - private Map> computeComplexLayout(int replicationFactor) { - List ring = - ImmutableList.of( - TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN09, - TOKEN10, TOKEN11, TOKEN12, TOKEN13, TOKEN14, TOKEN15, TOKEN16, TOKEN17, TOKEN18); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - locate(node3, DC1, RACK11); - locate(node4, DC2, RACK21); - locate(node5, DC1, RACK12); - locate(node6, DC2, RACK22); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node1) - .put(TOKEN03, node5) - .put(TOKEN04, node3) - .put(TOKEN05, node1) - .put(TOKEN06, node5) - .put(TOKEN07, node2) - .put(TOKEN08, node6) - .put(TOKEN09, node3) - .put(TOKEN10, node4) - .put(TOKEN11, node5) - .put(TOKEN12, node4) - .put(TOKEN13, node4) - .put(TOKEN14, node2) - .put(TOKEN15, node6) - .put(TOKEN16, node3) - .put(TOKEN17, node2) - .put(TOKEN18, node6) - .build(); - ReplicationStrategy strategy = - new NetworkTopologyReplicationStrategy( - ImmutableMap.of( - DC1, Integer.toString(replicationFactor), DC2, Integer.toString(replicationFactor)), - "test"); - - return strategy.computeReplicasByToken(tokenToPrimary, ring); - } - - /** - * When the replication factors are invalid (user error) and a datacenter has a replication factor - * that cannot be met, we want to quickly abort and move on to the next DC (instead of keeping - * scanning the ring in vain, which results in quadratic complexity). We also log a warning to - * give the user a chance to fix their settings. 
- * - * @see JAVA-702 - * @see JAVA-859 - */ - @Test - public void should_abort_early_and_log_when_bad_replication_factor_cannot_be_met() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN04, TOKEN14, TOKEN19); - locate(node1, DC1, RACK11); - locate(node2, DC2, RACK21); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN04, node2, TOKEN14, node1, TOKEN19, node2); - Logger logger = (Logger) LoggerFactory.getLogger(NetworkTopologyReplicationStrategy.class); - logger.addAppender(appender); - - try { - // When - int traversedTokensForValidSettings = - countTraversedTokens(ring, tokenToPrimary, ImmutableMap.of(DC1, "1", DC2, "1")); - - // Then - // No logs: - verify(appender, never()).doAppend(any(ILoggingEvent.class)); - - // When - int traversedTokensForInvalidSettings = - countTraversedTokens(ring, tokenToPrimary, ImmutableMap.of(DC1, "1", DC2, "1", DC3, "1")); - // Did not take more steps than the valid settings - assertThat(traversedTokensForInvalidSettings).isEqualTo(traversedTokensForValidSettings); - // Did log: - verify(appender).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getLevel()).isEqualTo(Level.WARN); - assertThat(log.getMessage()).contains("could not achieve replication factor"); - } finally { - logger.detachAppender(appender); - } - } - - // Counts the number of steps on the ring for a particular computation - private int countTraversedTokens( - List ring, - Map tokenToPrimary, - ImmutableMap replicationConfig) { - AtomicInteger count = new AtomicInteger(); - List ringSpy = spy(ring); - when(ringSpy.get(anyInt())) - .thenAnswer( - invocation -> { - count.incrementAndGet(); - return invocation.callRealMethod(); - }); - new NetworkTopologyReplicationStrategy(replicationConfig, "test") - .computeReplicasByToken(tokenToPrimary, ringSpy); - return count.get(); - } - - private void locate(Node node, String dc, String rack) { - when(node.getDatacenter()).thenReturn(dc); - 
when(node.getRack()).thenReturn(rack); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java deleted file mode 100644 index 54ac8a99738..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.math.BigInteger; -import org.junit.Test; - -public class RandomTokenRangeTest { - - private static final String MIN = "-1"; - private static final String MAX = "170141183460469231731687303715884105728"; - - @Test - public void should_split_range() { - assertThat(range("0", "127605887595351923798765477786913079296").splitEvenly(3)) - .containsExactly( - range("0", "42535295865117307932921825928971026432"), - range( - "42535295865117307932921825928971026432", "85070591730234615865843651857942052864"), - range( - "85070591730234615865843651857942052864", - "127605887595351923798765477786913079296")); - } - - @Test - public void should_split_range_that_wraps_around_the_ring() { - assertThat( - range( - "127605887595351923798765477786913079296", - "85070591730234615865843651857942052864") - .splitEvenly(3)) - .containsExactly( - range("127605887595351923798765477786913079296", "0"), - range("0", "42535295865117307932921825928971026432"), - range( - "42535295865117307932921825928971026432", - "85070591730234615865843651857942052864")); - } - - @Test - public void should_split_range_producing_empty_splits_near_ring_end() { - // These are edge cases where we want to make sure we don't accidentally generate the ]min,min] - // range (which is the whole ring) - assertThat(range(MAX, MIN).splitEvenly(3)) - .containsExactly(range(MAX, MAX), range(MAX, MAX), range(MAX, MIN)); - - assertThat(range(MIN, "0").splitEvenly(3)) - .containsExactly(range(MIN, "0"), range("0", "0"), range("0", "0")); - } - - @Test - public void should_split_whole_ring() { - assertThat(range(MIN, MIN).splitEvenly(3)) - .containsExactly( - range(MIN, "56713727820156410577229101238628035242"), - range( - "56713727820156410577229101238628035242", - "113427455640312821154458202477256070485"), - range("113427455640312821154458202477256070485", MIN)); - } - - private 
RandomTokenRange range(String start, String end) { - return new RandomTokenRange( - new RandomToken(new BigInteger(start)), new RandomToken(new BigInteger(end))); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java deleted file mode 100644 index d58d13933c2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class ReplicationFactorTest { - @Test - public void should_parse_factor_from_string() { - ReplicationFactor transFactor = ReplicationFactor.fromString("3/1"); - assertThat(transFactor.fullReplicas()).isEqualTo(2); - assertThat(transFactor.hasTransientReplicas()).isTrue(); - assertThat(transFactor.transientReplicas()).isEqualTo(1); - - ReplicationFactor factor = ReplicationFactor.fromString("3"); - assertThat(factor.fullReplicas()).isEqualTo(3); - assertThat(factor.hasTransientReplicas()).isFalse(); - assertThat(factor.transientReplicas()).isEqualTo(0); - } - - @Test - public void should_create_string_from_factor() { - ReplicationFactor transFactor = new ReplicationFactor(3, 1); - assertThat(transFactor.toString()).isEqualTo("3/1"); - ReplicationFactor factor = new ReplicationFactor(3); - assertThat(factor.toString()).isEqualTo("3"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java deleted file mode 100644 index 517d8cfdb84..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SimpleReplicationStrategyTest { - - private static final Token TOKEN01 = new Murmur3Token(-9000000000000000000L); - private static final Token TOKEN02 = new Murmur3Token(-8000000000000000000L); - private static final Token TOKEN03 = new Murmur3Token(-7000000000000000000L); - private static final Token TOKEN04 = new Murmur3Token(-6000000000000000000L); - private static final Token TOKEN05 = new Murmur3Token(-5000000000000000000L); - private static final Token TOKEN06 = new Murmur3Token(-4000000000000000000L); - private static final Token TOKEN07 = new Murmur3Token(-3000000000000000000L); - private static final Token TOKEN08 = new Murmur3Token(-2000000000000000000L); - private static final Token TOKEN09 = new Murmur3Token(-1000000000000000000L); - private static final Token TOKEN10 = new Murmur3Token(0L); - private static final Token TOKEN11 = new Murmur3Token(1000000000000000000L); - private static final Token 
TOKEN12 = new Murmur3Token(2000000000000000000L); - private static final Token TOKEN13 = new Murmur3Token(3000000000000000000L); - private static final Token TOKEN14 = new Murmur3Token(4000000000000000000L); - private static final Token TOKEN15 = new Murmur3Token(5000000000000000000L); - private static final Token TOKEN16 = new Murmur3Token(6000000000000000000L); - private static final Token TOKEN17 = new Murmur3Token(7000000000000000000L); - private static final Token TOKEN18 = new Murmur3Token(8000000000000000000L); - private static final Token TOKEN19 = new Murmur3Token(9000000000000000000L); - - @Mock private Node node1, node2, node3, node4, node5, node6; - - /** 4 tokens, 2 nodes, RF = 2. */ - @Test - public void should_compute_for_simple_layout() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node2, TOKEN14, node1, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - // Note: this also asserts the iteration order of the sets (unlike containsEntry(token, set)) - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN06)); - } - - /** 4 tokens, 2 nodes owning 2 consecutive tokens each, RF = 2. 
*/ - @Test - public void should_compute_when_nodes_own_consecutive_tokens() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node1, TOKEN14, node2, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN14)); - } - - /** 4 tokens, 1 node owns 3 of them, RF = 2. */ - @Test - public void should_compute_when_ring_unbalanced() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node1, TOKEN14, node2, TOKEN19, node1); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node1, node2); - } - - /** 4 tokens, 2 nodes, RF = 6 (too large, should be <= number of nodes). 
*/ - @Test - public void should_compute_when_replication_factor_is_larger_than_cluster_size() { - // Given - List ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); - Map tokenToPrimary = - ImmutableMap.of(TOKEN01, node1, TOKEN06, node2, TOKEN14, node1, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(6)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN06)); - } - - @Test - public void should_compute_for_complex_layout() { - // Given - List ring = - ImmutableList.builder() - .add(TOKEN01) - .add(TOKEN02) - .add(TOKEN03) - .add(TOKEN04) - .add(TOKEN05) - .add(TOKEN06) - .add(TOKEN07) - .add(TOKEN08) - .add(TOKEN09) - .add(TOKEN10) - .add(TOKEN11) - .add(TOKEN12) - .add(TOKEN13) - .add(TOKEN14) - .add(TOKEN15) - .add(TOKEN16) - .add(TOKEN17) - .add(TOKEN18) - .build(); - Map tokenToPrimary = - ImmutableMap.builder() - .put(TOKEN01, node1) - .put(TOKEN02, node1) - .put(TOKEN03, node5) - .put(TOKEN04, node3) - .put(TOKEN05, node1) - .put(TOKEN06, node5) - .put(TOKEN07, node2) - .put(TOKEN08, node6) - .put(TOKEN09, node3) - .put(TOKEN10, node4) - .put(TOKEN11, node5) - .put(TOKEN12, node4) - .put(TOKEN13, node4) - .put(TOKEN14, node2) - .put(TOKEN15, node6) - .put(TOKEN16, node3) - .put(TOKEN17, node2) - .put(TOKEN18, node6) - .build(); - - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(3)); - - // When - Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); - - // Then - 
assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); - assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node5, node3); - assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node5, node3, node1); - assertThat(replicasByToken.get(TOKEN04)).containsExactly(node3, node1, node5); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node1, node5, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node5, node2, node6); - assertThat(replicasByToken.get(TOKEN07)).containsExactly(node2, node6, node3); - assertThat(replicasByToken.get(TOKEN08)).containsExactly(node6, node3, node4); - assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node4, node5); - assertThat(replicasByToken.get(TOKEN10)).containsExactly(node4, node5, node2); - assertThat(replicasByToken.get(TOKEN11)).containsExactly(node5, node4, node2); - assertThat(replicasByToken.get(TOKEN12)).containsExactly(node4, node2, node6); - assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); - assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN07)); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node6, node3, node2); - assertThat(replicasByToken.get(TOKEN16)).containsExactly(node3, node2, node6); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node2, node6, node1); - assertThat(replicasByToken.get(TOKEN18)).containsExactly(node6, node1, node5); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java deleted file mode 100644 index 7fcd56ba86e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import java.util.List; -import org.assertj.core.api.AbstractAssert; - -public class TokenRangeAssert extends AbstractAssert { - - public TokenRangeAssert(TokenRange actual) { - super(actual, TokenRangeAssert.class); - } - - public TokenRangeAssert startsWith(Token token) { - assertThat(actual.getStart()).isEqualTo(token); - return this; - } - - public TokenRangeAssert endsWith(Token token) { - assertThat(actual.getEnd()).isEqualTo(token); - return this; - } - - public TokenRangeAssert isEmpty() { - assertThat(actual.isEmpty()).isTrue(); - return this; - } - - public TokenRangeAssert isNotEmpty() { - assertThat(actual.isEmpty()).isFalse(); - return this; - } - - public TokenRangeAssert isWrappedAround() { - assertThat(actual.isWrappedAround()).isTrue(); - - List unwrapped = actual.unwrap(); - assertThat(unwrapped.size()) - .as("%s should unwrap to two ranges, but unwrapped to %s", actual, unwrapped) - .isEqualTo(2); - - return this; - } - - public TokenRangeAssert 
isNotWrappedAround() { - assertThat(actual.isWrappedAround()).isFalse(); - assertThat(actual.unwrap()).containsExactly(actual); - return this; - } - - public TokenRangeAssert unwrapsTo(TokenRange... subRanges) { - assertThat(actual.unwrap()).containsExactly(subRanges); - return this; - } - - public TokenRangeAssert intersects(TokenRange that) { - assertThat(actual.intersects(that)).as("%s should intersect %s", actual, that).isTrue(); - assertThat(that.intersects(actual)).as("%s should intersect %s", that, actual).isTrue(); - return this; - } - - public TokenRangeAssert doesNotIntersect(TokenRange... that) { - for (TokenRange thatRange : that) { - assertThat(actual.intersects(thatRange)) - .as("%s should not intersect %s", actual, thatRange) - .isFalse(); - assertThat(thatRange.intersects(actual)) - .as("%s should not intersect %s", thatRange, actual) - .isFalse(); - } - return this; - } - - public TokenRangeAssert contains(Token token, boolean isStart) { - assertThat(((TokenRangeBase) actual).contains(actual, token, isStart)).isTrue(); - return this; - } - - public TokenRangeAssert doesNotContain(Token token, boolean isStart) { - assertThat(((TokenRangeBase) actual).contains(actual, token, isStart)).isFalse(); - return this; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java deleted file mode 100644 index 77cfbb30d77..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metadata.token; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.junit.Assert.fail; - -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import org.junit.Test; - -/** - * Covers the methods that don't depend on the underlying factory (we use Murmur3 as the - * implementation here). 
- * - * @see Murmur3TokenRangeTest - * @see ByteOrderedTokenRangeTest - * @see RandomTokenRangeTest - */ -public class TokenRangeTest { - - private Murmur3Token min = Murmur3TokenFactory.MIN_TOKEN; - - @Test - public void should_check_intersection() { - // NB - to make the test more visual, we use watch face numbers - assertThat(range(3, 9)) - .doesNotIntersect(range(11, 1)) - .doesNotIntersect(range(1, 2)) - .doesNotIntersect(range(11, 3)) - .doesNotIntersect(range(2, 3)) - .doesNotIntersect(range(3, 3)) - .intersects(range(2, 6)) - .intersects(range(2, 10)) - .intersects(range(6, 10)) - .intersects(range(4, 8)) - .intersects(range(3, 9)) - .doesNotIntersect(range(9, 10)) - .doesNotIntersect(range(10, 11)); - assertThat(range(9, 3)) - .doesNotIntersect(range(5, 7)) - .doesNotIntersect(range(7, 8)) - .doesNotIntersect(range(5, 9)) - .doesNotIntersect(range(8, 9)) - .doesNotIntersect(range(9, 9)) - .intersects(range(8, 2)) - .intersects(range(8, 4)) - .intersects(range(2, 4)) - .intersects(range(10, 2)) - .intersects(range(9, 3)) - .doesNotIntersect(range(3, 4)) - .doesNotIntersect(range(4, 5)); - assertThat(range(3, 3)).doesNotIntersect(range(3, 3)); - - // Reminder: minToken serves as both lower and upper bound - assertThat(minTo(5)) - .doesNotIntersect(range(6, 7)) - .doesNotIntersect(toMax(6)) - .intersects(range(6, 4)) - .intersects(range(2, 4)) - .intersects(minTo(4)) - .intersects(minTo(5)); - - assertThat(toMax(5)) - .doesNotIntersect(range(3, 4)) - .doesNotIntersect(minTo(4)) - .intersects(range(6, 7)) - .intersects(range(4, 1)) - .intersects(toMax(6)) - .intersects(toMax(5)); - - assertThat(fullRing()) - .intersects(range(3, 4)) - .intersects(toMax(3)) - .intersects(minTo(3)) - .doesNotIntersect(range(3, 3)); - } - - @Test - public void should_compute_intersection() { - assertThat(range(3, 9).intersectWith(range(2, 4))).isEqualTo(ImmutableList.of(range(3, 4))); - assertThat(range(3, 9).intersectWith(range(3, 5))).isEqualTo(ImmutableList.of(range(3, 5))); - 
assertThat(range(3, 9).intersectWith(range(4, 6))).isEqualTo(ImmutableList.of(range(4, 6))); - assertThat(range(3, 9).intersectWith(range(7, 9))).isEqualTo(ImmutableList.of(range(7, 9))); - assertThat(range(3, 9).intersectWith(range(8, 10))).isEqualTo(ImmutableList.of(range(8, 9))); - } - - @Test - public void should_compute_intersection_with_ranges_around_ring() { - // If a range wraps the ring (like 10, -10 does) this will produce two separate intersected - // ranges. - assertThat(range(10, -10).intersectWith(range(-20, 20))) - .isEqualTo(ImmutableList.of(range(10, 20), range(-20, -10))); - assertThat(range(-20, 20).intersectWith(range(10, -10))) - .isEqualTo(ImmutableList.of(range(10, 20), range(-20, -10))); - - // If both ranges wrap the ring, they should be merged together wrapping across the range. - assertThat(range(10, -30).intersectWith(range(20, -20))) - .isEqualTo(ImmutableList.of(range(20, -30))); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_compute_intersection_when_ranges_dont_intersect() { - range(1, 2).intersectWith(range(2, 3)); - } - - @Test - public void should_merge_with_other_range() { - assertThat(range(3, 9).mergeWith(range(2, 3))).isEqualTo(range(2, 9)); - assertThat(range(3, 9).mergeWith(range(2, 4))).isEqualTo(range(2, 9)); - assertThat(range(3, 9).mergeWith(range(11, 3))).isEqualTo(range(11, 9)); - assertThat(range(3, 9).mergeWith(range(11, 4))).isEqualTo(range(11, 9)); - - assertThat(range(3, 9).mergeWith(range(4, 8))).isEqualTo(range(3, 9)); - assertThat(range(3, 9).mergeWith(range(3, 9))).isEqualTo(range(3, 9)); - assertThat(range(3, 9).mergeWith(range(3, 3))).isEqualTo(range(3, 9)); - assertThat(range(3, 3).mergeWith(range(3, 9))).isEqualTo(range(3, 9)); - - assertThat(range(3, 9).mergeWith(range(9, 11))).isEqualTo(range(3, 11)); - assertThat(range(3, 9).mergeWith(range(8, 11))).isEqualTo(range(3, 11)); - assertThat(range(3, 9).mergeWith(range(9, 1))).isEqualTo(range(3, 1)); - 
assertThat(range(3, 9).mergeWith(range(8, 1))).isEqualTo(range(3, 1)); - - assertThat(range(3, 9).mergeWith(range(9, 3))).isEqualTo(fullRing()); - assertThat(range(3, 9).mergeWith(range(9, 4))).isEqualTo(fullRing()); - assertThat(range(3, 10).mergeWith(range(9, 4))).isEqualTo(fullRing()); - - assertThat(range(9, 3).mergeWith(range(8, 9))).isEqualTo(range(8, 3)); - assertThat(range(9, 3).mergeWith(range(8, 10))).isEqualTo(range(8, 3)); - assertThat(range(9, 3).mergeWith(range(4, 9))).isEqualTo(range(4, 3)); - assertThat(range(9, 3).mergeWith(range(4, 10))).isEqualTo(range(4, 3)); - - assertThat(range(9, 3).mergeWith(range(10, 2))).isEqualTo(range(9, 3)); - assertThat(range(9, 3).mergeWith(range(9, 3))).isEqualTo(range(9, 3)); - assertThat(range(9, 3).mergeWith(range(9, 9))).isEqualTo(range(9, 3)); - assertThat(range(9, 9).mergeWith(range(9, 3))).isEqualTo(range(9, 3)); - - assertThat(range(9, 3).mergeWith(range(3, 5))).isEqualTo(range(9, 5)); - assertThat(range(9, 3).mergeWith(range(2, 5))).isEqualTo(range(9, 5)); - assertThat(range(9, 3).mergeWith(range(3, 7))).isEqualTo(range(9, 7)); - assertThat(range(9, 3).mergeWith(range(2, 7))).isEqualTo(range(9, 7)); - - assertThat(range(9, 3).mergeWith(range(3, 9))).isEqualTo(fullRing()); - assertThat(range(9, 3).mergeWith(range(3, 10))).isEqualTo(fullRing()); - - assertThat(range(3, 3).mergeWith(range(3, 3))).isEqualTo(range(3, 3)); - - assertThat(toMax(5).mergeWith(range(6, 7))).isEqualTo(toMax(5)); - assertThat(toMax(5).mergeWith(minTo(3))).isEqualTo(range(5, 3)); - assertThat(toMax(5).mergeWith(range(3, 5))).isEqualTo(toMax(3)); - - assertThat(minTo(5).mergeWith(range(2, 3))).isEqualTo(minTo(5)); - assertThat(minTo(5).mergeWith(toMax(7))).isEqualTo(range(7, 5)); - assertThat(minTo(5).mergeWith(range(5, 7))).isEqualTo(minTo(7)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_merge_with_nonadjacent_and_disjoint_ranges() { - range(0, 5).mergeWith(range(7, 14)); - } - - @Test - public void 
should_return_non_empty_range_if_other_range_is_empty() { - assertThat(range(1, 5).mergeWith(range(5, 5))).isEqualTo(range(1, 5)); - } - - @Test - public void should_unwrap_to_non_wrapping_ranges() { - assertThat(range(9, 3)).unwrapsTo(toMax(9), minTo(3)); - assertThat(range(3, 9)).isNotWrappedAround(); - assertThat(toMax(3)).isNotWrappedAround(); - assertThat(minTo(3)).isNotWrappedAround(); - assertThat(range(3, 3)).isNotWrappedAround(); - assertThat(fullRing()).isNotWrappedAround(); - } - - @Test - public void should_split_evenly() { - // Simply exercise splitEvenly, split logic is exercised in the test of each TokenRange - // implementation - List splits = range(3, 9).splitEvenly(3); - - assertThat(splits).hasSize(3); - assertThat(splits).containsExactly(range(3, 5), range(5, 7), range(7, 9)); - } - - @Test - public void should_throw_error_with_less_than_1_splits() { - for (int i = -255; i < 1; i++) { - try { - range(0, 1).splitEvenly(i); - fail("Expected error when providing " + i + " splits."); - } catch (IllegalArgumentException e) { - // expected. 
- } - } - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_split_empty_token_range() { - range(0, 0).splitEvenly(1); - } - - @Test - public void should_create_empty_token_ranges_if_too_many_splits() { - TokenRange range = range(0, 10); - - List ranges = range.splitEvenly(255); - assertThat(ranges).hasSize(255); - - for (int i = 0; i < ranges.size(); i++) { - TokenRange tr = ranges.get(i); - if (i < 10) { - assertThat(tr).isEqualTo(range(i, i + 1)); - } else { - assertThat(tr.isEmpty()); - } - } - } - - @Test - public void should_check_if_range_contains_token() { - // ]1,2] contains 2, but it does not contain the start of ]2,3] - assertThat(range(1, 2)) - .contains(new Murmur3Token(2), false) - .doesNotContain(new Murmur3Token(2), true); - // ]1,2] does not contain 1, but it contains the start of ]1,3] - assertThat(range(1, 2)) - .doesNotContain(new Murmur3Token(1), false) - .contains(new Murmur3Token(1), true); - - // ]2,1] contains the start of ]min,5] - assertThat(range(2, 1)).contains(min, true); - - // ]min, 1] does not contain min, but it contains the start of ]min, 2] - assertThat(minTo(1)).doesNotContain(min, false).contains(min, true); - // ]1, min] contains min, but not the start of ]min, 2] - assertThat(toMax(1)).contains(min, false).doesNotContain(min, true); - - // An empty range contains nothing - assertThat(range(1, 1)) - .doesNotContain(new Murmur3Token(1), true) - .doesNotContain(new Murmur3Token(1), false) - .doesNotContain(min, true) - .doesNotContain(min, false); - - // The whole ring contains everything - assertThat(fullRing()) - .contains(min, true) - .contains(min, false) - .contains(new Murmur3Token(1), true) - .contains(new Murmur3Token(1), false); - } - - private TokenRange range(long start, long end) { - return new Murmur3TokenRange(new Murmur3Token(start), new Murmur3Token(end)); - } - - private TokenRange minTo(long end) { - return new Murmur3TokenRange(min, new Murmur3Token(end)); - } - - private 
TokenRange toMax(long start) { - return new Murmur3TokenRange(new Murmur3Token(start), min); - } - - private TokenRange fullRing() { - return new Murmur3TokenRange(Murmur3TokenFactory.MIN_TOKEN, Murmur3TokenFactory.MIN_TOKEN); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java deleted file mode 100644 index 13efda4b352..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class DefaultMetricIdGeneratorTest { - - @Mock private InternalDriverContext context; - - @Mock private DriverConfig config; - - @Mock private DriverExecutionProfile profile; - - @Mock private Node node; - - @Mock private EndPoint endpoint; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - given(context.getConfig()).willReturn(config); - given(context.getSessionName()).willReturn("s0"); - given(config.getDefaultProfile()).willReturn(profile); - given(node.getEndPoint()).willReturn(endpoint); - given(endpoint.asMetricPrefix()).willReturn("10_1_2_3:9042"); - } - - @Test - @UseDataProvider("sessionMetrics") - public void should_generate_session_metric(String prefix, String expectedName) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) - .willReturn(prefix); - DefaultMetricIdGenerator generator = new 
DefaultMetricIdGenerator(context); - // when - MetricId id = generator.sessionMetricId(DefaultSessionMetric.CONNECTED_NODES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEmpty(); - } - - @Test - @UseDataProvider("nodeMetrics") - public void should_generate_node_metric(String prefix, String expectedName) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) - .willReturn(prefix); - DefaultMetricIdGenerator generator = new DefaultMetricIdGenerator(context); - // when - MetricId id = generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEmpty(); - } - - @DataProvider - public static Object[][] sessionMetrics() { - String suffix = DefaultSessionMetric.CONNECTED_NODES.getPath(); - return new Object[][] { - new Object[] {"", "s0." + suffix}, - new Object[] {"cassandra", "cassandra.s0." + suffix}, - new Object[] {"app.cassandra", "app.cassandra.s0." + suffix} - }; - } - - @DataProvider - public static Object[][] nodeMetrics() { - String suffix = DefaultNodeMetric.CQL_MESSAGES.getPath(); - return new Object[][] { - new Object[] {"", "s0.nodes.10_1_2_3:9042." + suffix}, - new Object[] {"cassandra", "cassandra.s0.nodes.10_1_2_3:9042." + suffix}, - new Object[] {"app.cassandra", "app.cassandra.s0.nodes.10_1_2_3:9042." + suffix} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java deleted file mode 100644 index 339f9235dc2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.google.common.collect.ImmutableMap; -import org.junit.Test; - -public class DefaultMetricIdTest { - - @Test - public void testGetName() { - DefaultMetricId id = new DefaultMetricId("metric1", ImmutableMap.of()); - assertThat(id.getName()).isEqualTo("metric1"); - } - - @Test - public void testGetTags() { - DefaultMetricId id = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - assertThat(id.getTags()) - .hasSize(2) - .containsEntry("tag1", "value1") - .containsEntry("tag2", "value2"); - } - - @Test - public void testEquals() { - DefaultMetricId id1 = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id2 = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id3 = - new DefaultMetricId("metric2", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id4 = new DefaultMetricId("metric1", ImmutableMap.of("tag2", "value2")); - assertThat(id1).isEqualTo(id2).isNotEqualTo(id3).isNotEqualTo(id4); - } - - @Test - public void testHashCode() { - DefaultMetricId id1 = - new 
DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - DefaultMetricId id2 = - new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); - assertThat(id1).hasSameHashCodeAs(id2); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java deleted file mode 100644 index e5983c4f4fd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import org.junit.Test; - -public class DropwizardMetricsFactoryTest { - - @Test - public void should_throw_if_registry_of_wrong_type() { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - List enabledMetrics = - Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath()); - // when - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getConfig()).thenReturn(config); - when(context.getSessionName()).thenReturn("MockSession"); - // registry object is not a registry type - when(context.getMetricRegistry()).thenReturn(Integer.MAX_VALUE); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) - .thenReturn(enabledMetrics); - // then - try { - new DropwizardMetricsFactory(context); - fail( - "MetricsFactory should require correct registry object type: " - + MetricRegistry.class.getName()); - } catch (IllegalArgumentException iae) { - assertThat(iae.getMessage()) - .isEqualTo( - "Unexpected Metrics registry object. 
" - + "Expected registry object to be of type '%s', but was '%s'", - MetricRegistry.class.getName(), Integer.class.getName()); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java deleted file mode 100644 index ccc42a7027d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Duration; -import java.util.Collections; -import java.util.Set; -import java.util.function.Supplier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DropwizardNodeMetricUpdaterTest { - - @Test - public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - Duration expireAfter = 
AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - - DropwizardNodeMetricUpdater updater = - new DropwizardNodeMetricUpdater(node, context, enabledMetrics, new MetricRegistry()) { - @Override - protected void initializeGauge( - NodeMetric metric, DriverExecutionProfile profile, Supplier supplier) { - // do nothing - } - - @Override - protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - - @Override - protected void initializeHdrTimer( - NodeMetric metric, - DriverExecutionProfile profile, - DriverOption highestLatency, - DriverOption significantDigits, - DriverOption interval) { - // do nothing - } - }; - - // then - assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); - verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - String.format( - "[prefix] Value too low for %s: %s. 
Forcing to %s instead.", - DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), - expireAfter, - AbstractMetricUpdater.MIN_EXPIRE_AFTER)); - } - - @Test - @UseDataProvider(value = "acceptableEvictionTimes") - public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( - Duration expireAfter) { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - - DropwizardNodeMetricUpdater updater = - new DropwizardNodeMetricUpdater(node, context, enabledMetrics, new MetricRegistry()) { - @Override - protected void initializeGauge( - NodeMetric metric, DriverExecutionProfile profile, Supplier supplier) { - // do nothing - } - - @Override - protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - - @Override - protected void initializeHdrTimer( - NodeMetric metric, - DriverExecutionProfile profile, - DriverOption highestLatency, - DriverOption significantDigits, - DriverOption interval) { - // do nothing - } - }; - - // then - assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); - verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); - } - - @DataProvider - public static Object[][] acceptableEvictionTimes() { - return new Object[][] { - {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, - 
{AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java deleted file mode 100644 index 3a563be4453..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.Collections; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class NoopMetricsFactoryTest { - - @Test - public void should_log_warning_when_metrics_enabled() { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverConfig config = mock(DriverConfig.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - when(context.getSessionName()).thenReturn("MockSession"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) - .thenReturn(Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath())); - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(NoopMetricsFactory.class, Level.WARN); - - // when - new NoopMetricsFactory(context); - - // then - verify(logger.appender, times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains("[MockSession] Some 
session-level or node-level metrics were enabled"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java deleted file mode 100644 index 809a7419ba4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.google.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class TaggingMetricIdGeneratorTest { - - @Mock private InternalDriverContext context; - - @Mock private DriverConfig config; - - @Mock private DriverExecutionProfile profile; - - @Mock private Node node; - - @Mock private EndPoint endpoint; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - given(context.getConfig()).willReturn(config); - given(context.getSessionName()).willReturn("s0"); - given(config.getDefaultProfile()).willReturn(profile); - given(node.getEndPoint()).willReturn(endpoint); - given(endpoint.toString()).willReturn("/10.1.2.3:9042"); - } - - @Test - @UseDataProvider("sessionMetrics") - public void should_generate_session_metric( - String prefix, String expectedName, Map expectedTags) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) 
- .willReturn(prefix); - TaggingMetricIdGenerator generator = new TaggingMetricIdGenerator(context); - // when - MetricId id = generator.sessionMetricId(DefaultSessionMetric.CONNECTED_NODES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEqualTo(expectedTags); - } - - @Test - @UseDataProvider("nodeMetrics") - public void should_generate_node_metric( - String prefix, String expectedName, Map expectedTags) { - // given - given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) - .willReturn(prefix); - TaggingMetricIdGenerator generator = new TaggingMetricIdGenerator(context); - // when - MetricId id = generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES); - // then - assertThat(id.getName()).isEqualTo(expectedName); - assertThat(id.getTags()).isEqualTo(expectedTags); - } - - @DataProvider - public static Object[][] sessionMetrics() { - String suffix = DefaultSessionMetric.CONNECTED_NODES.getPath(); - ImmutableMap tags = ImmutableMap.of("session", "s0"); - return new Object[][] { - new Object[] {"", "session." + suffix, tags}, - new Object[] {"cassandra", "cassandra.session." + suffix, tags}, - new Object[] {"app.cassandra", "app.cassandra.session." + suffix, tags} - }; - } - - @DataProvider - public static Object[][] nodeMetrics() { - String suffix = DefaultNodeMetric.CQL_MESSAGES.getPath(); - ImmutableMap tags = ImmutableMap.of("session", "s0", "node", "/10.1.2.3:9042"); - return new Object[][] { - new Object[] {"", "nodes." + suffix, tags}, - new Object[] {"cassandra", "cassandra.nodes." + suffix, tags}, - new Object[] {"app.cassandra", "app.cassandra.nodes." 
+ suffix, tags} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java deleted file mode 100644 index 30dee7847c4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.os; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.Instant; -import java.time.temporal.ChronoUnit; -import java.util.Optional; -import org.junit.Test; - -/** - * Explicitly test native impl based on jnr's POSIX impl. This test should pass on any platform - * which is supported by jnr. 
- */ -public class JnrLibcTest { - - @Test - public void should_be_available() { - - Libc impl = new JnrLibc(); - assertThat(impl.available()).isTrue(); - } - - @Test - public void should_support_getpid() { - Libc impl = new JnrLibc(); - Optional val = impl.getpid(); - assertThat(val).isNotEmpty(); - assertThat(val.get()).isGreaterThan(1); - } - - @Test - public void should_support_gettimeofday() { - Libc impl = new JnrLibc(); - Optional val = impl.gettimeofday(); - assertThat(val).isNotEmpty(); - assertThat(val.get()).isGreaterThan(0); - - Instant now = Instant.now(); - Instant rvInstant = Instant.EPOCH.plus(val.get(), ChronoUnit.MICROS); - assertThat(rvInstant.isAfter(now.minusSeconds(1))).isTrue(); - assertThat(rvInstant.isBefore(now.plusSeconds(1))).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java deleted file mode 100644 index aeaf28d1fdf..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.os; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.Test; - -public class NativeTest { - - /** Verifies that {@link Native#getCpu()} returns non-empty cpu architecture */ - @Test - public void should_return_cpu_info() { - assertThat(Native.getCpu()).isNotEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java deleted file mode 100644 index 5c7257d8c3f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.ClusterNameMismatchException; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolInitTest extends ChannelPoolTestBase { - - @Test - public void should_initialize_when_all_channels_succeed() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - assertThatStage(poolFuture) - 
.isSuccess(pool -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3)); - verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_initialize_when_all_channels_fail() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - assertThatStage(poolFuture).isSuccess(pool -> assertThat(pool.channels).isEmpty()); - verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_indicate_when_keyspace_failed_on_all_channels() { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node, new InvalidKeyspaceException("invalid keyspace")) - .failure(node, new InvalidKeyspaceException("invalid keyspace")) - .failure(node, new InvalidKeyspaceException("invalid keyspace")) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - assertThatStage(poolFuture) - .isSuccess( - pool -> { - assertThat(pool.isInvalidKeyspace()).isTrue(); - verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - }); - } - - @Test - public void 
should_fire_force_down_event_when_cluster_name_does_not_match() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - ClusterNameMismatchException error = - new ClusterNameMismatchException(node.getEndPoint(), "actual", "expected"); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .failure(node, error) - .failure(node, error) - .failure(node, error) - .build(); - - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - verify(eventBus, VERIFY_TIMEOUT) - .fire(TopologyEvent.forceDown(node.getBroadcastRpcAddress().get())); - verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - - verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_when_init_incomplete() throws Exception { - // Short delay so we don't have to wait in the test - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // Init: 1 channel fails, the other succeeds - .failure(node, "mock channel init failure") - .success(node, channel1) - // 1st reconnection - .pending(node, channel2Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - 
assertThat(pool.channels).containsOnly(channel1); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - // A reconnection should have been scheduled - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - channel2Future.complete(channel2); - factoryHelper.waitForCalls(node, 1); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - - verify(nodeMetricUpdater, VERIFY_TIMEOUT) - .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java deleted file mode 100644 index 4273a51f891..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; - -public class ChannelPoolKeyspaceTest extends ChannelPoolTestBase { - - @Test - public void should_switch_keyspace_on_existing_channels() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - CqlIdentifier newKeyspace = CqlIdentifier.fromCql("new_keyspace"); - CompletionStage setKeyspaceFuture = pool.setKeyspace(newKeyspace); - - verify(channel1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(channel2, 
VERIFY_TIMEOUT).setKeyspace(newKeyspace); - - assertThatStage(setKeyspaceFuture).isSuccess(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_switch_keyspace_on_pending_channels() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - CompletableFuture channel1Future = new CompletableFuture<>(); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - // reconnection - .pending(node, channel1Future) - .pending(node, channel2Future) - .build(); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - - // Check that reconnection has kicked in, but do not complete it yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - factoryHelper.waitForCalls(node, 2); - - // Switch keyspace, it succeeds immediately since there is no active channel - CqlIdentifier newKeyspace = CqlIdentifier.fromCql("new_keyspace"); - CompletionStage setKeyspaceFuture = pool.setKeyspace(newKeyspace); - assertThatStage(setKeyspaceFuture).isSuccess(); - - // Now let the two channels succeed to complete the reconnection - channel1Future.complete(channel1); - channel2Future.complete(channel2); - - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - verify(channel1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(channel2, 
VERIFY_TIMEOUT).setKeyspace(newKeyspace); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java deleted file mode 100644 index c4538f78bdb..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import io.netty.channel.ChannelPromise; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolReconnectTest extends ChannelPoolTestBase { - - @Test - public void should_reconnect_when_channel_closes() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // reconnection - .pending(node, channel3Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, 
NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - // Simulate fatal error on channel2 - ((ChannelPromise) channel2.closeFuture()) - .setFailure(new Exception("mock channel init failure")); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - factoryHelper.waitForCall(node); - - channel3Future.complete(channel3); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel3)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_reconnect_when_channel_starts_graceful_shutdown() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // reconnection - .pending(node, channel3Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - - 
assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - // Simulate graceful shutdown on channel2 - ((ChannelPromise) channel2.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - factoryHelper.waitForCall(node); - - channel3Future.complete(channel3); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel3)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_let_current_attempt_complete_when_reconnecting_now() - throws ExecutionException, InterruptedException { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(1); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - // reconnection - .pending(node, channel2Future) - .build(); - - InOrder inOrder = inOrder(eventBus); - - // Initial connection - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - factoryHelper.waitForCalls(node, 1); - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - inOrder.verify(eventBus, 
VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelOpened(node)); - - // Kill channel1, reconnection begins and starts initializing channel2, but the initialization - // is still pending (channel2Future not completed) - ((ChannelPromise) channel1.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCalls(node, 1); - - // Force a reconnection, should not try to create a new channel since we have a pending one - pool.reconnectNow(); - TimeUnit.MILLISECONDS.sleep(200); - factoryHelper.verifyNoMoreCalls(); - inOrder.verify(eventBus, never()).fire(any()); - - // Complete the initialization of channel2, reconnection succeeds - channel2Future.complete(channel2); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel2)); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java deleted file mode 100644 index 6992bb7742a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolResizeTest extends ChannelPoolTestBase { - - @Test - public void should_shrink_outside_of_reconnection() throws Exception { - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = 
newMockDriverChannel(4); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - .success(node, channel4) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.REMOTE, context, "test"); - - factoryHelper.waitForCalls(node, 4); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(4)).fire(ChannelEvent.channelOpened(node)); - - pool.resize(NodeDistance.LOCAL); - - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_shrink_during_reconnection() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - .failure(node, "mock channel init failure") - .failure(node, "mock channel init failure") - // reconnection - .pending(node, channel3Future) - .pending(node, channel4Future) - .build(); - 
InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.REMOTE, context, "test"); - - factoryHelper.waitForCalls(node, 4); - - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - // A reconnection should have been scheduled to add the missing channels, don't complete yet - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); - - pool.resize(NodeDistance.LOCAL); - - TimeUnit.MILLISECONDS.sleep(200); - - // Now allow the reconnected channels to complete initialization - channel3Future.complete(channel3); - channel4Future.complete(channel4); - - factoryHelper.waitForCalls(node, 2); - - // Pool should have shrunk back to 2. We keep the most recent channels so 1 and 2 get closed. 
- inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_grow_outside_of_reconnection() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // growth attempt - .success(node, channel3) - .success(node, channel4) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - pool.resize(NodeDistance.REMOTE); - - // The resizing should have triggered a reconnection - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, 
VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_grow_during_reconnection() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(4); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .failure(node, "mock channel init failure") - // first reconnection attempt - .pending(node, channel2Future) - // extra reconnection attempt after we realize the pool must grow - .pending(node, channel3Future) - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1); - - // A reconnection should have been scheduled to add the missing channel, don't complete yet - verify(reconnectionSchedule, 
VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - pool.resize(NodeDistance.REMOTE); - - TimeUnit.MILLISECONDS.sleep(200); - - // Complete the channel for the first reconnection, bringing the count to 2 - channel2Future.complete(channel2); - factoryHelper.waitForCall(node); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - - // A second attempt should have been scheduled since we're now still under the target size - verify(reconnectionSchedule, VERIFY_TIMEOUT.times(2)).nextDelay(); - // Same reconnection is still running, no additional events - inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStopped(node)); - inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStarted(node)); - - // Two more channels get opened, bringing us to the target count - factoryHelper.waitForCalls(node, 2); - channel3Future.complete(channel3); - channel4Future.complete(channel4); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_resize_outside_of_reconnection_if_config_changes() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - MockChannelFactoryHelper factoryHelper = - 
MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - // growth attempt - .success(node, channel3) - .success(node, channel4) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - // Simulate a configuration change - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(4); - eventBus.fire(ConfigChangeEvent.INSTANCE); - - // It should have triggered a reconnection - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_resize_during_reconnection_if_config_changes() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - CompletableFuture channel2Future = new CompletableFuture<>(); - DriverChannel channel3 = newMockDriverChannel(3); - CompletableFuture channel3Future = new CompletableFuture<>(); - DriverChannel channel4 = 
newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .failure(node, "mock channel init failure") - // first reconnection attempt - .pending(node, channel2Future) - // extra reconnection attempt after we realize the pool must grow - .pending(node, channel3Future) - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1); - - // A reconnection should have been scheduled to add the missing channel, don't complete yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); - - // Simulate a configuration change - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(4); - eventBus.fire(ConfigChangeEvent.INSTANCE); - TimeUnit.MILLISECONDS.sleep(200); - - // Complete the channel for the first reconnection, bringing the count to 2 - channel2Future.complete(channel2); - factoryHelper.waitForCall(node); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - - await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - - // A second attempt should have been scheduled since we're now still under the target size - verify(reconnectionSchedule, VERIFY_TIMEOUT.times(2)).nextDelay(); - // Same reconnection is still running, no additional events - inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStopped(node)); - 
inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStarted(node)); - - // Two more channels get opened, bringing us to the target count - factoryHelper.waitForCalls(node, 2); - channel3Future.complete(channel3); - channel4Future.complete(channel4); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - - await() - .untilAsserted( - () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_ignore_config_change_if_not_relevant() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - .success(node, channel1) - .success(node, channel2) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 2); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - assertThat(pool.channels).containsOnly(channel1, channel2); - - // Config changes, but not for our distance - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(1); - eventBus.fire(ConfigChangeEvent.INSTANCE); - TimeUnit.MILLISECONDS.sleep(200); - - // It should not have triggered a reconnection - verify(reconnectionSchedule, never()).nextDelay(); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java deleted file mode 100644 index b40bcb4aa39..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.internal.core.channel.ChannelEvent; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.channel.MockChannelFactoryHelper; -import io.netty.channel.ChannelPromise; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import org.mockito.InOrder; - -public class ChannelPoolShutdownTest extends ChannelPoolTestBase { - - @Test - public void should_close_all_channels_when_closed() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - // reconnection - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); - - 
assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - - // Simulate graceful shutdown on channel3 - ((ChannelPromise) channel3.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelClosed(node)); - - // Reconnection should have kicked in and started to open channel4, do not complete it yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCalls(node, 1); - - CompletionStage closeFuture = pool.closeAsync(); - - // The two original channels were closed normally - verify(channel1, VERIFY_TIMEOUT).close(); - verify(channel2, VERIFY_TIMEOUT).close(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - // The closing channel was not closed again - verify(channel3, never()).close(); - - // Complete the reconnecting channel - channel4Future.complete(channel4); - - // It should be force-closed once we find out the pool was closed - verify(channel4, VERIFY_TIMEOUT).forceClose(); - // No events because the channel was never really associated to the pool - inOrder.verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, never()).fire(ChannelEvent.channelClosed(node)); - - // We don't wait for reconnected channels to close, so the pool only depends on channel 1 to 3 - ((ChannelPromise) channel1.closeFuture()).setSuccess(); - ((ChannelPromise) channel2.closeFuture()).setSuccess(); - ((ChannelPromise) channel3.closeFuture()).setSuccess(); - - assertThatStage(closeFuture).isSuccess(); - - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_force_close_all_channels_when_force_closed() throws Exception { - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - - when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(3); - - DriverChannel channel1 = newMockDriverChannel(1); - DriverChannel channel2 = 
newMockDriverChannel(2); - DriverChannel channel3 = newMockDriverChannel(3); - DriverChannel channel4 = newMockDriverChannel(4); - CompletableFuture channel4Future = new CompletableFuture<>(); - MockChannelFactoryHelper factoryHelper = - MockChannelFactoryHelper.builder(channelFactory) - // init - .success(node, channel1) - .success(node, channel2) - .success(node, channel3) - // reconnection - .pending(node, channel4Future) - .build(); - InOrder inOrder = inOrder(eventBus); - - CompletionStage poolFuture = - ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); - - factoryHelper.waitForCalls(node, 3); - - assertThatStage(poolFuture).isSuccess(); - ChannelPool pool = poolFuture.toCompletableFuture().get(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); - - // Simulate graceful shutdown on channel3 - ((ChannelPromise) channel3.closeStartedFuture()).setSuccess(); - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelClosed(node)); - - // Reconnection should have kicked in and started to open a channel, do not complete it yet - verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); - factoryHelper.waitForCalls(node, 1); - - CompletionStage closeFuture = pool.forceCloseAsync(); - - // The three original channels were force-closed - verify(channel1, VERIFY_TIMEOUT).forceClose(); - verify(channel2, VERIFY_TIMEOUT).forceClose(); - verify(channel3, VERIFY_TIMEOUT).forceClose(); - // Only two events because the one for channel3 was sent earlier - inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - - // Complete the reconnecting channel - channel4Future.complete(channel4); - - // It should be force-closed once we find out the pool was closed - verify(channel4, VERIFY_TIMEOUT).forceClose(); - // No events because the channel was never really associated to the pool - inOrder.verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - 
inOrder.verify(eventBus, never()).fire(ChannelEvent.channelClosed(node)); - - // We don't wait for reconnected channels to close, so the pool only depends on channel 1-3 - ((ChannelPromise) channel1.closeFuture()).setSuccess(); - ((ChannelPromise) channel2.closeFuture()).setSuccess(); - ((ChannelPromise) channel3.closeFuture()).setSuccess(); - - assertThatStage(closeFuture).isSuccess(); - - factoryHelper.verifyNoMoreCalls(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java deleted file mode 100644 index 2f8056e49e0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.pool; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.channel.ChannelFactory; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import io.netty.channel.Channel; -import io.netty.channel.DefaultChannelPromise; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.channel.EventLoop; -import java.time.Duration; -import java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.Before; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.verification.VerificationWithTimeout; - -abstract class ChannelPoolTestBase { - - /** How long we wait when verifying mocks for async invocations */ - protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(2000); - - @Mock protected InternalDriverContext context; - @Mock private DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - @Mock private 
ReconnectionPolicy reconnectionPolicy; - @Mock protected ReconnectionPolicy.ReconnectionSchedule reconnectionSchedule; - @Mock private NettyOptions nettyOptions; - @Mock protected ChannelFactory channelFactory; - @Mock protected MetricsFactory metricsFactory; - @Mock protected NodeMetricUpdater nodeMetricUpdater; - protected DefaultNode node; - protected EventBus eventBus; - private DefaultEventLoopGroup adminEventLoopGroup; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - adminEventLoopGroup = new DefaultEventLoopGroup(1); - - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - this.eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - when(context.getChannelFactory()).thenReturn(channelFactory); - - when(context.getReconnectionPolicy()).thenReturn(reconnectionPolicy); - when(reconnectionPolicy.newNodeSchedule(any(Node.class))).thenReturn(reconnectionSchedule); - // By default, set a large reconnection delay. Tests that care about reconnection will override - // it. 
- when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(metricsFactory.newNodeUpdater(any(Node.class))).thenReturn(nodeMetricUpdater); - - node = TestNodeFactory.newNode(1, context); - } - - @After - public void teardown() { - adminEventLoopGroup.shutdownGracefully(100, 200, TimeUnit.MILLISECONDS); - } - - DriverChannel newMockDriverChannel(int id) { - DriverChannel driverChannel = mock(DriverChannel.class); - EventLoop adminExecutor = adminEventLoopGroup.next(); - Channel channel = mock(Channel.class); - DefaultChannelPromise closeFuture = new DefaultChannelPromise(channel, adminExecutor); - DefaultChannelPromise closeStartedFuture = new DefaultChannelPromise(channel, adminExecutor); - when(driverChannel.close()).thenReturn(closeFuture); - when(driverChannel.forceClose()).thenReturn(closeFuture); - when(driverChannel.closeFuture()).thenReturn(closeFuture); - when(driverChannel.closeStartedFuture()).thenReturn(closeStartedFuture); - when(driverChannel.setKeyspace(any(CqlIdentifier.class))) - .thenReturn(adminExecutor.newSucceededFuture(null)); - when(driverChannel.toString()).thenReturn("channel" + id); - return driverChannel; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java deleted file mode 100644 index 628110bc1df..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.pool; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ChannelSetTest { - @Mock private DriverChannel channel1, channel2, channel3; - private ChannelSet set; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - set = new ChannelSet(); - } - - @Test - public void should_return_null_when_empty() { - assertThat(set.size()).isEqualTo(0); - assertThat(set.next()).isNull(); - } - - @Test - public void should_return_element_when_single() { - // Given - when(channel1.preAcquireId()).thenReturn(true); - - // When - set.add(channel1); - - // Then - assertThat(set.size()).isEqualTo(1); - assertThat(set.next()).isEqualTo(channel1); - verify(channel1, never()).getAvailableIds(); - verify(channel1).preAcquireId(); - } - - @Test - public void should_return_null_when_single_but_full() { - // Given - when(channel1.preAcquireId()).thenReturn(false); - - // When - set.add(channel1); - - // Then - assertThat(set.next()).isNull(); - verify(channel1).preAcquireId(); - } - - @Test - public void should_return_most_available_when_multiple() { - // Given - when(channel1.getAvailableIds()).thenReturn(2); - when(channel2.getAvailableIds()).thenReturn(12); - 
when(channel3.getAvailableIds()).thenReturn(8); - when(channel2.preAcquireId()).thenReturn(true); - - // When - set.add(channel1); - set.add(channel2); - set.add(channel3); - - // Then - assertThat(set.size()).isEqualTo(3); - assertThat(set.next()).isEqualTo(channel2); - verify(channel1).getAvailableIds(); - verify(channel2).getAvailableIds(); - verify(channel3).getAvailableIds(); - verify(channel2).preAcquireId(); - - // When - when(channel1.getAvailableIds()).thenReturn(15); - when(channel1.preAcquireId()).thenReturn(true); - - // Then - assertThat(set.next()).isEqualTo(channel1); - verify(channel1).preAcquireId(); - } - - @Test - public void should_return_null_when_multiple_but_all_full() { - // Given - when(channel1.getAvailableIds()).thenReturn(0); - when(channel2.getAvailableIds()).thenReturn(0); - when(channel3.getAvailableIds()).thenReturn(0); - - // When - set.add(channel1); - set.add(channel2); - set.add(channel3); - - // Then - assertThat(set.next()).isNull(); - } - - @Test - public void should_remove_channels() { - // Given - when(channel1.getAvailableIds()).thenReturn(2); - when(channel2.getAvailableIds()).thenReturn(12); - when(channel3.getAvailableIds()).thenReturn(8); - when(channel2.preAcquireId()).thenReturn(true); - - set.add(channel1); - set.add(channel2); - set.add(channel3); - assertThat(set.next()).isEqualTo(channel2); - - // When - set.remove(channel2); - when(channel3.preAcquireId()).thenReturn(true); - - // Then - assertThat(set.size()).isEqualTo(2); - assertThat(set.next()).isEqualTo(channel3); - - // When - set.remove(channel3); - when(channel1.preAcquireId()).thenReturn(true); - - // Then - assertThat(set.size()).isEqualTo(1); - assertThat(set.next()).isEqualTo(channel1); - - // When - set.remove(channel1); - - // Then - assertThat(set.size()).isEqualTo(0); - assertThat(set.next()).isNull(); - } - - /** - * Check that {@link ChannelSet#next()} doesn't spin forever if it keeps racing (see comments in - * the implementation). 
- */ - @Test - public void should_not_loop_indefinitely_if_acquisition_keeps_failing() { - // Given - when(channel1.getAvailableIds()).thenReturn(2); - when(channel2.getAvailableIds()).thenReturn(12); - when(channel3.getAvailableIds()).thenReturn(8); - // channel2 is the most available but we keep failing to acquire (simulating the race condition) - when(channel2.preAcquireId()).thenReturn(false); - - // When - set.add(channel1); - set.add(channel2); - set.add(channel3); - - // Then - assertThat(set.next()).isNull(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java deleted file mode 100644 index 1911c7c7227..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.protocol.internal.NoopCompressor; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class BuiltInCompressorsTest { - - @Mock private DriverContext context; - - @Before - public void setUp() { - MockitoAnnotations.initMocks(this); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_create_instance_for_supported_algorithms(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(BuiltInCompressors.newInstance("lz4", context)).isInstanceOf(Lz4Compressor.class); - assertThat(BuiltInCompressors.newInstance("snappy", context)) - .isInstanceOf(SnappyCompressor.class); - assertThat(BuiltInCompressors.newInstance("none", context)) - .isInstanceOf(NoopCompressor.class); - assertThat(BuiltInCompressors.newInstance("LZ4", context)).isInstanceOf(Lz4Compressor.class); - assertThat(BuiltInCompressors.newInstance("SNAPPY", context)) - .isInstanceOf(SnappyCompressor.class); - assertThat(BuiltInCompressors.newInstance("NONE", context)) - .isInstanceOf(NoopCompressor.class); - } finally { - Locale.setDefault(def); - } - } - - @Test - public void should_throw_when_unsupported_algorithm() { - assertThatThrownBy(() -> BuiltInCompressors.newInstance("GZIP", context)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("Unsupported compression 
algorithm 'GZIP'"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java deleted file mode 100644 index 895a650b292..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java +++ /dev/null @@ -1,407 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.internal.core.util.ByteBufs; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import org.junit.Test; - -/** - * Note: we don't test trivial methods that simply delegate to ByteBuf, nor default implementations - * inherited from {@link com.datastax.oss.protocol.internal.PrimitiveCodec}. 
- */ -public class ByteBufPrimitiveCodecTest { - private ByteBufPrimitiveCodec codec = new ByteBufPrimitiveCodec(ByteBufAllocator.DEFAULT); - - @Test - public void should_concatenate() { - ByteBuf left = ByteBufs.wrap(0xca, 0xfe); - ByteBuf right = ByteBufs.wrap(0xba, 0xbe); - assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); - } - - @Test - public void should_concatenate_slices() { - ByteBuf left = ByteBufs.wrap(0x00, 0xca, 0xfe, 0x00).slice(1, 2); - ByteBuf right = ByteBufs.wrap(0x00, 0x00, 0xba, 0xbe, 0x00).slice(2, 2); - - assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); - } - - @Test - public void should_read_inet_v4() { - ByteBuf source = - ByteBufs.wrap( - // length (as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("127.0.0.1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_read_inet_v6() { - ByteBuf lengthAndAddress = allocate(17); - lengthAndAddress.writeByte(16); - lengthAndAddress.writeLong(0); - lengthAndAddress.writeLong(1); - ByteBuf source = - codec.concat( - lengthAndAddress, - // port (as an int) - ByteBufs.wrap(0x00, 0x00, 0x23, 0x52)); - InetSocketAddress inet = codec.readInet(source); - assertThat(inet.getAddress().getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - assertThat(inet.getPort()).isEqualTo(9042); - } - - @Test - public void should_fail_to_read_inet_if_length_invalid() { - ByteBuf source = - ByteBufs.wrap( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01, - // port (as an int) - 0x00, - 0x00, - 0x23, - 0x52); - assertThatThrownBy(() -> codec.readInet(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_inetaddr_v4() { - ByteBuf source = - ByteBufs.wrap( - // length 
(as a byte) - 0x04, - // address - 0x7f, - 0x00, - 0x00, - 0x01); - InetAddress inetAddr = codec.readInetAddr(source); - assertThat(inetAddr.getHostAddress()).isEqualTo("127.0.0.1"); - } - - @Test - public void should_read_inetaddr_v6() { - ByteBuf source = allocate(17); - source.writeByte(16); - source.writeLong(0); - source.writeLong(1); - InetAddress inetAddr = codec.readInetAddr(source); - assertThat(inetAddr.getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); - } - - @Test - public void should_fail_to_read_inetaddr_if_length_invalid() { - ByteBuf source = - ByteBufs.wrap( - // length (as a byte) - 0x03, - // address - 0x7f, - 0x00, - 0x01); - assertThatThrownBy(() -> codec.readInetAddr(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Invalid address length: 3 ([127, 0, 1])"); - } - - @Test - public void should_read_bytes() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - ByteBuffer bytes = codec.readBytes(source); - assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); - } - - @Test - public void should_read_bytes_when_extra_data() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe, - 0xde, - 0xda, - 0xdd); - ByteBuffer bytes = codec.readBytes(source); - assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); - } - - @Test - public void read_bytes_should_udpate_reader_index() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe, - 0xde, - 0xda, - 0xdd); - codec.readBytes(source); - - assertThat(source.readerIndex()).isEqualTo(8); - } - - @Test - public void read_bytes_should_throw_when_not_enough_content() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) : 4 bytes - 0x00, - 0x00, - 0x00, - 0x04, - // contents : only 2 bytes - 0xca, - 0xfe); - 
assertThatThrownBy(() -> codec.readBytes(source)).isInstanceOf(IndexOutOfBoundsException.class); - } - - @Test - public void should_read_null_bytes() { - ByteBuf source = ByteBufs.wrap(0xFF, 0xFF, 0xFF, 0xFF); // -1 (as an int) - assertThat(codec.readBytes(source)).isNull(); - } - - @Test - public void should_read_short_bytes() { - ByteBuf source = - ByteBufs.wrap( - // length (as an unsigned short) - 0x00, - 0x04, - // contents - 0xca, - 0xfe, - 0xba, - 0xbe); - assertThat(Bytes.toHexString(codec.readShortBytes(source))).isEqualTo("0xcafebabe"); - } - - @Test - public void should_read_string() { - ByteBuf source = - ByteBufs.wrap( - // length (as an unsigned short) - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readString(source)).isEqualTo("hello"); - } - - @Test - public void should_fail_to_read_string_if_not_enough_characters() { - ByteBuf source = codec.allocate(2); - source.writeShort(4); - - assertThatThrownBy(() -> codec.readString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_read_long_string() { - ByteBuf source = - ByteBufs.wrap( - // length (as an int) - 0x00, - 0x00, - 0x00, - 0x05, - // UTF-8 contents - 0x68, - 0x65, - 0x6c, - 0x6c, - 0x6f); - assertThat(codec.readLongString(source)).isEqualTo("hello"); - } - - @Test - public void should_fail_to_read_long_string_if_not_enough_characters() { - ByteBuf source = codec.allocate(4); - source.writeInt(4); - - assertThatThrownBy(() -> codec.readLongString(source)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); - } - - @Test - public void should_write_inet_v4() throws Exception { - ByteBuf dest = allocate(1 + 4 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - 
.containsExactly( - "0x04" // size as a byte - + "7f000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inet_v6() throws Exception { - ByteBuf dest = allocate(1 + 16 + 4); - InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("::1"), 9042); - codec.writeInet(inet, dest); - assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - + "00002352" // port - ); - } - - @Test - public void should_write_inetaddr_v4() throws Exception { - ByteBuf dest = allocate(1 + 4); - InetAddress inetAddr = InetAddress.getByName("127.0.0.1"); - codec.writeInetAddr(inetAddr, dest); - assertThat(dest) - .containsExactly( - "0x04" // size as a byte - + "7f000001" // address - ); - } - - @Test - public void should_write_inetaddr_v6() throws Exception { - ByteBuf dest = allocate(1 + 16); - InetAddress inetAddr = InetAddress.getByName("::1"); - codec.writeInetAddr(inetAddr, dest); - assertThat(dest) - .containsExactly( - "0x10" // size as a byte - + "00000000000000000000000000000001" // address - ); - } - - @Test - public void should_write_string() { - ByteBuf dest = allocate(7); - codec.writeString("hello", dest); - assertThat(dest) - .containsExactly( - "0x0005" // size as an unsigned short - + "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_long_string() { - ByteBuf dest = allocate(9); - codec.writeLongString("hello", dest); - assertThat(dest) - .containsExactly( - "0x00000005" - + // size as an int - "68656c6c6f" // UTF-8 contents - ); - } - - @Test - public void should_write_bytes() { - ByteBuf dest = allocate(8); - codec.writeBytes(Bytes.fromHexString("0xcafebabe"), dest); - assertThat(dest) - .containsExactly( - "0x00000004" - + // size as an int - "cafebabe"); - } - - @Test - public void should_write_short_bytes() { - ByteBuf dest = allocate(6); - codec.writeShortBytes(new byte[] {(byte) 0xca, (byte) 0xfe, (byte) 0xba, (byte) 0xbe}, dest); - 
assertThat(dest) - .containsExactly( - "0x0004" - + // size as an unsigned short - "cafebabe"); - } - - @Test - public void should_write_null_bytes() { - ByteBuf dest = allocate(4); - codec.writeBytes((ByteBuffer) null, dest); - assertThat(dest).containsExactly("0xFFFFFFFF"); - } - - private static ByteBuf allocate(int length) { - return ByteBufAllocator.DEFAULT.buffer(length); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java deleted file mode 100644 index d151da309c1..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.connection.CrcMismatchException; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.SegmentCodec; -import com.google.common.base.Strings; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; -import io.netty.buffer.UnpooledByteBufAllocator; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.DecoderException; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class BytesToSegmentDecoderTest { - - // Hard-coded test data, the values were generated with our encoding methods. - // We're not really testing the decoding itself here, only that our subclass calls the - // LengthFieldBasedFrameDecoder parent constructor with the right parameters. 
- private static final ByteBuf REGULAR_HEADER = byteBuf("04000201f9f2"); - private static final ByteBuf REGULAR_PAYLOAD = byteBuf("00000001"); - private static final ByteBuf REGULAR_TRAILER = byteBuf("1fd6022d"); - private static final ByteBuf REGULAR_WRONG_HEADER = byteBuf("04000202f9f2"); - private static final ByteBuf REGULAR_WRONG_TRAILER = byteBuf("1fd6022e"); - - private static final ByteBuf MAX_HEADER = byteBuf("ffff03254047"); - private static final ByteBuf MAX_PAYLOAD = - byteBuf(Strings.repeat("01", Segment.MAX_PAYLOAD_LENGTH)); - private static final ByteBuf MAX_TRAILER = byteBuf("a05c2f13"); - - private static final ByteBuf LZ4_HEADER = byteBuf("120020000491c94f"); - private static final ByteBuf LZ4_PAYLOAD_UNCOMPRESSED = - byteBuf("00000001000000010000000100000001"); - private static final ByteBuf LZ4_PAYLOAD_COMPRESSED = - byteBuf("f00100000001000000010000000100000001"); - private static final ByteBuf LZ4_TRAILER = byteBuf("2bd67f90"); - - private static final Compressor LZ4_COMPRESSOR = new Lz4Compressor("test"); - - private EmbeddedChannel channel; - - @Before - public void setup() { - channel = new EmbeddedChannel(); - } - - @Test - public void should_decode_regular_segment() { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound(Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(REGULAR_PAYLOAD); - } - - @Test - public void should_decode_max_length_segment() { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound(Unpooled.wrappedBuffer(MAX_HEADER, MAX_PAYLOAD, MAX_TRAILER)); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(MAX_PAYLOAD); - } - - @Test - public void should_decode_segment_from_multiple_incoming_chunks() { - 
channel.pipeline().addLast(newDecoder(Compressor.none())); - // Send the header in two slices, to cover the case where the length can't be read the first - // time: - ByteBuf headerStart = REGULAR_HEADER.slice(0, 3); - ByteBuf headerEnd = REGULAR_HEADER.slice(3, 3); - channel.writeInbound(headerStart); - channel.writeInbound(headerEnd); - channel.writeInbound(REGULAR_PAYLOAD.duplicate()); - channel.writeInbound(REGULAR_TRAILER.duplicate()); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(REGULAR_PAYLOAD); - } - - @Test - public void should_decode_compressed_segment() { - channel.pipeline().addLast(newDecoder(LZ4_COMPRESSOR)); - // We need a contiguous buffer for this one, because of how our decompressor operates - ByteBuf buffer = Unpooled.wrappedBuffer(LZ4_HEADER, LZ4_PAYLOAD_COMPRESSED, LZ4_TRAILER).copy(); - channel.writeInbound(buffer); - Segment segment = channel.readInbound(); - assertThat(segment.isSelfContained).isTrue(); - assertThat(segment.payload).isEqualTo(LZ4_PAYLOAD_UNCOMPRESSED); - } - - @Test - public void should_surface_header_crc_mismatch() { - try { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound( - Unpooled.wrappedBuffer(REGULAR_WRONG_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); - fail("Expected a " + DecoderException.class.getSimpleName()); - } catch (DecoderException exception) { - assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); - } - } - - @Test - public void should_surface_trailer_crc_mismatch() { - try { - channel.pipeline().addLast(newDecoder(Compressor.none())); - channel.writeInbound( - Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_WRONG_TRAILER)); - fail("Expected a " + DecoderException.class.getSimpleName()); - } catch (DecoderException exception) { - assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); - } - } - - private BytesToSegmentDecoder 
newDecoder(Compressor compressor) { - return new BytesToSegmentDecoder( - new SegmentCodec<>( - new ByteBufPrimitiveCodec(UnpooledByteBufAllocator.DEFAULT), compressor)); - } - - private static ByteBuf byteBuf(String hex) { - return Unpooled.unreleasableBuffer( - Unpooled.wrappedBuffer(ByteBufUtil.decodeHexDump(hex)).asReadOnly()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java deleted file mode 100644 index 0ab61771da0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.internal.core.channel.ChannelHandlerTestBase; -import com.datastax.oss.driver.internal.core.util.ByteBufs; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.response.AuthSuccess; -import io.netty.buffer.ByteBuf; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import org.junit.Before; -import org.junit.Test; - -public class FrameDecoderTest extends ChannelHandlerTestBase { - // A valid binary payload for a response frame. - private static final ByteBuf VALID_PAYLOAD = - ByteBufs.fromHexString( - "0x84" // response frame, protocol version 4 - + "00" // flags (none) - + "002a" // stream id (42) - + "10" // opcode for AUTH_SUCCESS message - + "00000008" // body length - + "00000004cafebabe" // body - ); - - // A binary payload that is invalid because the protocol version is not supported by the codec - private static final ByteBuf INVALID_PAYLOAD = - ByteBufs.fromHexString( - "0xFF" // response frame, protocol version 127 - + "00002a100000000800000004cafebabe"); - - private FrameCodec frameCodec; - - @Before - @Override - public void setup() { - super.setup(); - frameCodec = - FrameCodec.defaultClient(new ByteBufPrimitiveCodec(channel.alloc()), Compressor.none()); - } - - @Test - public void should_decode_valid_payload() { - // Given - FrameDecoder decoder = new FrameDecoder(frameCodec, 1024); - channel.pipeline().addLast(decoder); - - // When - // The decoder releases the buffer, so make sure we retain it for the other tests - VALID_PAYLOAD.retain(); - channel.writeInbound(VALID_PAYLOAD.duplicate()); - Frame frame = 
readInboundFrame(); - - // Then - assertThat(frame.message).isInstanceOf(AuthSuccess.class); - } - - /** - * Checks that an exception carrying the stream id is thrown when decoding fails in the {@link - * LengthFieldBasedFrameDecoder} code. - */ - @Test - public void should_fail_to_decode_if_payload_is_valid_but_too_long() { - // Given - FrameDecoder decoder = new FrameDecoder(frameCodec, VALID_PAYLOAD.readableBytes() - 1); - channel.pipeline().addLast(decoder); - - // When - VALID_PAYLOAD.retain(); - try { - channel.writeInbound(VALID_PAYLOAD.duplicate()); - fail("expected an exception"); - } catch (FrameDecodingException e) { - // Then - assertThat(e.streamId).isEqualTo(42); - assertThat(e.getCause()).isInstanceOf(FrameTooLongException.class); - } - } - - /** Checks that an exception carrying the stream id is thrown when decoding fails in our code. */ - @Test - public void should_fail_to_decode_if_payload_cannot_be_decoded() { - // Given - FrameDecoder decoder = new FrameDecoder(frameCodec, 1024); - channel.pipeline().addLast(decoder); - - // When - INVALID_PAYLOAD.retain(); - try { - channel.writeInbound(INVALID_PAYLOAD.duplicate()); - fail("expected an exception"); - } catch (FrameDecodingException e) { - // Then - assertThat(e.streamId).isEqualTo(42); - assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java deleted file mode 100644 index 2886adeab4e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import com.datastax.oss.protocol.internal.Compressor; -import com.datastax.oss.protocol.internal.Frame; -import com.datastax.oss.protocol.internal.FrameCodec; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.ProtocolV5ClientCodecs; -import com.datastax.oss.protocol.internal.ProtocolV5ServerCodecs; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.request.AuthResponse; -import com.datastax.oss.protocol.internal.response.result.Void; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.UnpooledByteBufAllocator; -import io.netty.channel.embedded.EmbeddedChannel; -import java.util.Collections; -import org.junit.Before; -import org.junit.Test; - -public class SegmentToFrameDecoderTest { - - private static final FrameCodec FRAME_CODEC = - new FrameCodec<>( - new ByteBufPrimitiveCodec(UnpooledByteBufAllocator.DEFAULT), - Compressor.none(), - new ProtocolV5ClientCodecs(), - new ProtocolV5ServerCodecs()); - - private EmbeddedChannel channel; - - @Before - public void setup() { - channel = new 
EmbeddedChannel(); - channel.pipeline().addLast(new SegmentToFrameDecoder(FRAME_CODEC, "test")); - } - - @Test - public void should_decode_self_contained() { - ByteBuf payload = UnpooledByteBufAllocator.DEFAULT.buffer(); - payload.writeBytes(encodeFrame(Void.INSTANCE)); - payload.writeBytes(encodeFrame(new AuthResponse(Bytes.fromHexString("0xabcdef")))); - - channel.writeInbound(new Segment<>(payload, true)); - - Frame frame1 = channel.readInbound(); - assertThat(frame1.message).isInstanceOf(Void.class); - Frame frame2 = channel.readInbound(); - assertThat(frame2.message).isInstanceOf(AuthResponse.class); - } - - @Test - public void should_decode_sequence_of_slices() { - ByteBuf encodedFrame = - encodeFrame(new AuthResponse(Bytes.fromHexString("0x" + Strings.repeat("aa", 1011)))); - int sliceLength = 100; - do { - ByteBuf payload = - encodedFrame.readRetainedSlice(Math.min(sliceLength, encodedFrame.readableBytes())); - channel.writeInbound(new Segment<>(payload, false)); - } while (encodedFrame.isReadable()); - - Frame frame = channel.readInbound(); - assertThat(frame.message).isInstanceOf(AuthResponse.class); - } - - private static ByteBuf encodeFrame(Message message) { - Frame frame = - Frame.forResponse( - ProtocolConstants.Version.V5, - 1, - null, - Collections.emptyMap(), - Collections.emptyList(), - message); - return FRAME_CODEC.encode(frame); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java deleted file mode 100644 index 736bcb66d56..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.protocol; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import io.netty.channel.ChannelPromise; -import io.netty.channel.embedded.EmbeddedChannel; -import org.junit.Before; -import org.junit.Test; - -public class SliceWriteListenerTest { - - private final EmbeddedChannel channel = new EmbeddedChannel(); - - private ChannelPromise framePromise, slicePromise1, slicePromise2, slicePromise3; - - @Before - public void setup() { - framePromise = channel.newPromise(); - slicePromise1 = channel.newPromise(); - slicePromise2 = channel.newPromise(); - slicePromise3 = channel.newPromise(); - - ByteBufSegmentBuilder.SliceWriteListener listener = - new ByteBufSegmentBuilder.SliceWriteListener( - framePromise, ImmutableList.of(slicePromise1, slicePromise2, slicePromise3)); - slicePromise1.addListener(listener); - slicePromise2.addListener(listener); - slicePromise3.addListener(listener); - - assertThat(framePromise.isDone()).isFalse(); - } - - @Test - public void should_succeed_frame_if_all_slices_succeed() { - slicePromise1.setSuccess(); - assertThat(framePromise.isDone()).isFalse(); - slicePromise2.setSuccess(); - assertThat(framePromise.isDone()).isFalse(); - slicePromise3.setSuccess(); - - assertThat(framePromise.isSuccess()).isTrue(); - } - - @Test - 
public void should_fail_frame_and_cancel_remaining_slices_if_one_slice_fails() { - slicePromise1.setSuccess(); - assertThat(framePromise.isDone()).isFalse(); - Exception failure = new Exception("test"); - slicePromise2.setFailure(failure); - - assertThat(framePromise.isDone()).isTrue(); - assertThat(framePromise.isSuccess()).isFalse(); - assertThat(framePromise.cause()).isEqualTo(failure); - - assertThat(slicePromise3.isCancelled()).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java deleted file mode 100644 index 58d1783038d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java +++ /dev/null @@ -1,932 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.awaitility.Awaitility.await; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.control.ControlConnection; -import 
com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.DistanceEvent; -import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import io.netty.channel.DefaultEventLoopGroup; -import io.netty.util.concurrent.DefaultPromise; -import io.netty.util.concurrent.GlobalEventExecutor; -import java.time.Duration; -import java.util.Collections; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.verification.VerificationWithTimeout; - -public class DefaultSessionPoolsTest { - - private static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); - /** How long we wait when verifying mocks for async invocations */ - protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(500); - - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private ChannelPoolFactory channelPoolFactory; - @Mock private MetadataManager metadataManager; - @Mock private 
TopologyMonitor topologyMonitor; - @Mock private LoadBalancingPolicyWrapper loadBalancingPolicyWrapper; - @Mock private DriverConfigLoader configLoader; - @Mock private Metadata metadata; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private ReconnectionPolicy reconnectionPolicy; - @Mock private RetryPolicy retryPolicy; - @Mock private SpeculativeExecutionPolicy speculativeExecutionPolicy; - @Mock private AddressTranslator addressTranslator; - @Mock private ControlConnection controlConnection; - @Mock private MetricsFactory metricsFactory; - @Mock private NodeStateListener nodeStateListener; - @Mock private SchemaChangeListener schemaChangeListener; - @Mock private RequestTracker requestTracker; - - private DefaultNode node1; - private DefaultNode node2; - private DefaultNode node3; - private EventBus eventBus; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - DefaultEventLoopGroup adminEventLoopGroup = new DefaultEventLoopGroup(1); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - when(context.getNettyOptions()).thenReturn(nettyOptions); - - // Config: - when(defaultProfile.getBoolean(DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE)) - .thenReturn(true); - when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_ENABLED)).thenReturn(false); - when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); - when(defaultProfile.getDuration(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW)) - .thenReturn(Duration.ZERO); - when(defaultProfile.getInt(DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS)).thenReturn(1); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - // Init sequence: - when(metadataManager.refreshNodes()).thenReturn(CompletableFuture.completedFuture(null)); - when(metadataManager.refreshSchema(null, false, true)) - 
.thenReturn(CompletableFuture.completedFuture(null)); - when(context.getMetadataManager()).thenReturn(metadataManager); - - when(topologyMonitor.init()).thenReturn(CompletableFuture.completedFuture(null)); - when(context.getTopologyMonitor()).thenReturn(topologyMonitor); - - when(context.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper); - - when(context.getConfigLoader()).thenReturn(configLoader); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - - // Runtime behavior: - when(context.getSessionName()).thenReturn("test"); - - when(context.getChannelPoolFactory()).thenReturn(channelPoolFactory); - - eventBus = spy(new EventBus("test")); - when(context.getEventBus()).thenReturn(eventBus); - - node1 = mockLocalNode(1); - node2 = mockLocalNode(2); - node3 = mockLocalNode(3); - @SuppressWarnings("ConstantConditions") - ImmutableMap nodes = - ImmutableMap.of( - node1.getHostId(), node1, - node2.getHostId(), node2, - node3.getHostId(), node3); - when(metadata.getNodes()).thenReturn(nodes); - when(metadataManager.getMetadata()).thenReturn(metadata); - - PoolManager poolManager = new PoolManager(context); - when(context.getPoolManager()).thenReturn(poolManager); - - // Shutdown sequence: - when(context.getReconnectionPolicy()).thenReturn(reconnectionPolicy); - when(context.getRetryPolicy(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(retryPolicy); - when(context.getSpeculativeExecutionPolicies()) - .thenReturn( - ImmutableMap.of(DriverExecutionProfile.DEFAULT_NAME, speculativeExecutionPolicy)); - when(context.getAddressTranslator()).thenReturn(addressTranslator); - when(context.getNodeStateListener()).thenReturn(nodeStateListener); - when(context.getSchemaChangeListener()).thenReturn(schemaChangeListener); - when(context.getRequestTracker()).thenReturn(requestTracker); - - when(metadataManager.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); - 
when(metadataManager.forceCloseAsync()).thenReturn(CompletableFuture.completedFuture(null)); - - when(topologyMonitor.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); - when(topologyMonitor.forceCloseAsync()).thenReturn(CompletableFuture.completedFuture(null)); - - when(context.getControlConnection()).thenReturn(controlConnection); - when(controlConnection.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); - when(controlConnection.forceCloseAsync()).thenReturn(CompletableFuture.completedFuture(null)); - - DefaultPromise nettyCloseFuture = new DefaultPromise<>(GlobalEventExecutor.INSTANCE); - nettyCloseFuture.setSuccess(null); - when(nettyOptions.onClose()).thenAnswer(invocation -> nettyCloseFuture); - } - - @Test - public void should_initialize_pools_with_distances() { - when(node3.getDistance()).thenReturn(NodeDistance.REMOTE); - - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.REMOTE, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.REMOTE); - - assertThatStage(initFuture).isNotDone(); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()) - 
.containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_not_connect_to_ignored_nodes() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_not_connect_to_forced_down_nodes() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_adjust_distance_if_changed_while_init() { - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - 
MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.LOCAL, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - - assertThatStage(initFuture).isNotDone(); - - // Distance changes while init still pending - eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - verify(pool2, VERIFY_TIMEOUT).resize(NodeDistance.REMOTE); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_remove_pool_if_ignored_while_init() { - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.LOCAL, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - - assertThatStage(initFuture).isNotDone(); - - // Distance 
changes while init still pending - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_remove_pool_if_forced_down_while_init() { - CompletableFuture pool1Future = new CompletableFuture<>(); - CompletableFuture pool2Future = new CompletableFuture<>(); - CompletableFuture pool3Future = new CompletableFuture<>(); - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .pending(node1, KEYSPACE, NodeDistance.LOCAL, pool1Future) - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .pending(node3, KEYSPACE, NodeDistance.LOCAL, pool3Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - - assertThatStage(initFuture).isNotDone(); - - // Forced down while init still pending - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - - pool1Future.complete(pool1); - pool2Future.complete(pool2); - pool3Future.complete(pool3); - - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - - assertThatStage(initFuture) - .isSuccess( - session -> - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_resize_pool_if_distance_changes() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper 
= - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); - verify(pool2, timeout(500)).resize(NodeDistance.REMOTE); - } - - @Test - public void should_remove_pool_if_node_becomes_ignored() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - verify(pool2, timeout(500)).closeAsync(); - - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_do_nothing_if_node_becomes_ignored_but_was_already_ignored() - throws InterruptedException { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - 
MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - verify(pool2, timeout(100)).closeAsync(); - - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - // Fire the same event again, nothing should happen - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - TimeUnit.MILLISECONDS.sleep(200); - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_recreate_pool_if_node_becomes_not_ignored() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) 
session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - await() - .untilAsserted( - () -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_remove_pool_if_node_is_forced_down() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - verify(pool2, timeout(500)).closeAsync(); - - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_recreate_pool_if_node_is_forced_back_up() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // when node2 comes back up - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .build(); - - 
CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - await() - .untilAsserted( - () -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_not_recreate_pool_if_node_is_forced_back_up_but_ignored() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - factoryHelper.verifyNoMoreCalls(); - } - - @Test - public void should_adjust_distance_if_changed_while_recreating() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - 
ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Distance changes again while pool init is in progress - eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); - - // Now pool init succeeds - pool2Future.complete(pool2); - - // Pool should have been adjusted - verify(pool2, VERIFY_TIMEOUT).resize(NodeDistance.REMOTE); - await() - .untilAsserted( - () -> - assertThat(((DefaultSession) session).getPools()) - .containsValues(pool1, pool2, pool3)); - } - - @Test - public void should_remove_pool_if_ignored_while_recreating() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, 
NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Distance changes to ignored while pool init is in progress - eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - - // Now pool init succeeds - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_remove_pool_if_forced_down_while_recreating() { - when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // Initial connection - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // When node2 becomes not ignored - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = 
CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); - - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Forced down while pool init is in progress - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - - // Now pool init succeeds - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - await() - .untilAsserted( - () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); - } - - @Test - public void should_close_all_pools_when_closing() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - - CompletionStage closeFuture = session.closeAsync(); - assertThatStage(closeFuture).isSuccess(); - - verify(pool1, VERIFY_TIMEOUT).closeAsync(); - verify(pool2, VERIFY_TIMEOUT).closeAsync(); - verify(pool3, VERIFY_TIMEOUT).closeAsync(); - } - - @Test - public void should_force_close_all_pools_when_force_closing() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper 
factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - - CompletionStage closeFuture = session.forceCloseAsync(); - assertThatStage(closeFuture).isSuccess(); - - verify(pool1, VERIFY_TIMEOUT).forceCloseAsync(); - verify(pool2, VERIFY_TIMEOUT).forceCloseAsync(); - verify(pool3, VERIFY_TIMEOUT).forceCloseAsync(); - } - - @Test - public void should_close_pool_if_recreated_while_closing() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // when node2 comes back up - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); - - // node2 comes back up, start initializing a pool for it - 
eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // but the session gets closed before pool init completes - CompletionStage closeFuture = session.closeAsync(); - assertThatStage(closeFuture).isSuccess(); - - // now pool init completes - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).forceCloseAsync(); - } - - @Test - public void should_set_keyspace_on_all_pools() { - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - MockChannelPoolFactoryHelper.builder(channelPoolFactory) - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node2, KEYSPACE, NodeDistance.LOCAL, pool2) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - - CqlIdentifier newKeyspace = CqlIdentifier.fromInternal("newKeyspace"); - ((DefaultSession) session).setKeyspace(newKeyspace); - - verify(pool1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(pool2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(pool3, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - } - - @Test - public void should_set_keyspace_on_pool_if_recreated_while_switching_keyspace() { - when(node2.getState()).thenReturn(NodeState.FORCED_DOWN); - - ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); - CompletableFuture pool2Future = new CompletableFuture<>(); - ChannelPool pool3 = mockPool(node3); - MockChannelPoolFactoryHelper factoryHelper = - 
MockChannelPoolFactoryHelper.builder(channelPoolFactory) - // init - .success(node1, KEYSPACE, NodeDistance.LOCAL, pool1) - .success(node3, KEYSPACE, NodeDistance.LOCAL, pool3) - // when node2 comes back up - .pending(node2, KEYSPACE, NodeDistance.LOCAL, pool2Future) - .build(); - - CompletionStage initFuture = newSession(); - - factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); - factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - assertThatStage(initFuture).isSuccess(); - DefaultSession session = - (DefaultSession) CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(session.getPools()).containsValues(pool1, pool3); - - // node2 comes back up, start initializing a pool for it - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - - // Keyspace gets changed on the session in the meantime, node2's pool will miss it - CqlIdentifier newKeyspace = CqlIdentifier.fromInternal("newKeyspace"); - session.setKeyspace(newKeyspace); - verify(pool1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - verify(pool3, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - - // now pool init completes - pool2Future.complete(pool2); - - // Pool should have been closed - verify(pool2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); - } - - private ChannelPool mockPool(Node node) { - ChannelPool pool = mock(ChannelPool.class); - when(pool.getNode()).thenReturn(node); - when(pool.getInitialKeyspaceName()).thenReturn(KEYSPACE); - when(pool.setKeyspace(any(CqlIdentifier.class))) - .thenReturn(CompletableFuture.completedFuture(null)); - CompletableFuture closeFuture = new CompletableFuture<>(); - when(pool.closeFuture()).thenReturn(closeFuture); - when(pool.closeAsync()) - .then( - i -> { - closeFuture.complete(null); - return closeFuture; - }); - when(pool.forceCloseAsync()) - .then( - i -> { - closeFuture.complete(null); - return closeFuture; - }); - return 
pool; - } - - private CompletionStage newSession() { - return DefaultSession.init(context, Collections.emptySet(), KEYSPACE); - } - - private static DefaultNode mockLocalNode(int i) { - DefaultNode node = mock(DefaultNode.class); - when(node.getHostId()).thenReturn(UUID.randomUUID()); - DefaultEndPoint endPoint = TestNodeFactory.newEndPoint(i); - when(node.getEndPoint()).thenReturn(endPoint); - when(node.getBroadcastRpcAddress()).thenReturn(Optional.of(endPoint.resolve())); - when(node.getDistance()).thenReturn(NodeDistance.LOCAL); - when(node.toString()).thenReturn("node" + i); - return node; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java deleted file mode 100644 index 6c3dc7f3689..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.internal.util.MockUtil; -import org.mockito.stubbing.OngoingStubbing; - -public class MockChannelPoolFactoryHelper { - - public static MockChannelPoolFactoryHelper.Builder builder( - ChannelPoolFactory channelPoolFactory) { - return new MockChannelPoolFactoryHelper.Builder(channelPoolFactory); - } - - private final ChannelPoolFactory channelPoolFactory; - private final InOrder inOrder; - // If waitForCalls sees more invocations than expected, the difference is stored here - private final Map previous = new 
HashMap<>(); - - private MockChannelPoolFactoryHelper(ChannelPoolFactory channelPoolFactory) { - this.channelPoolFactory = channelPoolFactory; - this.inOrder = inOrder(channelPoolFactory); - } - - public void waitForCall(Node node, CqlIdentifier keyspace, NodeDistance distance) { - waitForCalls(node, keyspace, distance, 1); - } - - /** - * Waits for a given number of calls to {@code ChannelPoolFactory.init()}. - * - *

Because we test asynchronous, non-blocking code, there might already be more calls than - * expected when this method is called. If so, the extra calls are stored and stored and will be - * taken into account next time. - */ - public void waitForCalls(Node node, CqlIdentifier keyspace, NodeDistance distance, int expected) { - Params params = new Params(node, keyspace, distance); - int fromLastTime = previous.getOrDefault(params, 0); - if (fromLastTime >= expected) { - previous.put(params, fromLastTime - expected); - return; - } - expected -= fromLastTime; - - // Because we test asynchronous, non-blocking code, there might have been already more - // invocations than expected. Use `atLeast` and a captor to find out. - ArgumentCaptor contextCaptor = - ArgumentCaptor.forClass(InternalDriverContext.class); - inOrder - .verify(channelPoolFactory, timeout(500).atLeast(expected)) - .init(eq(node), eq(keyspace), eq(distance), contextCaptor.capture(), eq("test")); - int actual = contextCaptor.getAllValues().size(); - - int extras = actual - expected; - if (extras > 0) { - previous.compute(params, (k, v) -> (v == null) ? 
extras : v + extras); - } - } - - public void verifyNoMoreCalls() { - inOrder - .verify(channelPoolFactory, timeout(500).times(0)) - .init( - any(Node.class), - any(CqlIdentifier.class), - any(NodeDistance.class), - any(InternalDriverContext.class), - any(String.class)); - - Set counts = Sets.newHashSet(previous.values()); - if (!counts.isEmpty()) { - assertThat(counts).containsExactly(0); - } - } - - public static class Builder { - private final ChannelPoolFactory channelPoolFactory; - private final ListMultimap invocations = - MultimapBuilder.hashKeys().arrayListValues().build(); - - private Builder(ChannelPoolFactory channelPoolFactory) { - assertThat(MockUtil.isMock(channelPoolFactory)).as("expected a mock").isTrue(); - verifyZeroInteractions(channelPoolFactory); - this.channelPoolFactory = channelPoolFactory; - } - - public Builder success( - Node node, CqlIdentifier keyspaceName, NodeDistance distance, ChannelPool pool) { - invocations.put(new Params(node, keyspaceName, distance), pool); - return this; - } - - public Builder failure( - Node node, CqlIdentifier keyspaceName, NodeDistance distance, String error) { - invocations.put(new Params(node, keyspaceName, distance), new Exception(error)); - return this; - } - - public Builder failure( - Node node, CqlIdentifier keyspaceName, NodeDistance distance, Throwable error) { - invocations.put(new Params(node, keyspaceName, distance), error); - return this; - } - - public Builder pending( - Node node, - CqlIdentifier keyspaceName, - NodeDistance distance, - CompletionStage future) { - invocations.put(new Params(node, keyspaceName, distance), future); - return this; - } - - public MockChannelPoolFactoryHelper build() { - stub(); - return new MockChannelPoolFactoryHelper(channelPoolFactory); - } - - private void stub() { - for (Params params : invocations.keySet()) { - Deque> results = new ArrayDeque<>(); - for (Object object : invocations.get(params)) { - if (object instanceof ChannelPool) { - 
results.add(CompletableFuture.completedFuture(((ChannelPool) object))); - } else if (object instanceof Throwable) { - results.add(CompletableFutures.failedFuture(((Throwable) object))); - } else if (object instanceof CompletableFuture) { - @SuppressWarnings("unchecked") - CompletionStage future = (CompletionStage) object; - results.add(future); - } else { - fail("unexpected type: " + object.getClass()); - } - } - if (results.size() > 0) { - CompletionStage first = results.poll(); - OngoingStubbing> ongoingStubbing = - when(channelPoolFactory.init( - eq(params.node), - eq(params.keyspace), - eq(params.distance), - any(InternalDriverContext.class), - eq("test"))) - .thenReturn(first); - for (CompletionStage result : results) { - ongoingStubbing.thenReturn(result); - } - } - } - } - } - - private static class Params { - private final Node node; - private final CqlIdentifier keyspace; - private final NodeDistance distance; - - private Params(Node node, CqlIdentifier keyspace, NodeDistance distance) { - this.node = node; - this.keyspace = keyspace; - this.distance = distance; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Params) { - Params that = (Params) other; - return Objects.equals(this.node, that.node) - && Objects.equals(this.keyspace, that.keyspace) - && Objects.equals(this.distance, that.distance); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(node, keyspace, distance); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java deleted file mode 100644 index 60483da4c72..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import io.netty.channel.DefaultEventLoopGroup; -import java.util.concurrent.ConcurrentHashMap; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class PoolManagerTest { - @Mock private InternalDriverContext context; - @Mock private NettyOptions nettyOptions; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - DefaultEventLoopGroup adminEventLoopGroup = new DefaultEventLoopGroup(1); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); - 
when(context.getNettyOptions()).thenReturn(nettyOptions); - when(context.getEventBus()).thenReturn(new EventBus("test")); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - } - - @Test - public void should_use_weak_values_if_config_is_true_or_undefined() { - when(defaultProfile.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) - .thenReturn(true); - // As weak values map class is MapMakerInternalMap - assertThat(new PoolManager(context).getRepreparePayloads()) - .isNotInstanceOf(ConcurrentHashMap.class); - } - - @Test - public void should_not_use_weak_values_if_config_is_false() { - when(defaultProfile.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) - .thenReturn(false); - assertThat(new PoolManager(context).getRepreparePayloads()) - .isInstanceOf(ConcurrentHashMap.class); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java deleted file mode 100644 index 555ed2e8806..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.request.Prepare; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.DefaultRows; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.response.result.Rows; -import com.datastax.oss.protocol.internal.response.result.RowsMetadata; -import com.datastax.oss.protocol.internal.util.Bytes; -import 
io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ImmediateEventExecutor; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayDeque; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ReprepareOnUpTest { - @Mock private ChannelPool pool; - @Mock private DriverChannel channel; - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private TopologyMonitor topologyMonitor; - @Mock private MetricsFactory metricsFactory; - @Mock private SessionMetricUpdater metricUpdater; - private Runnable whenPrepared; - private CompletionStage done; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(pool.next()).thenReturn(channel); - - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE)) - .thenReturn(true); - when(defaultProfile.getDuration(DefaultDriverOption.REPREPARE_TIMEOUT)) - .thenReturn(Duration.ofMillis(500)); - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_STATEMENTS)).thenReturn(0); - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_PARALLELISM)).thenReturn(100); - when(context.getConfig()).thenReturn(config); - - when(context.getMetricsFactory()).thenReturn(metricsFactory); - when(metricsFactory.getSessionUpdater()).thenReturn(metricUpdater); - - done = new CompletableFuture<>(); - whenPrepared = () -> ((CompletableFuture) done).complete(null); - } - - @Test - public void should_complete_immediately_if_no_prepared_statements() { - // Given - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", 
- pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads(/*none*/ ), - context, - whenPrepared); - - // When - reprepareOnUp.start(); - - // Then - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_reprepare_all_if_system_table_query_fails() { - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - adminQuery.resultFuture.completeExceptionally(new RuntimeException("mock error")); - - for (char c = 'a'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_reprepare_all_if_system_table_empty() { - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows no ids: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows(/*none*/ ), null, 
DefaultProtocolVersion.DEFAULT)); - - for (char c = 'a'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_reprepare_all_if_system_query_disabled() { - when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE)) - .thenReturn(false); - - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery; - for (char c = 'a'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_not_reprepare_already_known_statements() { - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows d, e and f already: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows('d', 'e', 'f'), null, 
DefaultProtocolVersion.DEFAULT)); - - for (char c = 'a'; c <= 'c'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_proceed_if_schema_agreement_not_reached() { - when(topologyMonitor.checkSchemaAgreement()) - .thenReturn(CompletableFuture.completedFuture(false)); - should_not_reprepare_already_known_statements(); - } - - @Test - public void should_proceed_if_schema_agreement_fails() { - when(topologyMonitor.checkSchemaAgreement()) - .thenReturn(CompletableFutures.failedFuture(new RuntimeException("test"))); - should_not_reprepare_already_known_statements(); - } - - @Test - public void should_limit_number_of_statements_to_reprepare() { - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_STATEMENTS)).thenReturn(3); - - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows no ids: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows(/*none*/ ), null, DefaultProtocolVersion.DEFAULT)); - - for (char c = 'a'; c <= 'c'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + 
c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_limit_number_of_statements_reprepared_in_parallel() { - when(defaultProfile.getInt(DefaultDriverOption.REPREPARE_MAX_PARALLELISM)).thenReturn(3); - - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp( - "test", - pool, - ImmediateEventExecutor.INSTANCE, - getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), - context, - whenPrepared); - - reprepareOnUp.start(); - - MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Query.class); - assertThat(((Query) adminQuery.request).query) - .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); - // server knows no ids => will reprepare all 6: - adminQuery.resultFuture.complete( - new AdminResult(preparedIdRows(/*none*/ ), null, DefaultProtocolVersion.DEFAULT)); - - // 3 statements have enqueued, we've not completed the queries yet so no more should be sent: - assertThat(reprepareOnUp.queries.size()).isEqualTo(3); - - // As we complete each statement, another one should enqueue: - for (char c = 'a'; c <= 'c'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - assertThat(reprepareOnUp.queries.size()).isEqualTo(3); - } - - // Complete the last 3: - for (char c = 'd'; c <= 'f'; c++) { - adminQuery = reprepareOnUp.queries.poll(); - assertThat(adminQuery).isNotNull(); - assertThat(adminQuery.request).isInstanceOf(Prepare.class); - assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); - adminQuery.resultFuture.complete(null); - } - - assertThatStage(done).isSuccess(v -> 
assertThat(reprepareOnUp.queries).isEmpty()); - } - - private Map getMockPayloads(char... values) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (char value : values) { - ByteBuffer id = Bytes.fromHexString("0x0" + value); - builder.put( - id, new RepreparePayload(id, "mock query " + value, null, Collections.emptyMap())); - } - return builder.build(); - } - - /** Bypasses the channel to make testing easier. */ - private static class MockReprepareOnUp extends ReprepareOnUp { - - private Queue queries = new ArrayDeque<>(); - - MockReprepareOnUp( - String logPrefix, - ChannelPool pool, - EventExecutor adminExecutor, - Map repreparePayloads, - InternalDriverContext context, - Runnable whenPrepared) { - super(logPrefix, pool, adminExecutor, repreparePayloads, context, whenPrepared); - } - - @Override - protected CompletionStage queryAsync( - Message message, Map customPayload, String debugString) { - CompletableFuture resultFuture = new CompletableFuture<>(); - queries.add(new MockAdminQuery(message, resultFuture)); - return resultFuture; - } - - @Override - protected CompletionStage prepareAsync( - Message message, Map customPayload) { - CompletableFuture resultFuture = new CompletableFuture<>(); - queries.add(new MockAdminQuery(message, resultFuture)); - return resultFuture; - } - } - - private static class MockAdminQuery { - private final Message request; - private final CompletableFuture resultFuture; - - @SuppressWarnings("unchecked") - public MockAdminQuery(Message request, CompletableFuture resultFuture) { - this.request = request; - this.resultFuture = (CompletableFuture) resultFuture; - } - } - - private Rows preparedIdRows(char... 
values) { - ColumnSpec preparedIdSpec = - new ColumnSpec( - "system", - "prepared_statements", - "prepared_id", - 0, - RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB)); - RowsMetadata rowsMetadata = - new RowsMetadata(ImmutableList.of(preparedIdSpec), null, null, null); - Queue> data = new ArrayDeque<>(); - for (char value : values) { - data.add(ImmutableList.of(Bytes.fromHexString("0x0" + value))); - } - return new DefaultRows(rowsMetadata, data); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java deleted file mode 100644 index 7eb682070cd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.function.Consumer; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class ConcurrencyLimitingRequestThrottlerTest { - - @Mock private DriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - - private ConcurrencyLimitingRequestThrottler throttler; - - @Before - public void setup() { - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS)) - .thenReturn(5); - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE)) - .thenReturn(10); - - throttler = new ConcurrencyLimitingRequestThrottler(context); - } - - @Test - public void should_start_immediately_when_under_capacity() { - // Given - MockThrottled request = new MockThrottled(); - - // When - throttler.register(request); - - // Then - assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - 
assertThat(throttler.getConcurrentRequests()).isEqualTo(1); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_allow_new_request_when_active_one_succeeds() { - should_allow_new_request_when_active_one_completes(throttler::signalSuccess); - } - - @Test - public void should_allow_new_request_when_active_one_fails() { - should_allow_new_request_when_active_one_completes( - request -> throttler.signalError(request, new RuntimeException("mock error"))); - } - - @Test - public void should_allow_new_request_when_active_one_times_out() { - should_allow_new_request_when_active_one_completes(throttler::signalTimeout); - } - - @Test - public void should_allow_new_request_when_active_one_canceled() { - should_allow_new_request_when_active_one_completes(throttler::signalCancel); - } - - private void should_allow_new_request_when_active_one_completes( - Consumer completeCallback) { - // Given - MockThrottled first = new MockThrottled(); - throttler.register(first); - assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - for (int i = 0; i < 4; i++) { // fill to capacity - throttler.register(new MockThrottled()); - } - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).isEmpty(); - - // When - completeCallback.accept(first); - assertThat(throttler.getConcurrentRequests()).isEqualTo(4); - assertThat(throttler.getQueue()).isEmpty(); - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - - // Then - assertThatStage(incoming.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_enqueue_when_over_capacity() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - 
assertThat(throttler.getQueue()).isEmpty(); - - // When - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - - // Then - assertThatStage(incoming.ended).isNotDone(); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).containsExactly(incoming); - } - - @Test - public void should_dequeue_when_active_succeeds() { - should_dequeue_when_active_completes(throttler::signalSuccess); - } - - @Test - public void should_dequeue_when_active_fails() { - should_dequeue_when_active_completes( - request -> throttler.signalError(request, new RuntimeException("mock error"))); - } - - @Test - public void should_dequeue_when_active_times_out() { - should_dequeue_when_active_completes(throttler::signalTimeout); - } - - private void should_dequeue_when_active_completes(Consumer completeCallback) { - // Given - MockThrottled first = new MockThrottled(); - throttler.register(first); - assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - for (int i = 0; i < 4; i++) { - throttler.register(new MockThrottled()); - } - - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - assertThatStage(incoming.ended).isNotDone(); - - // When - completeCallback.accept(first); - - // Then - assertThatStage(incoming.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_reject_when_queue_is_full() { - // Given - for (int i = 0; i < 15; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).hasSize(10); - - // When - MockThrottled incoming = new MockThrottled(); - throttler.register(incoming); - - // Then - assertThatStage(incoming.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } 
- - @Test - public void should_remove_timed_out_request_from_queue() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - MockThrottled queued1 = new MockThrottled(); - throttler.register(queued1); - MockThrottled queued2 = new MockThrottled(); - throttler.register(queued2); - - // When - throttler.signalTimeout(queued1); - - // Then - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getConcurrentRequests()).isEqualTo(5); - assertThat(throttler.getQueue()).hasSize(1); - } - - @Test - public void should_reject_enqueued_when_closing() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - List enqueued = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - MockThrottled request = new MockThrottled(); - throttler.register(request); - assertThatStage(request.ended).isNotDone(); - enqueued.add(request); - } - - // When - throttler.close(); - - // Then - for (MockThrottled request : enqueued) { - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - // When - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - @Test - public void should_run_throttle_callbacks_concurrently() throws InterruptedException { - // Given - - // a task is enqueued, which when in onThrottleReady, will stall latch countDown()ed - // register() should automatically start onThrottleReady on same thread - - // start a parallel thread - CountDownLatch firstRelease = new CountDownLatch(1); - MockThrottled first = new MockThrottled(firstRelease); - Runnable r = - () -> { - throttler.register(first); - first.ended.toCompletableFuture().thenRun(() -> throttler.signalSuccess(first)); - }; - Thread t = new Thread(r); - t.start(); - - // wait for the registration threads to 
reach await state - assertThatStage(first.started).isSuccess(); - assertThatStage(first.ended).isNotDone(); - - // When - // we concurrently submit a second shorter task - MockThrottled second = new MockThrottled(); - // (on a second thread, so that we can join and force a timeout in case - // registration is delayed) - Thread t2 = new Thread(() -> throttler.register(second)); - t2.start(); - t2.join(1_000); - - // Then - // registration will trigger callback, should complete ~immediately - assertThatStage(second.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - // first should still be unfinished - assertThatStage(first.started).isDone(); - assertThatStage(first.ended).isNotDone(); - // now finish, and verify - firstRelease.countDown(); - assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - - t.join(1_000); - } - - @Test - public void should_enqueue_tasks_quickly_when_callbacks_blocked() throws InterruptedException { - // Given - - // Multiple tasks are registered, up to the limit, and proceed into their - // callback - - // start five parallel threads - final int THREADS = 5; - Thread[] threads = new Thread[THREADS]; - CountDownLatch[] latches = new CountDownLatch[THREADS]; - MockThrottled[] throttled = new MockThrottled[THREADS]; - for (int i = 0; i < threads.length; i++) { - latches[i] = new CountDownLatch(1); - final MockThrottled itThrottled = new MockThrottled(latches[i]); - throttled[i] = itThrottled; - threads[i] = - new Thread( - () -> { - throttler.register(itThrottled); - itThrottled - .ended - .toCompletableFuture() - .thenRun(() -> throttler.signalSuccess(itThrottled)); - }); - threads[i].start(); - } - - // wait for the registration threads to be launched - // they are all waiting now - for (int i = 0; i < throttled.length; i++) { - assertThatStage(throttled[i].started).isSuccess(); - assertThatStage(throttled[i].ended).isNotDone(); - } - - // When - // we concurrently submit another task - 
MockThrottled last = new MockThrottled(); - throttler.register(last); - - // Then - // registration will enqueue the callback, and it should not - // take any time to proceed (ie: we should not be blocked) - // and there should be an element in the queue - assertThatStage(last.started).isNotDone(); - assertThatStage(last.ended).isNotDone(); - assertThat(throttler.getQueue()).containsExactly(last); - - // we still have not released, so old throttled threads should be waiting - for (int i = 0; i < throttled.length; i++) { - assertThatStage(throttled[i].started).isDone(); - assertThatStage(throttled[i].ended).isNotDone(); - } - - // now let us release .. - for (int i = 0; i < latches.length; i++) { - latches[i].countDown(); - } - - // .. and check everything finished up OK - for (int i = 0; i < latches.length; i++) { - assertThatStage(throttled[i].started).isSuccess(); - assertThatStage(throttled[i].ended).isSuccess(); - } - - // for good measure, we will also wait for the enqueued to complete - assertThatStage(last.started).isSuccess(); - assertThatStage(last.ended).isSuccess(); - - for (int i = 0; i < threads.length; i++) { - threads[i].join(1_000); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java deleted file mode 100644 index 9e54e3d511f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CountDownLatch; - -class MockThrottled implements Throttled { - final CompletionStage started = new CompletableFuture<>(); - final CompletionStage ended = new CompletableFuture<>(); - final CountDownLatch canRelease; - - public MockThrottled() { - this(new CountDownLatch(0)); - } - - /* - * The releaseLatch can be provided to add some delay before the - * task readiness/fail callbacks complete. This can be used, eg, to - * imitate a slow callback. 
- */ - public MockThrottled(CountDownLatch releaseLatch) { - this.canRelease = releaseLatch; - } - - @Override - public void onThrottleReady(boolean wasDelayed) { - started.toCompletableFuture().complete(null); - awaitRelease(); - ended.toCompletableFuture().complete(wasDelayed); - } - - @Override - public void onThrottleFailure(@NonNull RequestThrottlingException error) { - started.toCompletableFuture().complete(null); - awaitRelease(); - ended.toCompletableFuture().completeExceptionally(error); - } - - private void awaitRelease() { - Uninterruptibles.awaitUninterruptibly(canRelease); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java deleted file mode 100644 index 1e15610bf7b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.session.throttling; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import io.netty.channel.EventLoopGroup; -import java.time.Duration; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Silent.class) -public class RateLimitingRequestThrottlerTest { - - private static final long ONE_HUNDRED_MILLISECONDS = - TimeUnit.NANOSECONDS.convert(100, TimeUnit.MILLISECONDS); - private static final long TWO_HUNDRED_MILLISECONDS = - TimeUnit.NANOSECONDS.convert(200, TimeUnit.MILLISECONDS); - private static final long TWO_SECONDS = TimeUnit.NANOSECONDS.convert(2, TimeUnit.SECONDS); - - // Note: we trigger scheduled task manually, so this is for verification purposes only, it doesn't - // need to be consistent with the actual throttling rate. 
- private static final Duration DRAIN_INTERVAL = Duration.ofMillis(10); - - @Mock private InternalDriverContext context; - @Mock private DriverConfig config; - @Mock private DriverExecutionProfile defaultProfile; - @Mock private NettyOptions nettyOptions; - @Mock private EventLoopGroup adminGroup; - - private ScheduledTaskCapturingEventLoop adminExecutor; - private SettableNanoClock clock = new SettableNanoClock(); - - private RateLimitingRequestThrottler throttler; - - @Before - public void setup() { - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(defaultProfile); - - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND)) - .thenReturn(5); - when(defaultProfile.getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE)) - .thenReturn(10); - - // Set to match the time to reissue one permit. Although it does not matter in practice, since - // the executor is mocked and we trigger tasks manually. - when(defaultProfile.getDuration(DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL)) - .thenReturn(DRAIN_INTERVAL); - - when(context.getNettyOptions()).thenReturn(nettyOptions); - when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminGroup); - adminExecutor = new ScheduledTaskCapturingEventLoop(adminGroup); - when(adminGroup.next()).thenReturn(adminExecutor); - - throttler = new RateLimitingRequestThrottler(context, clock); - } - - /** Note: the throttler starts with 1 second worth of permits, so at t=0 we have 5 available. 
*/ - @Test - public void should_start_immediately_when_under_capacity() { - // Given - MockThrottled request = new MockThrottled(); - - // When - throttler.register(request); - - // Then - assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - assertThat(throttler.getStoredPermits()).isEqualTo(4); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_allow_new_request_when_under_rate() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).isEmpty(); - } - - @Test - public void should_enqueue_when_over_rate() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - // (do not advance time) - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(request); - - ScheduledTaskCapturingEventLoop.CapturedTask task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - } - - @Test - public void should_reject_when_queue_is_full() { - // Given - for (int i = 0; i < 15; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).hasSize(10); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); // even if time has passed, queued 
items have priority - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - @Test - public void should_remove_timed_out_request_from_queue() { - testRemoveInvalidEventFromQueue(throttler::signalTimeout); - } - - @Test - public void should_remove_cancel_request_from_queue() { - testRemoveInvalidEventFromQueue(throttler::signalCancel); - } - - private void testRemoveInvalidEventFromQueue(Consumer completeCallback) { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - MockThrottled queued1 = new MockThrottled(); - throttler.register(queued1); - MockThrottled queued2 = new MockThrottled(); - throttler.register(queued2); - - // When - completeCallback.accept(queued1); - - // Then - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(queued2); - } - - @Test - public void should_dequeue_when_draining_task_runs() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - - MockThrottled queued1 = new MockThrottled(); - throttler.register(queued1); - assertThatStage(queued1.ended).isNotDone(); - MockThrottled queued2 = new MockThrottled(); - throttler.register(queued2); - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).hasSize(2); - - ScheduledTaskCapturingEventLoop.CapturedTask task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - - // When - // (do not advance clock => no new permits) - task.run(); - - // Then - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(queued1, queued2); - // task 
reschedules itself since it did not empty the queue - task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); // 1 extra permit issued - task.run(); - - // Then - assertThatStage(queued1.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThatStage(queued2.ended).isNotDone(); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).containsExactly(queued2); - // task reschedules itself since it did not empty the queue - task = adminExecutor.nextTask(); - assertThat(task).isNotNull(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(DRAIN_INTERVAL.toNanos()); - - // When - clock.add(TWO_HUNDRED_MILLISECONDS); - task.run(); - - // Then - assertThatStage(queued2.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThat(throttler.getStoredPermits()).isEqualTo(0); - assertThat(throttler.getQueue()).isEmpty(); - assertThat(adminExecutor.nextTask()).isNull(); - } - - @Test - public void should_store_new_permits_up_to_threshold() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - clock.add(TWO_SECONDS); // should store at most 1 second worth of permits - - // Then - // acquire to trigger the throttler to update its permits - throttler.register(new MockThrottled()); - assertThat(throttler.getStoredPermits()).isEqualTo(4); - } - - /** - * Ensure that permits are still created if we try to acquire faster than the minimal interval to - * create one permit. In an early version of the code there was a bug where we would reset the - * elapsed time on each acquisition attempt, and never regenerate permits. 
- */ - @Test - public void should_keep_accumulating_time_if_no_permits_created() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - assertThat(throttler.getStoredPermits()).isEqualTo(0); - - // When - clock.add(ONE_HUNDRED_MILLISECONDS); - - // Then - MockThrottled queued = new MockThrottled(); - throttler.register(queued); - assertThatStage(queued.ended).isNotDone(); - - // When - clock.add(ONE_HUNDRED_MILLISECONDS); - adminExecutor.nextTask().run(); - - // Then - assertThatStage(queued.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - } - - @Test - public void should_reject_enqueued_when_closing() { - // Given - for (int i = 0; i < 5; i++) { - throttler.register(new MockThrottled()); - } - List enqueued = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - MockThrottled request = new MockThrottled(); - throttler.register(request); - assertThatStage(request.ended).isNotDone(); - enqueued.add(request); - } - - // When - throttler.close(); - - // Then - for (MockThrottled request : enqueued) { - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } - - // When - MockThrottled request = new MockThrottled(); - throttler.register(request); - - // Then - assertThatStage(request.ended) - .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java deleted file mode 100644 index 1489d1da345..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.session.throttling; - -class SettableNanoClock implements NanoClock { - - private volatile long nanoTime; - - @Override - public long nanoTime() { - return nanoTime; - } - - // This is racy, but in our tests it's never read concurrently - @SuppressWarnings({"NonAtomicVolatileUpdate", "NonAtomicOperationOnVolatileField"}) - void add(long increment) { - nanoTime += increment; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java deleted file mode 100644 index d07b45c21df..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.ssl; - -import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; -import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; - -import java.io.IOException; -import java.io.InputStream; -import java.math.BigInteger; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.SocketException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyManagementException; -import java.security.KeyStore; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.security.cert.X509Certificate; -import java.util.Optional; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import java.util.function.Supplier; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLPeerUnverifiedException; -import javax.net.ssl.SSLServerSocket; -import javax.net.ssl.SSLSocket; -import javax.net.ssl.TrustManagerFactory; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ReloadingKeyManagerFactoryTest { - private static final Logger logger = - LoggerFactory.getLogger(ReloadingKeyManagerFactoryTest.class); - - static final Path CERT_BASE = - Paths.get( - ReloadingKeyManagerFactoryTest.class - .getResource( - String.format("/%s/certs/", 
ReloadingKeyManagerFactoryTest.class.getSimpleName())) - .getPath()); - static final Path SERVER_KEYSTORE_PATH = CERT_BASE.resolve("server.keystore"); - static final Path SERVER_TRUSTSTORE_PATH = CERT_BASE.resolve("server.truststore"); - - static final Path ORIGINAL_CLIENT_KEYSTORE_PATH = CERT_BASE.resolve("client-original.keystore"); - static final Path ALTERNATE_CLIENT_KEYSTORE_PATH = CERT_BASE.resolve("client-alternate.keystore"); - static final BigInteger ORIGINAL_CLIENT_KEYSTORE_CERT_SERIAL = - convertSerial("7372a966"); // 1936894310 - static final BigInteger ALTERNATE_CLIENT_KEYSTORE_CERT_SERIAL = - convertSerial("e50bf31"); // 240172849 - - // File at this path will change content - static final Path TMP_CLIENT_KEYSTORE_PATH; - - static { - try { - TMP_CLIENT_KEYSTORE_PATH = - Files.createTempFile(ReloadingKeyManagerFactoryTest.class.getSimpleName(), null); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - static final Path CLIENT_TRUSTSTORE_PATH = CERT_BASE.resolve("client.truststore"); - static final String CERTSTORE_PASSWORD = "changeit"; - - private static TrustManagerFactory buildTrustManagerFactory() { - TrustManagerFactory tmf; - try (InputStream tsf = Files.newInputStream(CLIENT_TRUSTSTORE_PATH)) { - KeyStore ts = KeyStore.getInstance("JKS"); - char[] password = CERTSTORE_PASSWORD.toCharArray(); - ts.load(tsf, password); - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ts); - } catch (Exception e) { - throw new RuntimeException(e); - } - return tmf; - } - - private static SSLContext buildServerSslContext() { - try { - SSLContext context = SSLContext.getInstance("SSL"); - - TrustManagerFactory tmf; - try (InputStream tsf = Files.newInputStream(SERVER_TRUSTSTORE_PATH)) { - KeyStore ts = KeyStore.getInstance("JKS"); - char[] password = CERTSTORE_PASSWORD.toCharArray(); - ts.load(tsf, password); - tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - 
tmf.init(ts); - } - - KeyManagerFactory kmf; - try (InputStream ksf = Files.newInputStream(SERVER_KEYSTORE_PATH)) { - KeyStore ks = KeyStore.getInstance("JKS"); - char[] password = CERTSTORE_PASSWORD.toCharArray(); - ks.load(ksf, password); - kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, password); - } - - context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); - return context; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - @Test - public void client_certificates_should_reload() throws Exception { - Files.copy( - ORIGINAL_CLIENT_KEYSTORE_PATH, TMP_CLIENT_KEYSTORE_PATH, REPLACE_EXISTING, COPY_ATTRIBUTES); - - final BlockingQueue> peerCertificates = - new LinkedBlockingQueue<>(1); - - // Create a listening socket. Make sure there's no backlog so each accept is in order. - SSLContext serverSslContext = buildServerSslContext(); - final SSLServerSocket server = - (SSLServerSocket) serverSslContext.getServerSocketFactory().createServerSocket(); - server.bind(new InetSocketAddress(0), 1); - server.setUseClientMode(false); - server.setNeedClientAuth(true); - Thread serverThread = - new Thread( - () -> { - while (true) { - try { - logger.info("Server accepting client"); - final SSLSocket conn = (SSLSocket) server.accept(); - logger.info("Server accepted client {}", conn); - conn.addHandshakeCompletedListener( - event -> { - boolean offer; - try { - // Transfer certificates to client thread once handshake is complete, so - // it can safely close - // the socket - offer = - peerCertificates.offer( - Optional.of((X509Certificate[]) event.getPeerCertificates())); - } catch (SSLPeerUnverifiedException e) { - offer = peerCertificates.offer(Optional.empty()); - } - Assert.assertTrue(offer); - }); - logger.info("Server starting handshake"); - // Without this, client handshake blocks - conn.startHandshake(); - } catch (IOException e) { - // Not sure why I sometimes see ~thousands of these 
locally - if (e instanceof SocketException && e.getMessage().contains("Socket closed")) - return; - logger.info("Server accept error", e); - } - } - }); - serverThread.setName(String.format("%s-serverThread", this.getClass().getSimpleName())); - serverThread.setDaemon(true); - serverThread.start(); - - final ReloadingKeyManagerFactory kmf = - ReloadingKeyManagerFactory.create( - TMP_CLIENT_KEYSTORE_PATH, CERTSTORE_PASSWORD, Optional.empty()); - // Need a tmf that tells the server to send its certs - final TrustManagerFactory tmf = buildTrustManagerFactory(); - - // Check original client certificate - testClientCertificates( - kmf, - tmf, - server.getLocalSocketAddress(), - () -> { - try { - return peerCertificates.poll(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - }, - certs -> { - Assert.assertEquals(1, certs.length); - X509Certificate cert = certs[0]; - Assert.assertEquals(ORIGINAL_CLIENT_KEYSTORE_CERT_SERIAL, cert.getSerialNumber()); - }); - - // Update keystore content - logger.info("Updating keystore file with new content"); - Files.copy( - ALTERNATE_CLIENT_KEYSTORE_PATH, - TMP_CLIENT_KEYSTORE_PATH, - REPLACE_EXISTING, - COPY_ATTRIBUTES); - kmf.reload(); - - // Check that alternate client certificate was applied - testClientCertificates( - kmf, - tmf, - server.getLocalSocketAddress(), - () -> { - try { - return peerCertificates.poll(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - }, - certs -> { - Assert.assertEquals(1, certs.length); - X509Certificate cert = certs[0]; - Assert.assertEquals(ALTERNATE_CLIENT_KEYSTORE_CERT_SERIAL, cert.getSerialNumber()); - }); - - kmf.close(); - server.close(); - } - - private static void testClientCertificates( - KeyManagerFactory kmf, - TrustManagerFactory tmf, - SocketAddress serverAddress, - Supplier> certsSupplier, - Consumer certsConsumer) - throws NoSuchAlgorithmException, KeyManagementException, IOException { - 
SSLContext clientSslContext = SSLContext.getInstance("TLS"); - clientSslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); - final SSLSocket client = (SSLSocket) clientSslContext.getSocketFactory().createSocket(); - logger.info("Client connecting"); - client.connect(serverAddress); - logger.info("Client doing handshake"); - client.startHandshake(); - - final Optional lastCertificate = certsSupplier.get(); - logger.info("Client got its certificate back from the server; closing socket"); - client.close(); - Assert.assertNotNull(lastCertificate); - Assert.assertTrue(lastCertificate.isPresent()); - logger.info("Client got its certificate back from server: {}", lastCertificate); - - certsConsumer.accept(lastCertificate.get()); - } - - private static BigInteger convertSerial(String hex) { - final BigInteger serial = new BigInteger(Integer.valueOf(hex, 16).toString()); - logger.info("Serial hex {} is {}", hex, serial); - return serial; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java deleted file mode 100644 index f1827eb8a86..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.time; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.fail; -import static org.mockito.Mockito.when; - -import java.util.SortedSet; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.stubbing.OngoingStubbing; - -public class AtomicTimestampGeneratorTest extends MonotonicTimestampGeneratorTestBase { - @Override - protected MonotonicTimestampGenerator newInstance(Clock clock) { - return new AtomicTimestampGenerator(clock, context); - } - - @Test - public void should_share_timestamps_across_all_threads() throws Exception { - // Prepare to generate 1000 timestamps with the clock frozen at 1 - OngoingStubbing stub = when(clock.currentTimeMicros()); - for (int i = 0; i < 1000; i++) { - stub = stub.thenReturn(1L); - } - - MonotonicTimestampGenerator generator = newInstance(clock); - - final int testThreadsCount = 2; - assertThat(1000 % testThreadsCount).isZero(); - - final SortedSet allTimestamps = new ConcurrentSkipListSet(); - ExecutorService executor = Executors.newFixedThreadPool(testThreadsCount); - for (int i = 0; i < testThreadsCount; i++) { - executor.submit( - () -> { - for (int j = 0; j < 1000 / testThreadsCount; j++) { - allTimestamps.add(generator.next()); - } - }); - } - executor.shutdown(); - if (!executor.awaitTermination(1, TimeUnit.SECONDS)) { - fail("Expected 
executor to shut down cleanly"); - } - - assertThat(allTimestamps).hasSize(1000); - assertThat(allTimestamps.first()).isEqualTo(1); - assertThat(allTimestamps.last()).isEqualTo(1000); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java deleted file mode 100644 index 7074dd4ccc2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.time.Duration; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.stubbing.OngoingStubbing; -import org.slf4j.LoggerFactory; - -abstract class MonotonicTimestampGeneratorTestBase { - - @Mock protected Clock clock; - @Mock protected InternalDriverContext context; - @Mock private DriverConfig config; - @Mock protected DriverExecutionProfile defaultProfile; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(config.getDefaultProfile()).thenReturn(defaultProfile); - when(context.getConfig()).thenReturn(config); - - // Disable warnings by default - when(defaultProfile.getDuration( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ZERO)) - .thenReturn(Duration.ZERO); - // Actual value doesn't really matter since we only test the first warning - when(defaultProfile.getDuration(DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL)) - .thenReturn(Duration.ofSeconds(10)); - - logger = (Logger) LoggerFactory.getLogger(MonotonicTimestampGenerator.class); - 
logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - } - - protected abstract MonotonicTimestampGenerator newInstance(Clock clock); - - @Test - public void should_use_clock_if_it_keeps_increasing() { - OngoingStubbing stub = when(clock.currentTimeMicros()); - for (long l = 1; l < 5; l++) { - stub = stub.thenReturn(l); - } - - MonotonicTimestampGenerator generator = newInstance(clock); - - for (long l = 1; l < 5; l++) { - assertThat(generator.next()).isEqualTo(l); - } - } - - @Test - public void should_increment_if_clock_does_not_increase() { - when(clock.currentTimeMicros()).thenReturn(1L, 1L, 1L, 5L); - - MonotonicTimestampGenerator generator = newInstance(clock); - - assertThat(generator.next()).isEqualTo(1); - assertThat(generator.next()).isEqualTo(2); - assertThat(generator.next()).isEqualTo(3); - assertThat(generator.next()).isEqualTo(5); - } - - @Test - public void should_warn_if_timestamps_drift() { - when(defaultProfile.getDuration( - DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ZERO)) - .thenReturn(Duration.ofNanos(2 * 1000)); - when(clock.currentTimeMicros()).thenReturn(1L, 1L, 1L, 1L, 1L); - - MonotonicTimestampGenerator generator = newInstance(clock); - - assertThat(generator.next()).isEqualTo(1); - assertThat(generator.next()).isEqualTo(2); - assertThat(generator.next()).isEqualTo(3); - assertThat(generator.next()).isEqualTo(4); - // Clock still at 1, last returned timestamp is 4 (> 1 + 2), should warn - assertThat(generator.next()).isEqualTo(5); - - verify(appender).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getLevel()).isEqualTo(Level.WARN); - assertThat(log.getMessage()).contains("Clock skew detected"); - } - - @Test - public void should_go_back_to_clock_if_new_tick_high_enough() { - when(clock.currentTimeMicros()).thenReturn(1L, 1L, 1L, 1L, 1L, 10L); - - MonotonicTimestampGenerator generator = 
newInstance(clock); - - for (long l = 1; l <= 5; l++) { - // Clock at 1, keep incrementing - assertThat(generator.next()).isEqualTo(l); - } - - // Last returned is 5, but clock has ticked to 10, should use that. - assertThat(generator.next()).isEqualTo(10); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java deleted file mode 100644 index 5d9ed8b2ceb..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.time; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static com.datastax.oss.driver.Assertions.fail; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.mockito.stubbing.OngoingStubbing; - -public class ThreadLocalTimestampGeneratorTest extends MonotonicTimestampGeneratorTestBase { - @Override - protected MonotonicTimestampGenerator newInstance(Clock clock) { - return new ThreadLocalTimestampGenerator(clock, context); - } - - @Test - public void should_confine_timestamps_to_thread() throws Exception { - final int testThreadsCount = 2; - - // Prepare to generate 1000 timestamps for each thread, with the clock frozen at 1 - OngoingStubbing stub = when(clock.currentTimeMicros()); - for (int i = 0; i < testThreadsCount * 1000; i++) { - stub = stub.thenReturn(1L); - } - - MonotonicTimestampGenerator generator = newInstance(clock); - - List> futures = new CopyOnWriteArrayList<>(); - ExecutorService executor = Executors.newFixedThreadPool(testThreadsCount); - for (int i = 0; i < testThreadsCount; i++) { - executor.submit( - () -> { - try { - for (long l = 1; l <= 1000; l++) { - assertThat(generator.next()).isEqualTo(l); - } - futures.add(CompletableFuture.completedFuture(null)); - } catch (Throwable t) { - futures.add(CompletableFutures.failedFuture(t)); - } - }); - } - executor.shutdown(); - if (!executor.awaitTermination(1, TimeUnit.SECONDS)) { - fail("Expected executor to shut down cleanly"); - } - - 
assertThat(futures).hasSize(testThreadsCount); - for (CompletionStage future : futures) { - assertThatStage(future).isSuccess(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java deleted file mode 100644 index 8dcad99b459..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.BDDMockito.given; -import static org.mockito.BDDMockito.willThrow; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class MultiplexingRequestTrackerTest { - - @Mock private RequestTracker child1; - @Mock private RequestTracker child2; - @Mock private Request request; - @Mock private DriverExecutionProfile profile; - @Mock private Node node; - @Mock private Session session; - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - private Logger logger; - private Level initialLogLevel; - - private final Exception error = new DriverExecutionException(new NullPointerException()); - - @Before - public void addAppenders() { - logger = (Logger) LoggerFactory.getLogger(MultiplexingRequestTracker.class); - initialLogLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void removeAppenders() { - logger.detachAppender(appender); - logger.setLevel(initialLogLevel); - } - - @Test - 
public void should_register() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(); - // when - tracker.register(child1); - tracker.register(child2); - // then - assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_tracker_via_constructor() { - // given - MultiplexingRequestTracker tracker = - new MultiplexingRequestTracker(new MultiplexingRequestTracker(child1, child2)); - // when - // then - assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_flatten_child_multiplexing_tracker_via_register() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(); - // when - tracker.register(new MultiplexingRequestTracker(child1, child2)); - // then - assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2); - } - - @Test - public void should_notify_onSuccess() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onSuccess(request, 123456L, profile, node, "test"); - // when - tracker.onSuccess(request, 123456L, profile, node, "test"); - // then - verify(child1).onSuccess(request, 123456L, profile, node, "test"); - verify(child2).onSuccess(request, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onSuccess event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onError() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onError(request, error, 123456L, profile, node, "test"); - // when - tracker.onError(request, error, 123456L, profile, node, "test"); - // then - verify(child1).onError(request, error, 123456L, profile, node, "test"); - verify(child2).onError(request, error, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onError event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onNodeSuccess() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onNodeSuccess(request, 123456L, profile, node, "test"); - // when - tracker.onNodeSuccess(request, 123456L, profile, node, "test"); - // then - verify(child1).onNodeSuccess(request, 123456L, profile, node, "test"); - verify(child2).onNodeSuccess(request, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onNodeSuccess event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_onNodeError() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()) - .given(child1) - .onNodeError(request, error, 123456L, profile, node, "test"); - // when - tracker.onNodeError(request, error, 123456L, profile, node, "test"); - // then - verify(child1).onNodeError(request, error, 123456L, profile, node, "test"); - verify(child2).onNodeError(request, error, 123456L, profile, node, "test"); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onNodeError event. (NullPointerException: null)"); - } - - @Test - public void should_notify_onSessionReady() { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - willThrow(new NullPointerException()).given(child1).onSessionReady(session); - given(session.getName()).willReturn("test"); - // when - tracker.onSessionReady(session); - // then - verify(child1).onSessionReady(session); - verify(child2).onSessionReady(session); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "[test] Unexpected error while notifying request tracker child1 of an onSessionReady event. 
(NullPointerException: null)"); - } - - @Test - public void should_notify_close() throws Exception { - // given - MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); - Exception child1Error = new NullPointerException(); - willThrow(child1Error).given(child1).close(); - // when - tracker.close(); - // then - verify(child1).close(); - verify(child2).close(); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) - .contains( - "Unexpected error while closing request tracker child1. (NullPointerException: null)"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java deleted file mode 100644 index fb1883e125f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.Strict.class) -public class RequestIdGeneratorTest { - @Mock private InternalDriverContext context; - @Mock private Statement statement; - - @Test - public void uuid_generator_should_generate() { - // given - UuidRequestIdGenerator generator = new UuidRequestIdGenerator(context); - // when - String parentId = generator.getSessionRequestId(); - String requestId = generator.getNodeRequestId(statement, parentId); - // then - // e.g. "550e8400-e29b-41d4-a716-446655440000", which is 36 characters long - assertThat(parentId.length()).isEqualTo(36); - // e.g. "550e8400-e29b-41d4-a716-446655440000-550e8400-e29b-41d4-a716-446655440000", which is 73 - // characters long - assertThat(requestId.length()).isEqualTo(73); - } - - @Test - public void w3c_generator_should_generate() { - // given - W3CContextRequestIdGenerator generator = new W3CContextRequestIdGenerator(context); - // when - String parentId = generator.getSessionRequestId(); - String requestId = generator.getNodeRequestId(statement, parentId); - // then - // e.g. "4bf92f3577b34da6a3ce929d0e0e4736", which is 32 characters long - assertThat(parentId.length()).isEqualTo(32); - // According to W3C "traceparent" spec, - // https://www.w3.org/TR/trace-context/#traceparent-header-field-values - // e.g. 
"00-4bf92f3577b34da6a3ce929d0e0e4736-a3ce929d0e0e4736-01", which 55 characters long - assertThat(requestId.length()).isEqualTo(55); - } - - @Test - public void w3c_generator_default_payloadkey() { - W3CContextRequestIdGenerator w3cGenerator = new W3CContextRequestIdGenerator(context); - assertThat(w3cGenerator.getCustomPayloadKey()) - .isEqualTo(RequestIdGenerator.DEFAULT_PAYLOAD_KEY); - } - - @Test - public void w3c_generator_provided_payloadkey() { - String someString = RandomStringUtils.random(12); - W3CContextRequestIdGenerator w3cGenerator = new W3CContextRequestIdGenerator(someString); - assertThat(w3cGenerator.getCustomPayloadKey()).isEqualTo(someString); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java deleted file mode 100644 index e9fb518b51f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.tracker; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.DefaultColumnDefinition; -import com.datastax.oss.driver.internal.core.cql.DefaultColumnDefinitions; -import com.datastax.oss.driver.internal.core.cql.DefaultPreparedStatement; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.response.result.ColumnSpec; -import com.datastax.oss.protocol.internal.response.result.RawType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.Collections; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class RequestLogFormatterTest { - - @Mock private DriverContext context; - private final ProtocolVersion protocolVersion = DefaultProtocolVersion.V4; - - private RequestLogFormatter formatter; - - 
@Before - public void setup() { - when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(context.getProtocolVersion()).thenReturn(protocolVersion); - - formatter = new RequestLogFormatter(context); - } - - @Test - public void should_format_simple_statement_without_values() { - SimpleStatement statement = - SimpleStatement.newInstance("SELECT release_version FROM system.local"); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[0 values] SELECT release_version FROM system.local"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[0 values] SELECT release_version FROM system.local"); - - assertThat(formatRequest(statement, 20, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[0 values] SELECT release_versi..."); - } - - @Test - public void should_format_simple_statement_with_positional_values() { - SimpleStatement statement = - SimpleStatement.builder("UPDATE foo SET v=? WHERE k=?") - .addPositionalValue(Bytes.fromHexString("0xdeadbeef")) - .addPositionalValue(0) - .build(); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=?"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? [v0=0xdeadbeef, v1=0]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, 1, Integer.MAX_VALUE)) - .isEqualTo( - "[2 values] UPDATE foo SET v=? WHERE k=? [v0=0xdeadbeef, ...]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? 
[v0=0xde..., v1=0]"); - } - - @Test - public void should_format_simple_statement_with_named_values() { - SimpleStatement statement = - SimpleStatement.builder("UPDATE foo SET v=:v WHERE k=:k") - .addNamedValue("v", Bytes.fromHexString("0xdeadbeef")) - .addNamedValue("k", 0) - .build(); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=:v WHERE k=:k"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=:v WHERE k=:k [v=0xdeadbeef, k=0]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, 1, Integer.MAX_VALUE)) - .isEqualTo( - "[2 values] UPDATE foo SET v=:v WHERE k=:k [v=0xdeadbeef, ...]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo("[2 values] UPDATE foo SET v=:v WHERE k=:k [v=0xde..., k=0]"); - } - - @Test - public void should_format_bound_statement() { - PreparedStatement preparedStatement = - mockPreparedStatement( - "UPDATE foo SET v=? WHERE k=?", - ImmutableMap.of("v", DataTypes.BLOB, "k", DataTypes.INT)); - BoundStatement statement = preparedStatement.bind(Bytes.fromHexString("0xdeadbeef"), 0); - - assertThat( - formatRequest( - statement, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=?"); - - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? [v=0xdeadbeef, k=0]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, 1, Integer.MAX_VALUE)) - .isEqualTo( - "[2 values] UPDATE foo SET v=? WHERE k=? [v=0xdeadbeef, ...]"); - - assertThat(formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? 
[v=0xde..., k=0]"); - } - - @Test - public void should_format_bound_statement_with_unset_values() { - PreparedStatement preparedStatement = - mockPreparedStatement( - "UPDATE foo SET v=? WHERE k=?", - ImmutableMap.of("v", DataTypes.BLOB, "k", DataTypes.INT)); - BoundStatement statement = preparedStatement.bind().setInt("k", 0); - assertThat( - formatRequest(statement, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 values] UPDATE foo SET v=? WHERE k=? [v=, k=0]"); - } - - @Test - public void should_format_batch_statement() { - SimpleStatement statement1 = - SimpleStatement.builder("UPDATE foo SET v=? WHERE k=?") - .addNamedValue("v", Bytes.fromHexString("0xdeadbeef")) - .addNamedValue("k", 0) - .build(); - - PreparedStatement preparedStatement = - mockPreparedStatement( - "UPDATE foo SET v=? WHERE k=?", - ImmutableMap.of("v", DataTypes.BLOB, "k", DataTypes.INT)); - BoundStatement statement2 = preparedStatement.bind(Bytes.fromHexString("0xabcdef"), 1); - - BatchStatement batch = - BatchStatement.builder(DefaultBatchType.UNLOGGED) - .addStatements(statement1, statement2) - .build(); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH"); - - assertThat(formatRequest(batch, 20, false, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo("[2 statements, 4 values] BEGIN UNLOGGED BATCH..."); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, Integer.MAX_VALUE, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? 
WHERE k=?; " - + "APPLY BATCH " - + "[v=0xdeadbeef, k=0]" - + "[v=0xabcdef, k=1]"); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, 3, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH " - + "[v=0xdeadbeef, k=0]" - + "[v=0xabcdef, ...]"); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, 2, Integer.MAX_VALUE)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH " - + "[v=0xdeadbeef, k=0]" - + "[...]"); - - assertThat(formatRequest(batch, Integer.MAX_VALUE, true, Integer.MAX_VALUE, 4)) - .isEqualTo( - "[2 statements, 4 values] " - + "BEGIN UNLOGGED BATCH " - + "UPDATE foo SET v=? WHERE k=?; " - + "UPDATE foo SET v=? WHERE k=?; " - + "APPLY BATCH " - + "[v=0xde..., k=0]" - + "[v=0xab..., k=1]"); - } - - private String formatRequest( - Request request, int maxQueryLength, boolean showValues, int maxValues, int maxValueLength) { - StringBuilder builder = new StringBuilder(); - formatter.appendRequest( - request, maxQueryLength, showValues, maxValues, maxValueLength, builder); - return builder.toString(); - } - - private PreparedStatement mockPreparedStatement(String query, Map variables) { - ImmutableList.Builder definitions = ImmutableList.builder(); - int i = 0; - for (Map.Entry entry : variables.entrySet()) { - definitions.add( - new DefaultColumnDefinition( - new ColumnSpec( - "test", - "foo", - entry.getKey(), - i, - RawType.PRIMITIVES.get(entry.getValue().getProtocolCode())), - context)); - } - return new DefaultPreparedStatement( - Bytes.fromHexString("0x"), - query, - DefaultColumnDefinitions.valueOf(definitions.build()), - Collections.emptyList(), - null, - null, - null, - Collections.emptyMap(), - null, - null, - null, - null, - null, - Collections.emptyMap(), - null, - null, - null, - 
Integer.MIN_VALUE, - null, - null, - false, - context.getCodecRegistry(), - context.getProtocolVersion()); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java deleted file mode 100644 index d798df8d191..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DataTypeDetachableTest { - - @Mock private AttachmentPoint attachmentPoint; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void simple_types_should_never_be_detached() { - // Because simple types don't need the codec registry, we consider them as always attached by - // default - for (DataType simpleType : ImmutableList.of(DataTypes.INT, DataTypes.custom("some.class"))) { - assertThat(simpleType.isDetached()).isFalse(); - assertThat(SerializationHelper.serializeAndDeserialize(simpleType).isDetached()).isFalse(); - } - } - - @Test - public void manually_created_tuple_should_be_detached() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - assertThat(tuple.isDetached()).isTrue(); - } - - @Test - public void attaching_tuple_should_attach_all_of_its_subtypes() { - TupleType tuple1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tuple2 = DataTypes.tupleOf(DataTypes.TEXT, tuple1); - - assertThat(tuple1.isDetached()).isTrue(); - assertThat(tuple2.isDetached()).isTrue(); - - tuple2.attach(attachmentPoint); - - 
assertThat(tuple1.isDetached()).isFalse(); - } - - @Test - public void manually_created_udt_should_be_detached() { - UserDefinedType udt = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - assertThat(udt.isDetached()).isTrue(); - } - - @Test - public void attaching_udt_should_attach_all_of_its_subtypes() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - UserDefinedType udt = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), tuple) - .build(); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(udt.isDetached()).isTrue(); - - udt.attach(attachmentPoint); - - assertThat(tuple.isDetached()).isFalse(); - } - - @Test - public void list_should_be_attached_if_its_element_is() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - ListType list = DataTypes.listOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(list.isDetached()).isTrue(); - - tuple.attach(attachmentPoint); - - assertThat(list.isDetached()).isFalse(); - } - - @Test - public void attaching_list_should_attach_its_element() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - ListType list = DataTypes.listOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(list.isDetached()).isTrue(); - - list.attach(attachmentPoint); - - assertThat(tuple.isDetached()).isFalse(); - } - - @Test - public void set_should_be_attached_if_its_element_is() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - SetType set = DataTypes.setOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(set.isDetached()).isTrue(); - - tuple.attach(attachmentPoint); - - assertThat(set.isDetached()).isFalse(); - } 
- - @Test - public void attaching_set_should_attach_its_element() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT); - SetType set = DataTypes.setOf(tuple); - - assertThat(tuple.isDetached()).isTrue(); - assertThat(set.isDetached()).isTrue(); - - set.attach(attachmentPoint); - - assertThat(tuple.isDetached()).isFalse(); - } - - @Test - public void map_should_be_attached_if_its_elements_are() { - TupleType tuple1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tuple2 = DataTypes.tupleOf(DataTypes.TEXT); - MapType map = DataTypes.mapOf(tuple1, tuple2); - - assertThat(tuple1.isDetached()).isTrue(); - assertThat(tuple2.isDetached()).isTrue(); - assertThat(map.isDetached()).isTrue(); - - tuple1.attach(attachmentPoint); - assertThat(map.isDetached()).isTrue(); - - tuple2.attach(attachmentPoint); - assertThat(map.isDetached()).isFalse(); - } - - @Test - public void attaching_map_should_attach_all_of_its_subtypes() { - TupleType tuple1 = DataTypes.tupleOf(DataTypes.INT); - TupleType tuple2 = DataTypes.tupleOf(DataTypes.TEXT); - MapType map = DataTypes.mapOf(tuple1, tuple2); - - assertThat(tuple1.isDetached()).isTrue(); - assertThat(tuple2.isDetached()).isTrue(); - - map.attach(attachmentPoint); - - assertThat(tuple1.isDetached()).isFalse(); - assertThat(tuple2.isDetached()).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java deleted file mode 100644 index ccf53dd3a65..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.internal.SerializationHelper; -import org.junit.Test; - -public class DataTypeSerializationTest { - - @Test - public void should_serialize_and_deserialize() { - TupleType tuple = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - UserDefinedType udt = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - - // Because primitive and custom types never use the codec registry, we consider them always - // attached - should_serialize_and_deserialize(DataTypes.INT, false); - should_serialize_and_deserialize(DataTypes.custom("some.class.name"), false); - - should_serialize_and_deserialize(tuple, true); - should_serialize_and_deserialize(udt, true); - should_serialize_and_deserialize(DataTypes.listOf(DataTypes.INT), false); - 
should_serialize_and_deserialize(DataTypes.listOf(tuple), true); - should_serialize_and_deserialize(DataTypes.setOf(udt), true); - should_serialize_and_deserialize(DataTypes.mapOf(tuple, udt), true); - } - - private void should_serialize_and_deserialize(DataType in, boolean expectDetached) { - // When - DataType out = SerializationHelper.serializeAndDeserialize(in); - - // Then - assertThat(out).isEqualTo(in); - assertThat(out.isDetached()).isEqualTo(expectDetached); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java deleted file mode 100644 index f9ae1d24f77..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class PrimitiveTypeTest { - - @Test - public void should_report_protocol_code() { - assertThat(DataTypes.ASCII.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.ASCII); - assertThat(DataTypes.BIGINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BIGINT); - assertThat(DataTypes.BLOB.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BLOB); - assertThat(DataTypes.BOOLEAN.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BOOLEAN); - assertThat(DataTypes.COUNTER.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.COUNTER); - assertThat(DataTypes.DECIMAL.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DECIMAL); - assertThat(DataTypes.DOUBLE.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DOUBLE); - assertThat(DataTypes.FLOAT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.FLOAT); - assertThat(DataTypes.INT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.INT); - assertThat(DataTypes.TIMESTAMP.getProtocolCode()) - .isEqualTo(ProtocolConstants.DataType.TIMESTAMP); - assertThat(DataTypes.UUID.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.UUID); - assertThat(DataTypes.VARINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.VARINT); - assertThat(DataTypes.TIMEUUID.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TIMEUUID); - assertThat(DataTypes.INET.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.INET); - 
assertThat(DataTypes.DATE.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DATE); - assertThat(DataTypes.TEXT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.VARCHAR); - assertThat(DataTypes.TIME.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TIME); - assertThat(DataTypes.SMALLINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.SMALLINT); - assertThat(DataTypes.TINYINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TINYINT); - assertThat(DataTypes.DURATION.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DURATION); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_format_as_cql(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(DataTypes.ASCII.asCql(true, true)).isEqualTo("ascii"); - assertThat(DataTypes.BIGINT.asCql(true, true)).isEqualTo("bigint"); - assertThat(DataTypes.BLOB.asCql(true, true)).isEqualTo("blob"); - assertThat(DataTypes.BOOLEAN.asCql(true, true)).isEqualTo("boolean"); - assertThat(DataTypes.COUNTER.asCql(true, true)).isEqualTo("counter"); - assertThat(DataTypes.DECIMAL.asCql(true, true)).isEqualTo("decimal"); - assertThat(DataTypes.DOUBLE.asCql(true, true)).isEqualTo("double"); - assertThat(DataTypes.FLOAT.asCql(true, true)).isEqualTo("float"); - assertThat(DataTypes.INT.asCql(true, true)).isEqualTo("int"); - assertThat(DataTypes.TIMESTAMP.asCql(true, true)).isEqualTo("timestamp"); - assertThat(DataTypes.UUID.asCql(true, true)).isEqualTo("uuid"); - assertThat(DataTypes.VARINT.asCql(true, true)).isEqualTo("varint"); - assertThat(DataTypes.TIMEUUID.asCql(true, true)).isEqualTo("timeuuid"); - assertThat(DataTypes.INET.asCql(true, true)).isEqualTo("inet"); - assertThat(DataTypes.DATE.asCql(true, true)).isEqualTo("date"); - assertThat(DataTypes.TEXT.asCql(true, true)).isEqualTo("text"); - assertThat(DataTypes.TIME.asCql(true, true)).isEqualTo("time"); - assertThat(DataTypes.SMALLINT.asCql(true, 
true)).isEqualTo("smallint"); - assertThat(DataTypes.TINYINT.asCql(true, true)).isEqualTo("tinyint"); - assertThat(DataTypes.DURATION.asCql(true, true)).isEqualTo("duration"); - } finally { - Locale.setDefault(def); - } - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_format_as_string(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - assertThat(DataTypes.ASCII.toString()).isEqualTo("ASCII"); - assertThat(DataTypes.BIGINT.toString()).isEqualTo("BIGINT"); - assertThat(DataTypes.BLOB.toString()).isEqualTo("BLOB"); - assertThat(DataTypes.BOOLEAN.toString()).isEqualTo("BOOLEAN"); - assertThat(DataTypes.COUNTER.toString()).isEqualTo("COUNTER"); - assertThat(DataTypes.DECIMAL.toString()).isEqualTo("DECIMAL"); - assertThat(DataTypes.DOUBLE.toString()).isEqualTo("DOUBLE"); - assertThat(DataTypes.FLOAT.toString()).isEqualTo("FLOAT"); - assertThat(DataTypes.INT.toString()).isEqualTo("INT"); - assertThat(DataTypes.TIMESTAMP.toString()).isEqualTo("TIMESTAMP"); - assertThat(DataTypes.UUID.toString()).isEqualTo("UUID"); - assertThat(DataTypes.VARINT.toString()).isEqualTo("VARINT"); - assertThat(DataTypes.TIMEUUID.toString()).isEqualTo("TIMEUUID"); - assertThat(DataTypes.INET.toString()).isEqualTo("INET"); - assertThat(DataTypes.DATE.toString()).isEqualTo("DATE"); - assertThat(DataTypes.TEXT.toString()).isEqualTo("TEXT"); - assertThat(DataTypes.TIME.toString()).isEqualTo("TIME"); - assertThat(DataTypes.SMALLINT.toString()).isEqualTo("SMALLINT"); - assertThat(DataTypes.TINYINT.toString()).isEqualTo("TINYINT"); - assertThat(DataTypes.DURATION.toString()).isEqualTo("DURATION"); - } finally { - Locale.setDefault(def); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java deleted file mode 100644 index 43c01ea35dc..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import org.junit.Test; - -public class AsciiCodecTest extends CodecTestBase { - public AsciiCodecTest() { - this.codec = TypeCodecs.ASCII; - } - - @Test - public void should_encode() { - assertThat(encode("hello")).isEqualTo("0x68656c6c6f"); - assertThat(encode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_encode_non_ascii() { - encode("hëllo"); - } - - @Test - public void should_decode() { - assertThat(decode("0x68656c6c6f")).isEqualTo("hello"); - assertThat(decode("0x")).isEmpty(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_non_ascii() { - decode("0x68c3ab6c6c6f"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java deleted file mode 100644 index c5360c90a7b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class BigIntCodecTest extends CodecTestBase { - - public BigIntCodecTest() { - this.codec = TypeCodecs.BIGINT; - } - - @Test - public void should_encode() { - assertThat(encode(1L)).isEqualTo("0x0000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000001")).isEqualTo(1L); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - assertThat(format(1L)).isEqualTo("1"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(1L); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a number"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_if_out_of_range() { - parse(Long.MAX_VALUE + "0"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Long.class)).isTrue(); - assertThat(codec.accepts(long.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - 
@Test - public void should_accept_object() { - assertThat(codec.accepts(123L)).isTrue(); - assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java deleted file mode 100644 index ec1ab294911..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import org.junit.Test; - -public class BlobCodecTest extends CodecTestBase { - private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); - - public BlobCodecTest() { - this.codec = TypeCodecs.BLOB; - } - - @Test - public void should_encode() { - assertThat(encode(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_input_and_encoded() { - int inputPosition = BUFFER.position(); - ByteBuffer encoded = codec.encode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the encoded buffer to change its position - encoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_decode() { - assertThat(decode("0xcafebabe")).isEqualTo(BUFFER); - assertThat(decode("0x").capacity()).isEqualTo(0); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_decoded_and_input() { - int inputPosition = BUFFER.position(); - ByteBuffer decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the decoded buffer to change its position - decoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_format() { - assertThat(format(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0xcafebabe")).isEqualTo(BUFFER); - 
assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a blob"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(ByteBuffer.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(MappedByteBuffer.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(ByteBuffer.class)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BUFFER)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.allocate(0))).isTrue(); // covariance allowed - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java deleted file mode 100644 index 57fcef1235d..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class BooleanCodecTest extends CodecTestBase { - - public BooleanCodecTest() { - this.codec = TypeCodecs.BOOLEAN; - } - - @Test - public void should_encode() { - assertThat(encode(false)).isEqualTo("0x00"); - assertThat(encode(true)).isEqualTo("0x01"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00")).isFalse(); - assertThat(decode("0x01")).isTrue(); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(true)).isEqualTo("true"); - assertThat(format(false)).isEqualTo("false"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("true")).isEqualTo(true); - assertThat(parse("false")).isEqualTo(false); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("maybe"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Boolean.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(boolean.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Boolean.class)).isTrue(); - 
assertThat(codec.accepts(boolean.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(true)).isTrue(); - assertThat(codec.accepts(Boolean.TRUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java deleted file mode 100644 index 8a00cceda09..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; - -public class CodecTestBase { - protected TypeCodec codec; - - protected String encode(T t, ProtocolVersion protocolVersion) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - ByteBuffer bytes = codec.encode(t, protocolVersion); - return (bytes == null) ? null : Bytes.toHexString(bytes); - } - - protected String encode(T t) { - return encode(t, ProtocolVersion.DEFAULT); - } - - protected T decode(String hexString, ProtocolVersion protocolVersion) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - ByteBuffer bytes = (hexString == null) ? null : Bytes.fromHexString(hexString); - // Decode twice, to assert that decode leaves the input buffer in its original state - codec.decode(bytes, protocolVersion); - return codec.decode(bytes, protocolVersion); - } - - protected T decode(String hexString) { - return decode(hexString, ProtocolVersion.DEFAULT); - } - - protected String format(T t) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - return codec.format(t); - } - - protected T parse(String s) { - assertThat(codec).as("Must set codec before calling this method").isNotNull(); - return codec.parse(s); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java deleted file mode 100644 index c18c6e76d7c..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one 
- * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class CounterCodecTest extends CodecTestBase { - - public CounterCodecTest() { - this.codec = TypeCodecs.COUNTER; - } - - @Test - public void should_encode() { - assertThat(encode(1L)).isEqualTo("0x0000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000001")).isEqualTo(1L); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - assertThat(format(1L)).isEqualTo("1"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(1L); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - 
@Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a number"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_if_out_of_range() { - parse(Long.MAX_VALUE + "0"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(long.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Long.class)).isTrue(); - assertThat(codec.accepts(long.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(123L)).isTrue(); - assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java deleted file mode 100644 index 43526f72e57..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class CqlDurationCodecTest extends CodecTestBase { - - private static final CqlDuration DURATION = CqlDuration.newInstance(1, 2, 3); - - public CqlDurationCodecTest() { - this.codec = TypeCodecs.DURATION; - } - - @Test - public void should_encode() { - assertThat(encode(DURATION)) - .isEqualTo( - "0x" - + "02" // 1 (encoded as 2 because of zig-zag encoding) - + "04" // 2 (same) - + "06" // 3 (same) - ); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x020406")).isEqualTo(DURATION); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalStateException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(DURATION)).isEqualTo("1mo2d3ns"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1mo2d3ns")).isEqualTo(DURATION); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void 
should_fail_to_parse_invalid_input() { - parse("not a duration"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(CqlDuration.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(CqlDuration.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(DURATION)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java deleted file mode 100644 index 4f04f3defec..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A sample user codec implementation that we use in our tests. - * - *

It maps a CQL string to a Java string containing its textual representation. - */ -public class CqlIntToStringCodec extends MappingCodec { - - public CqlIntToStringCodec() { - super(TypeCodecs.INT, GenericType.STRING); - } - - @Nullable - @Override - protected String innerToOuter(@Nullable Integer value) { - return value == null ? null : value.toString(); - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable String value) { - return value == null ? null : Integer.parseInt(value); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java deleted file mode 100644 index a832b51cfec..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import org.junit.Test; - -public class CustomCodecTest extends CodecTestBase { - private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); - - public CustomCodecTest() { - this.codec = TypeCodecs.custom(DataTypes.custom("com.test.MyClass")); - } - - @Test - public void should_encode() { - assertThat(encode(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_input_and_encoded() { - int inputPosition = BUFFER.position(); - ByteBuffer encoded = codec.encode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the encoded buffer to change its position - encoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_decode() { - assertThat(decode("0xcafebabe")).isEqualTo(BUFFER); - assertThat(decode("0x").capacity()).isEqualTo(0); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_decoded_and_input() { - int inputPosition = BUFFER.position(); - ByteBuffer decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); - // Read from the decoded buffer to change its position - decoded.get(); - // The input buffer should not be affected - assertThat(BUFFER.position()).isEqualTo(inputPosition); - } - - @Test - public void should_format() { - assertThat(format(BUFFER)).isEqualTo("0xcafebabe"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void 
should_parse() { - assertThat(parse("0xcafebabe")).isEqualTo(BUFFER); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a blob"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(ByteBuffer.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(MappedByteBuffer.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(ByteBuffer.class)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BUFFER)).isTrue(); - assertThat(codec.accepts(MappedByteBuffer.allocate(0))).isTrue(); // covariance allowed - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java deleted file mode 100644 index 48388fbc692..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.time.LocalDate; -import org.junit.Test; - -public class DateCodecTest extends CodecTestBase { - - private static final LocalDate EPOCH = LocalDate.ofEpochDay(0); - private static final LocalDate MIN = LocalDate.parse("-5877641-06-23"); - private static final LocalDate MAX = LocalDate.parse("+5881580-07-11"); - - public DateCodecTest() { - this.codec = TypeCodecs.DATE; - } - - @Test - public void should_encode() { - // Dates are encoded as a number of days since the epoch, stored on 8 bytes with 0 in the - // middle. 
- assertThat(encode(MIN)).isEqualTo("0x00000000"); - // The "middle" is the one that has only the most significant bit set (because it has the same - // number of values before and after it, determined by all possible combinations of the - // remaining bits) - assertThat(encode(EPOCH)).isEqualTo("0x80000000"); - assertThat(encode(MAX)).isEqualTo("0xffffffff"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(MIN); - assertThat(decode("0x80000000")).isEqualTo(EPOCH); - assertThat(decode("0xffffffff")).isEqualTo(MAX); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x00000000" + "0000"); - } - - @Test - public void should_format() { - // No need to test various values because the codec delegates directly to the JDK's formatter, - // which we assume does its job correctly. - assertThat(format(EPOCH)).isEqualTo("'1970-01-01'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - // Raw number - assertThat(parse("0")).isEqualTo(MIN); - assertThat(parse("2147483648")).isEqualTo(EPOCH); - - // Date format - assertThat(parse("'-5877641-06-23'")).isEqualTo(MIN); - assertThat(parse("'1970-01-01'")).isEqualTo(EPOCH); - assertThat(parse("'2014-01-01'")).isEqualTo(LocalDate.parse("2014-01-01")); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a date"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(LocalDate.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - 
assertThat(codec.accepts(LocalDate.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(EPOCH)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java deleted file mode 100644 index eac360fdcc5..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.math.BigDecimal; -import org.junit.Test; - -public class DecimalCodecTest extends CodecTestBase { - - public DecimalCodecTest() { - this.codec = TypeCodecs.DECIMAL; - } - - @Test - public void should_encode() { - assertThat(encode(BigDecimal.ONE)) - .isEqualTo( - "0x" - + "00000000" // scale - + "01" // unscaled value - ); - assertThat(encode(BigDecimal.valueOf(128, 4))) - .isEqualTo( - "0x" - + "00000004" // scale - + "0080" // unscaled value - ); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000001")).isEqualTo(BigDecimal.ONE); - assertThat(decode("0x000000040080")).isEqualTo(BigDecimal.valueOf(128, 4)); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(BigDecimal.ONE)).isEqualTo("1"); - assertThat(format(BigDecimal.valueOf(128, 4))).isEqualTo("0.0128"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(BigDecimal.ONE); - assertThat(parse("0.0128")).isEqualTo(BigDecimal.valueOf(128, 4)); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a decimal"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(BigDecimal.class))).isTrue(); - 
assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(BigDecimal.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BigDecimal.ONE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java deleted file mode 100644 index f27081aa784..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class DoubleCodecTest extends CodecTestBase { - - public DoubleCodecTest() { - this.codec = TypeCodecs.DOUBLE; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. - assertThat(encode(0.0)).isEqualTo("0x0000000000000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000000")).isEqualTo(0.0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(0.0)).isEqualTo("0.0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0.0")).isEqualTo(0.0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a double"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Double.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(double.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Double.class)).isTrue(); - assertThat(codec.accepts(double.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void 
should_accept_object() { - assertThat(codec.accepts(123.45d)).isTrue(); - assertThat(codec.accepts(Double.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java deleted file mode 100644 index 62d5b549153..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class FloatCodecTest extends CodecTestBase { - - public FloatCodecTest() { - this.codec = TypeCodecs.FLOAT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(0.0f)).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(0.0f); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format(0.0f)).isEqualTo("0.0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0.0")).isEqualTo(0.0f); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a float"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Float.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(float.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Float.class)).isTrue(); - assertThat(codec.accepts(float.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(123.45f)).isTrue(); - assertThat(codec.accepts(Float.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java deleted file mode 100644 index e10fa695ba0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import java.net.Inet4Address; -import java.net.InetAddress; -import java.net.UnknownHostException; -import org.junit.Test; - -public class InetCodecTest extends CodecTestBase { - - private static final InetAddress V4_ADDRESS; - private static final InetAddress V6_ADDRESS; - - static { - try { - V4_ADDRESS = InetAddress.getByName("127.0.0.1"); - V6_ADDRESS = InetAddress.getByName("::1"); - } catch (UnknownHostException e) { - fail("unexpected error", e); - throw new AssertionError(); // never reached - } - } - - public InetCodecTest() { - this.codec = TypeCodecs.INET; - } - - @Test - public void should_encode() { - assertThat(encode(V4_ADDRESS)).isEqualTo("0x7f000001"); - assertThat(encode(V6_ADDRESS)).isEqualTo("0x00000000000000000000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() 
{ - assertThat(decode("0x7f000001")).isEqualTo(V4_ADDRESS); - assertThat(decode("0x00000000000000000000000000000001")).isEqualTo(V6_ADDRESS); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_incorrect_byte_count() { - decode("0x" + Strings.repeat("00", 7)); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x" + Strings.repeat("00", 17)); - } - - @Test - public void should_format() { - assertThat(format(V4_ADDRESS)).isEqualTo("'127.0.0.1'"); - assertThat(format(V6_ADDRESS)).isEqualTo("'0:0:0:0:0:0:0:1'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'127.0.0.1'")).isEqualTo(V4_ADDRESS); - assertThat(parse("'0:0:0:0:0:0:0:1'")).isEqualTo(V6_ADDRESS); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an address"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(InetAddress.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Inet4Address.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(InetAddress.class)).isTrue(); - assertThat(codec.accepts(Inet4Address.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(V4_ADDRESS)).isTrue(); // covariance allowed - assertThat(codec.accepts(V6_ADDRESS)).isTrue(); // covariance allowed - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java deleted file mode 100644 index b5268a7e844..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class IntCodecTest extends CodecTestBase { - - public IntCodecTest() { - this.codec = TypeCodecs.INT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(0)).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000"); - } - - @Test - public void should_format() { - assertThat(format(0)).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo(0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an int"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Integer.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(int.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Long.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Integer.class)).isTrue(); - assertThat(codec.accepts(int.class)).isTrue(); - assertThat(codec.accepts(Long.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(123)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Long.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java deleted file mode 100644 index 975aa3a1428..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.ArrayList; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ListCodecTest extends CodecTestBase> { - - @Mock private TypeCodec elementCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(elementCodec.getCqlType()).thenReturn(DataTypes.INT); - 
when(elementCodec.getJavaType()).thenReturn(GenericType.INTEGER); - codec = TypeCodecs.listOf(elementCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_list() { - assertThat(encode(new ArrayList<>())).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_list() { - when(elementCodec.encode(1, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x01")); - when(elementCodec.encode(2, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x0002")); - when(elementCodec.encode(3, ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x000003")); - - assertThat(encode(ImmutableList.of(1, 2, 3))) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "0000000101" // size + contents of element 1 - + "000000020002" // size + contents of element 2 - + "00000003000003" // size + contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_list() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_list() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_list() { - when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - when(elementCodec.decode(Bytes.fromHexString("0x000003"), ProtocolVersion.DEFAULT)) - .thenReturn(3); - - assertThat(decode("0x" + "00000003" + "0000000101" + "000000020002" + "00000003000003")) - .containsExactly(1, 2, 3); - } - - @Test - public void should_decode_list_with_null_elements() { - when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - assertThat( - decode( - "0x" - + "00000002" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - + "00000002" // size of element 2 - + "0002" // contents of element 2 - )) 
- .containsExactly(null, 2); - } - - @Test - public void should_format_null_list() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_list() { - assertThat(format(new ArrayList<>())).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_list() { - when(elementCodec.format(1)).thenReturn("a"); - when(elementCodec.format(2)).thenReturn("b"); - when(elementCodec.format(3)).thenReturn("c"); - - assertThat(format(ImmutableList.of(1, 2, 3))).isEqualTo("[a,b,c]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_list() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_list() { - when(elementCodec.parse("a")).thenReturn(1); - when(elementCodec.parse("b")).thenReturn(2); - when(elementCodec.parse("c")).thenReturn(3); - - assertThat(parse("[a,b,c]")).containsExactly(1, 2, 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_list() { - parse("not a list"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java deleted file mode 100644 index 94cb33a5a99..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.LinkedHashMap; -import java.util.Map; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class MapCodecTest extends CodecTestBase> { - - @Mock private TypeCodec keyCodec; - @Mock private TypeCodec valueCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(keyCodec.getCqlType()).thenReturn(DataTypes.TEXT); - when(keyCodec.getJavaType()).thenReturn(GenericType.STRING); - - when(valueCodec.getCqlType()).thenReturn(DataTypes.INT); - when(valueCodec.getJavaType()).thenReturn(GenericType.INTEGER); - codec = TypeCodecs.mapOf(keyCodec, valueCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_map() { - assertThat(encode(new LinkedHashMap<>())).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_map() { - 
when(keyCodec.encode("a", ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x10")); - when(keyCodec.encode("b", ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x2000")); - when(keyCodec.encode("c", ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x300000")); - - when(valueCodec.encode(1, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x01")); - when(valueCodec.encode(2, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x0002")); - when(valueCodec.encode(3, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x000003")); - - assertThat(encode(ImmutableMap.of("a", 1, "b", 2, "c", 3))) - .isEqualTo( - "0x" - + "00000003" // number of key-value pairs - + "0000000110" // size + contents of key 1 - + "0000000101" // size + contents of value 1 - + "000000022000" // size + contents of key 2 - + "000000020002" // size + contents of value 2 - + "00000003300000" // size + contents of key 3 - + "00000003000003" // size + contents of value 3 - ); - } - - @Test - public void should_decode_null_as_empty_map() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_map() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_map() { - when(keyCodec.decode(Bytes.fromHexString("0x10"), ProtocolVersion.DEFAULT)).thenReturn("a"); - when(keyCodec.decode(Bytes.fromHexString("0x2000"), ProtocolVersion.DEFAULT)).thenReturn("b"); - when(keyCodec.decode(Bytes.fromHexString("0x300000"), ProtocolVersion.DEFAULT)).thenReturn("c"); - - when(valueCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - when(valueCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - when(valueCodec.decode(Bytes.fromHexString("0x000003"), ProtocolVersion.DEFAULT)).thenReturn(3); - - assertThat( - decode( - "0x" - + "00000003" - + "0000000110" - + "0000000101" - + "000000022000" - + "000000020002" - + "00000003300000" - + 
"00000003000003")) - .containsOnlyKeys("a", "b", "c") - .containsEntry("a", 1) - .containsEntry("b", 2) - .containsEntry("c", 3); - } - - @Test - public void should_decode_map_with_null_elements() { - when(keyCodec.decode(Bytes.fromHexString("0x10"), ProtocolVersion.DEFAULT)).thenReturn("a"); - when(valueCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - assertThat(decode("0x" + "00000002" + "0000000110" + "FFFFFFFF" + "FFFFFFFF" + "000000020002")) - .containsOnlyKeys("a", null) - .containsEntry("a", null) - .containsEntry(null, 2); - } - - @Test - public void should_format_null_map() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_map() { - assertThat(format(new LinkedHashMap<>())).isEqualTo("{}"); - } - - @Test - public void should_format_non_empty_map() { - when(keyCodec.format("a")).thenReturn("foo"); - when(keyCodec.format("b")).thenReturn("bar"); - when(keyCodec.format("c")).thenReturn("baz"); - - when(valueCodec.format(1)).thenReturn("qux"); - when(valueCodec.format(2)).thenReturn("quux"); - when(valueCodec.format(3)).thenReturn("quuz"); - - assertThat(format(ImmutableMap.of("a", 1, "b", 2, "c", 3))) - .isEqualTo("{foo:qux,bar:quux,baz:quuz}"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_map() { - assertThat(parse("{}")).isEmpty(); - } - - @Test - public void should_parse_non_empty_map() { - when(keyCodec.parse("foo")).thenReturn("a"); - when(keyCodec.parse("bar")).thenReturn("b"); - when(keyCodec.parse("baz")).thenReturn("c"); - - when(valueCodec.parse("qux")).thenReturn(1); - when(valueCodec.parse("quux")).thenReturn(2); - when(valueCodec.parse("quuz")).thenReturn(3); - - assertThat(parse("{foo:qux,bar:quux,baz:quuz}")) - .containsOnlyKeys("a", "b", "c") - .containsEntry("a", 1) - .containsEntry("b", 2) - .containsEntry("c", 3); - } - - 
@Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_map() { - parse("not a map"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java deleted file mode 100644 index f78dc774f62..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class MappingCodecTest extends CodecTestBase { - - public MappingCodecTest() { - this.codec = new CqlIntToStringCodec(); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode("0")).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo("0"); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000"); - } - - @Test - public void should_format() { - assertThat(format("0")).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo("0"); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an int"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(String.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(int.class))).isFalse(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(String.class)).isTrue(); - assertThat(codec.accepts(int.class)).isFalse(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts("123")).isTrue(); - // codec accepts any String, even if it can't be encoded - assertThat(codec.accepts("not an int")).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } - - @Test - public void should_expose_inner_and_outer_java_types() { - assertThat(((MappingCodec) codec).getInnerJavaType()).isEqualTo(GenericType.INTEGER); - 
assertThat(codec.getJavaType()).isEqualTo(GenericType.STRING); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java deleted file mode 100644 index a302357c9f3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.util.LinkedHashSet; -import java.util.Set; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class SetCodecTest extends CodecTestBase> { - - @Mock private TypeCodec elementCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(elementCodec.getCqlType()).thenReturn(DataTypes.INT); - when(elementCodec.getJavaType()).thenReturn(GenericType.INTEGER); - codec = TypeCodecs.setOf(elementCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_set() { - assertThat(encode(new LinkedHashSet<>())).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_set() { - when(elementCodec.encode(1, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x01")); - when(elementCodec.encode(2, ProtocolVersion.DEFAULT)).thenReturn(Bytes.fromHexString("0x0002")); - when(elementCodec.encode(3, ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x000003")); - - assertThat(encode(ImmutableSet.of(1, 2, 3))) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "0000000101" // size + contents of element 1 - + "000000020002" // size + contents of element 2 - + "00000003000003" // size + contents of element 3 - ); - } - - @Test - public void 
should_decode_null_as_empty_set() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_set() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_set() { - when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); - when(elementCodec.decode(Bytes.fromHexString("0x000003"), ProtocolVersion.DEFAULT)) - .thenReturn(3); - - assertThat(decode("0x" + "00000003" + "0000000101" + "000000020002" + "00000003000003")) - .containsExactly(1, 2, 3); - } - - @Test - public void should_decode_set_with_null_elements() { - when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); - assertThat(decode("0x" + "00000002" + "0000000101" + "FFFFFFFF")).containsExactly(1, null); - } - - @Test - public void should_format_null_set() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_set() { - assertThat(format(new LinkedHashSet<>())).isEqualTo("{}"); - } - - @Test - public void should_format_non_empty_set() { - when(elementCodec.format(1)).thenReturn("a"); - when(elementCodec.format(2)).thenReturn("b"); - when(elementCodec.format(3)).thenReturn("c"); - - assertThat(format(ImmutableSet.of(1, 2, 3))).isEqualTo("{a,b,c}"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_set() { - assertThat(parse("{}")).isEmpty(); - } - - @Test - public void should_parse_non_empty_set() { - when(elementCodec.parse("a")).thenReturn(1); - when(elementCodec.parse("b")).thenReturn(2); - when(elementCodec.parse("c")).thenReturn(3); - - assertThat(parse("{a,b,c}")).containsExactly(1, 2, 3); - } - - @Test(expected = IllegalArgumentException.class) - public void 
should_fail_to_parse_malformed_set() { - parse("not a set"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java deleted file mode 100644 index 3f40efb16ac..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import org.junit.Test; - -public class SimpleBlobCodecTest extends CodecTestBase { - - private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); - private static final byte[] ARRAY = Bytes.getArray(Bytes.fromHexString("0xcafebabe")); - - public SimpleBlobCodecTest() { - this.codec = ExtraTypeCodecs.BLOB_TO_ARRAY; - } - - @Test - public void should_encode() { - assertThat(encode(ARRAY)).isEqualTo("0xcafebabe"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_input_and_encoded() { - ByteBuffer encoded = codec.encode(ARRAY, ProtocolVersion.DEFAULT); - assertThat(encoded).isNotNull(); - assertThat(ARRAY).isEqualTo(Bytes.getArray(encoded)); - } - - @Test - public void should_decode() { - assertThat(decode("0xcafebabe")).isEqualTo(ARRAY); - assertThat(decode("0x")).hasSize(0); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_not_share_position_between_decoded_and_input() { - byte[] decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); - assertThat(decoded).isEqualTo(ARRAY); - } - - @Test - public void should_format() { - assertThat(format(ARRAY)).isEqualTo("0xcafebabe"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0xcafebabe")).isEqualTo(ARRAY); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a 
blob"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(byte[].class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(byte[].class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(ARRAY)).isTrue(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java deleted file mode 100644 index 483dd0b65bd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class SmallIntCodecTest extends CodecTestBase { - - public SmallIntCodecTest() { - this.codec = TypeCodecs.SMALLINT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. - assertThat(encode((short) 0)).isEqualTo("0x0000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000")).isEqualTo((short) 0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x00"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x000000"); - } - - @Test - public void should_format() { - assertThat(format((short) 0)).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo((short) 0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a smallint"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Short.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(short.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - 
assertThat(codec.accepts(Short.class)).isTrue(); - assertThat(codec.accepts(short.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(((short) 123))).isTrue(); - assertThat(codec.accepts(Short.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java deleted file mode 100644 index a42178544d4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class TextCodecTest extends CodecTestBase { - - public TextCodecTest() { - // We will test edge cases of ASCII in AsciiCodecTest - this.codec = TypeCodecs.TEXT; - } - - @Test - public void should_encode() { - assertThat(encode("hello")).isEqualTo("0x68656c6c6f"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x68656c6c6f")).isEqualTo("hello"); - assertThat(decode("0x")).isEmpty(); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format("hello")).isEqualTo("'hello'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'hello'")).isEqualTo("hello"); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a string"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(String.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(String.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts("hello")).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java deleted file mode 100644 index 6d77efd396a..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.time.LocalTime; -import java.time.temporal.ChronoUnit; -import org.junit.Test; - -public class TimeCodecTest extends CodecTestBase { - - public TimeCodecTest() { - this.codec = TypeCodecs.TIME; - } - - @Test - public void should_encode() { - assertThat(encode(LocalTime.MIDNIGHT)).isEqualTo("0x0000000000000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000000")).isEqualTo(LocalTime.MIDNIGHT); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - // No need to test various values because the codec delegates directly to the JDK's formatter, - // which we assume does its job correctly. 
- assertThat(format(LocalTime.MIDNIGHT)).isEqualTo("'00:00:00.000000000'"); - assertThat(format(LocalTime.NOON.plus(13799999994L, ChronoUnit.NANOS))) - .isEqualTo("'12:00:13.799999994'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - // Raw number - assertThat(parse("'0'")).isEqualTo(LocalTime.MIDNIGHT); - - // String format - assertThat(parse("'00:00'")).isEqualTo(LocalTime.MIDNIGHT); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a time"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(LocalTime.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(LocalTime.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(LocalTime.MIDNIGHT)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java deleted file mode 100644 index 416bee8e4df..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.util.UUID; -import org.junit.Test; - -public class TimeUuidCodecTest extends CodecTestBase { - - private static final UUID TIME_BASED = new UUID(6342305776366260711L, -5736720392086604862L); - private static final UUID NOT_TIME_BASED = new UUID(2, 1); - - public TimeUuidCodecTest() { - this.codec = TypeCodecs.TIMEUUID; - - assertThat(TIME_BASED.version()).isEqualTo(1); - assertThat(NOT_TIME_BASED.version()).isNotEqualTo(1); - } - - @Test - public void should_encode_time_uuid() { - assertThat(encode(TIME_BASED)).isEqualTo("0x58046580293811e7b0631332a5f033c2"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_encode_non_time_uuid() { - assertThat(codec.accepts(NOT_TIME_BASED)).isFalse(); - encode(NOT_TIME_BASED); - } - - @Test - public void should_format_time_uuid() { - assertThat(format(TIME_BASED)).isEqualTo("58046580-2938-11e7-b063-1332a5f033c2"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_not_format_non_time_uuid() { - format(NOT_TIME_BASED); - } - - @Test - public void should_accept_generic_type() { - 
assertThat(codec.accepts(GenericType.of(UUID.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(UUID.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(TIME_BASED)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java deleted file mode 100644 index 5cfd17da622..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static java.time.ZoneOffset.ofHours; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class TimestampCodecTest extends CodecTestBase { - - public TimestampCodecTest() { - // force a given timezone for reproducible results in should_format - codec = new TimestampCodec(ZoneOffset.UTC); - } - - @Test - public void should_encode() { - assertThat(encode(Instant.EPOCH)).isEqualTo("0x0000000000000000"); - assertThat(encode(Instant.ofEpochMilli(128))).isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x0000000000000000").toEpochMilli()).isEqualTo(0); - assertThat(decode("0x0000000000000080").toEpochMilli()).isEqualTo(128); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. 
- assertThat(format(Instant.EPOCH)).isEqualTo("'1970-01-01T00:00:00.000Z'"); - assertThat(format(Instant.parse("2018-08-16T15:59:34.123Z"))) - .isEqualTo("'2018-08-16T15:59:34.123Z'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @DataProvider - public static Iterable timeZones() { - return Lists.newArrayList( - ZoneId.systemDefault(), - ZoneOffset.UTC, - ZoneOffset.ofHoursMinutes(3, 30), - ZoneId.of("Europe/Paris"), - ZoneId.of("GMT+7")); - } - - @Test - @UseDataProvider("timeZones") - public void should_parse(ZoneId defaultTimeZone) { - TimestampCodec codec = new TimestampCodec(defaultTimeZone); - - // Raw numbers - assertThat(codec.parse("'0'")).isEqualTo(Instant.EPOCH); - assertThat(codec.parse("'-1'")).isEqualTo(Instant.EPOCH.minusMillis(1)); - assertThat(codec.parse("1534463100000")).isEqualTo(Instant.ofEpochMilli(1534463100000L)); - - // Date formats - Instant expected; - - // date without time, without time zone - expected = LocalDate.parse("2017-01-01").atStartOfDay().atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)).toInstant(); - assertThat(codec.parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = LocalDateTime.parse("2018-08-16T23:45").atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(codec.parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without time zone - expected = LocalDateTime.parse("2019-12-31T16:08:38").atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - 
assertThat(codec.parse("'2019-12-31 16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = LocalDateTime.parse("1950-02-28T12:00:59.230").atZone(defaultTimeZone).toInstant(); - assertThat(codec.parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(codec.parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00").toInstant(); - assertThat(codec.parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(codec.parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00").toInstant(); - assertThat(codec.parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(codec.parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00").toInstant(); - 
assertThat(codec.parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(codec.parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(codec.parse("NULL")).isNull(); - assertThat(codec.parse("null")).isNull(); - assertThat(codec.parse("")).isNull(); - assertThat(codec.parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Instant.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Instant.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(Instant.EPOCH)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java deleted file mode 100644 
index 358c36e9386..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import org.junit.Test; - -public class TinyIntCodecTest extends CodecTestBase { - - public TinyIntCodecTest() { - this.codec = TypeCodecs.TINYINT; - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode((byte) 0)).isEqualTo("0x00"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00")).isEqualTo((byte) 0); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000"); - } - - @Test - public void should_format() { - assertThat(format((byte) 0)).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo((byte) 0); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a tinyint"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(Byte.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(byte.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Byte.class)).isTrue(); - assertThat(codec.accepts(byte.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(((byte) 123))).isTrue(); - assertThat(codec.accepts(Byte.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java deleted file mode 100644 index c51eea20c2e..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; 
-import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class TupleCodecTest extends CodecTestBase { - - @Mock private AttachmentPoint attachmentPoint; - @Mock private CodecRegistry codecRegistry; - private PrimitiveIntCodec intCodec; - private TypeCodec doubleCodec; - private TypeCodec textCodec; - - private TupleType tupleType; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(attachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - when(attachmentPoint.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - - intCodec = spy(TypeCodecs.INT); - doubleCodec = spy(TypeCodecs.DOUBLE); - textCodec = spy(TypeCodecs.TEXT); - - // Called by the getters/setters - when(codecRegistry.codecFor(DataTypes.INT, Integer.class)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE, Double.class)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT, String.class)).thenAnswer(i -> textCodec); - - // Called by format/parse - when(codecRegistry.codecFor(DataTypes.INT)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT)).thenAnswer(i -> textCodec); - - tupleType = - new DefaultTupleType( - ImmutableList.of(DataTypes.INT, DataTypes.DOUBLE, DataTypes.TEXT), attachmentPoint); - - codec = TypeCodecs.tupleOf(tupleType); - } - - @Test - public void should_encode_null_tuple() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_tuple() { - TupleValue tuple = tupleType.newValue(); - tuple = tuple.setInt(0, 1); - tuple = tuple.setToNull(1); - tuple = tuple.setString(2, "a"); - - assertThat(encode(tuple)) - .isEqualTo( - "0x" - + ("00000004" + "00000001") // size and contents of 
field 0 - + "ffffffff" // null field 1 - + ("00000001" + "61") // size and contents of field 2 - ); - - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - // null values are handled directly in the tuple codec, without calling the child codec: - verifyZeroInteractions(doubleCodec); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - } - - @Test - public void should_decode_null_tuple() { - assertThat(decode(null)).isNull(); - } - - @Test - public void should_decode_tuple() { - TupleValue tuple = decode("0x" + ("00000004" + "00000001") + "ffffffff" + ("00000001" + "61")); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.getString(2)).isEqualTo("a"); - - verify(intCodec).decodePrimitive(Bytes.fromHexString("0x00000001"), ProtocolVersion.DEFAULT); - verifyZeroInteractions(doubleCodec); - verify(textCodec).decode(Bytes.fromHexString("0x61"), ProtocolVersion.DEFAULT); - } - - /** Test for JAVA-2557. Ensures that the codec can decode null fields with any negative length. 
*/ - @Test - public void should_decode_negative_element_length_as_null_field() { - TupleValue tuple = - decode( - "0x" - + "ffffffff" // field1 has length -1 - + "fffffffe" // field2 has length -2 - + "80000000" // field3 has length Integer.MIN_VALUE (-2147483648) - ); - - assertThat(tuple.isNull(0)).isTrue(); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.isNull(2)).isTrue(); - - verifyZeroInteractions(intCodec); - verifyZeroInteractions(doubleCodec); - verifyZeroInteractions(textCodec); - } - - @Test - public void should_format_null_tuple() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_tuple() { - TupleValue tuple = tupleType.newValue(); - tuple = tuple.setInt(0, 1); - tuple = tuple.setToNull(1); - tuple = tuple.setString(2, "a"); - - assertThat(format(tuple)).isEqualTo("(1,NULL,'a')"); - - verify(intCodec).format(1); - verify(doubleCodec).format(null); - verify(textCodec).format("a"); - } - - @Test - public void should_parse_null_tuple() { - assertThat(parse(null)).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("NULL")).isNull(); - } - - @Test - public void should_parse_empty_tuple() { - TupleValue tuple = parse("()"); - - assertThat(tuple.isNull(0)).isTrue(); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.isNull(2)).isTrue(); - - verifyNoMoreInteractions(intCodec); - verifyNoMoreInteractions(doubleCodec); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_partial_tuple() { - TupleValue tuple = parse("(1,NULL)"); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.isNull(2)).isTrue(); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_full_tuple() { - TupleValue tuple = parse("(1,NULL,'a')"); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - 
assertThat(tuple.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_parse_tuple_with_extra_whitespace() { - TupleValue tuple = parse(" ( 1 , NULL , 'a' ) "); - - assertThat(tuple.getInt(0)).isEqualTo(1); - assertThat(tuple.isNull(1)).isTrue(); - assertThat(tuple.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_fail_to_parse_invalid_input() { - // general tuple structure invalid - assertThatThrownBy(() -> parse("not a tuple")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"not a tuple\", at character 0 expecting '(' but got 'n'"); - assertThatThrownBy(() -> parse(" ( ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \" ( \", at field 0 (character 3) expecting CQL value or ')', got EOF"); - assertThatThrownBy(() -> parse("( [")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"( [\", invalid CQL value at field 0 (character 2)"); - assertThatThrownBy(() -> parse("( 12 , ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"( 12 , \", at field 1 (character 7) expecting CQL value or ')', got EOF"); - assertThatThrownBy(() -> parse("( 12 12.34 ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"( 12 12.34 \", at field 0 (character 5) expecting ',' but got '1'"); - assertThatThrownBy(() -> parse("(1234,12.34,'text'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,'text'\", at field 2 (character 18) expecting ',' or ')', but got EOF"); - assertThatThrownBy(() -> parse("(1234,12.34,'text'))")) - 
.isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,'text'))\", at character 19 expecting EOF or blank, but got \")\""); - assertThatThrownBy(() -> parse("())")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"())\", at character 2 expecting EOF or blank, but got \")\""); - assertThatThrownBy(() -> parse("(1234,12.34,'text') extra")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,'text') extra\", at character 20 expecting EOF or blank, but got \"extra\""); - // element syntax invalid - assertThatThrownBy(() -> parse("(not a valid int,12.34,'text')")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(not a valid int,12.34,'text')\", " - + "invalid CQL value at field 0 (character 1): " - + "Cannot parse 32-bits int value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("(1234,not a valid double,'text')")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,not a valid double,'text')\", " - + "invalid CQL value at field 1 (character 6): " - + "Cannot parse 64-bits double value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("(1234,12.34,not a valid text)")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse tuple value from \"(1234,12.34,not a valid text)\", " - + "invalid CQL value at field 2 (character 12): " - + "text or varchar values must be enclosed by single quotes") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(TupleValue.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(DefaultTupleValue.class))) - .isFalse(); // covariance not 
allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(DefaultTupleValue.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(tupleType.newValue())).isTrue(); - assertThat(codec.accepts(new DefaultTupleValue(tupleType))).isTrue(); // covariance allowed - assertThat(codec.accepts("not a tuple")).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java deleted file mode 100644 index af94247f937..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.PrimitiveIntCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class UdtCodecTest extends CodecTestBase { - - @Mock private AttachmentPoint attachmentPoint; - @Mock private CodecRegistry codecRegistry; - private PrimitiveIntCodec intCodec; - private TypeCodec doubleCodec; - private TypeCodec textCodec; - - private UserDefinedType userType; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - when(attachmentPoint.getCodecRegistry()).thenReturn(codecRegistry); - 
when(attachmentPoint.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); - - intCodec = spy(TypeCodecs.INT); - doubleCodec = spy(TypeCodecs.DOUBLE); - textCodec = spy(TypeCodecs.TEXT); - - // Called by the getters/setters - when(codecRegistry.codecFor(DataTypes.INT, Integer.class)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE, Double.class)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT, String.class)).thenAnswer(i -> textCodec); - - // Called by format/parse - when(codecRegistry.codecFor(DataTypes.INT)).thenAnswer(i -> intCodec); - when(codecRegistry.codecFor(DataTypes.DOUBLE)).thenAnswer(i -> doubleCodec); - when(codecRegistry.codecFor(DataTypes.TEXT)).thenAnswer(i -> textCodec); - - userType = - new DefaultUserDefinedType( - CqlIdentifier.fromInternal("ks"), - CqlIdentifier.fromInternal("type"), - false, - ImmutableList.of( - CqlIdentifier.fromInternal("field1"), - CqlIdentifier.fromInternal("field2"), - CqlIdentifier.fromInternal("field3")), - ImmutableList.of(DataTypes.INT, DataTypes.DOUBLE, DataTypes.TEXT), - attachmentPoint); - - codec = TypeCodecs.udtOf(userType); - } - - @Test - public void should_encode_null_udt() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_udt() { - UdtValue udt = userType.newValue(); - udt = udt.setInt("field1", 1); - udt = udt.setToNull("field2"); - udt = udt.setString("field3", "a"); - - assertThat(encode(udt)) - .isEqualTo( - "0x" - + ("00000004" + "00000001") // size and contents of field 0 - + "ffffffff" // null field 1 - + ("00000001" + "61") // size and contents of field 2 - ); - - verify(intCodec).encodePrimitive(1, ProtocolVersion.DEFAULT); - // null values are handled directly in the udt codec, without calling the child codec: - verifyZeroInteractions(doubleCodec); - verify(textCodec).encode("a", ProtocolVersion.DEFAULT); - } - - @Test - public void should_decode_null_udt() { - assertThat(decode(null)).isNull(); - } - - @Test 
- public void should_decode_udt() { - UdtValue udt = decode("0x" + ("00000004" + "00000001") + "ffffffff" + ("00000001" + "61")); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - - verify(intCodec).decodePrimitive(Bytes.fromHexString("0x00000001"), ProtocolVersion.DEFAULT); - verifyZeroInteractions(doubleCodec); - verify(textCodec).decode(Bytes.fromHexString("0x61"), ProtocolVersion.DEFAULT); - } - - @Test - public void should_decode_udt_when_too_many_fields() { - UdtValue udt = - decode( - "0x" - + ("00000004" + "00000001") - + "ffffffff" - + ("00000001" + "61") - // extra contents - + "ffffffff"); - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - } - - /** Test for JAVA-2557. Ensures that the codec can decode null fields with any negative length. */ - @Test - public void should_decode_negative_element_length_as_null_field() { - UdtValue udt = - decode( - "0x" - + "ffffffff" // field1 has length -1 - + "fffffffe" // field2 has length -2 - + "80000000" // field3 has length Integer.MIN_VALUE (-2147483648) - ); - - assertThat(udt.isNull(0)).isTrue(); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verifyZeroInteractions(intCodec); - verifyZeroInteractions(doubleCodec); - verifyZeroInteractions(textCodec); - } - - @Test - public void should_decode_absent_element_as_null_field() { - UdtValue udt = decode("0x"); - - assertThat(udt.isNull(0)).isTrue(); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verifyZeroInteractions(intCodec); - verifyZeroInteractions(doubleCodec); - verifyZeroInteractions(textCodec); - } - - @Test - public void should_format_null_udt() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_udt() { - UdtValue udt = userType.newValue(); - udt = udt.setInt(0, 1); - udt = udt.setToNull(1); - udt 
= udt.setString(2, "a"); - - assertThat(format(udt)).isEqualTo("{field1:1,field2:NULL,field3:'a'}"); - - verify(intCodec).format(1); - verify(doubleCodec).format(null); - verify(textCodec).format("a"); - } - - @Test - public void should_parse_null_udt() { - assertThat(parse(null)).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("NULL")).isNull(); - } - - @Test - public void should_parse_empty_udt() { - UdtValue udt = parse("{}"); - - assertThat(udt.isNull(0)).isTrue(); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verifyNoMoreInteractions(intCodec); - verifyNoMoreInteractions(doubleCodec); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_partial_udt() { - UdtValue udt = parse("{field1:1,field2:NULL}"); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.isNull(2)).isTrue(); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verifyNoMoreInteractions(textCodec); - } - - @Test - public void should_parse_full_udt() { - UdtValue udt = parse("{field1:1,field2:NULL,field3:'a'}"); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_parse_udt_with_extra_whitespace() { - UdtValue udt = parse(" { field1 : 1 , field2 : NULL , field3 : 'a' } "); - - assertThat(udt.getInt(0)).isEqualTo(1); - assertThat(udt.isNull(1)).isTrue(); - assertThat(udt.getString(2)).isEqualTo("a"); - - verify(intCodec).parse("1"); - verify(doubleCodec).parse("NULL"); - verify(textCodec).parse("'a'"); - } - - @Test - public void should_fail_to_parse_invalid_input() { - // general UDT structure invalid - assertThatThrownBy(() -> parse("not a udt")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from 
\"not a udt\" at character 0: expecting '{' but got 'n'"); - assertThatThrownBy(() -> parse(" { ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \" { \" at character 3: expecting CQL identifier or '}', got EOF"); - assertThatThrownBy(() -> parse("{ [ ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ [ \", cannot parse a CQL identifier at character 2"); - assertThatThrownBy(() -> parse("{ field1 ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 \", at field field1 (character 9) expecting ':', but got EOF"); - assertThatThrownBy(() -> parse("{ field1 ,")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 ,\", at field field1 (character 9) expecting ':', but got ','"); - assertThatThrownBy(() -> parse("{nonExistentField:NULL}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{nonExistentField:NULL}\", unknown CQL identifier at character 17: \"nonExistentField\""); - assertThatThrownBy(() -> parse("{ field1 : ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : \", invalid CQL value at field field1 (character 11)"); - assertThatThrownBy(() -> parse("{ field1 : [")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : [\", invalid CQL value at field field1 (character 11)"); - assertThatThrownBy(() -> parse("{ field1 : 1 , ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : 1 , \" at field field1 (character 15): expecting CQL identifier or '}', got EOF"); - assertThatThrownBy(() -> parse("{ field1 : 1 field2 ")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{ field1 : 1 field2 \", at field 
field1 (character 13) expecting ',' but got 'f'"); - assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'\", at field field3 (character 33) expecting ',' or '}', but got EOF"); - assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'}}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'}}\", at character 34 expecting EOF or blank, but got \"}\""); - assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'} extra")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'} extra\", at character 35 expecting EOF or blank, but got \"extra\""); - // element syntax invalid - assertThatThrownBy(() -> parse("{field1:not a valid int,field2:NULL,field3:'a'}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:not a valid int,field2:NULL,field3:'a'}\", " - + "invalid CQL value at field field1 (character 8): " - + "Cannot parse 32-bits int value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("{field1:1,field2:not a valid double,field3:'a'}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:not a valid double,field3:'a'}\", " - + "invalid CQL value at field field2 (character 17): " - + "Cannot parse 64-bits double value from \"not\"") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - assertThatThrownBy(() -> parse("{field1:1,field2:NULL,field3:not a valid text}")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage( - "Cannot parse UDT value from \"{field1:1,field2:NULL,field3:not a valid text}\", " - + "invalid CQL value at field field3 (character 29): " - + "text 
or varchar values must be enclosed by single quotes") - .hasRootCauseInstanceOf(IllegalArgumentException.class); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(UdtValue.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(DefaultUdtValue.class))) - .isFalse(); // covariance not allowed - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(DefaultUdtValue.class)).isFalse(); // covariance not allowed - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(userType.newValue())).isTrue(); - assertThat(codec.accepts(new DefaultUdtValue(userType))).isTrue(); // covariance allowed - assertThat(codec.accepts("not a udt")).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java deleted file mode 100644 index e62fb4af15b..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.util.UUID; -import org.junit.Test; - -public class UuidCodecTest extends CodecTestBase { - private static final UUID MOCK_UUID = new UUID(2L, 1L); - - public UuidCodecTest() { - this.codec = TypeCodecs.UUID; - } - - @Test - public void should_encode() { - assertThat(encode(MOCK_UUID)).isEqualTo("0x00000000000000020000000000000001"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - UUID decoded = decode("0x00000000000000020000000000000001"); - assertThat(decoded.getMostSignificantBits()).isEqualTo(2L); - assertThat(decoded.getLeastSignificantBits()).isEqualTo(1L); - - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x00000000000000020000000000000001" + "0000"); - } - - @Test - public void should_format() { - assertThat(format(MOCK_UUID)).isEqualTo("00000000-0000-0002-0000-000000000001"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("00000000-0000-0002-0000-000000000001")).isEqualTo(MOCK_UUID); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a uuid"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(UUID.class))).isTrue(); - 
assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(UUID.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(MOCK_UUID)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java deleted file mode 100644 index a3472d4b8ce..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import java.math.BigInteger; -import org.junit.Test; - -public class VarintCodecTest extends CodecTestBase { - - public VarintCodecTest() { - this.codec = TypeCodecs.VARINT; - } - - @Test - public void should_encode() { - assertThat(encode(BigInteger.ONE)).isEqualTo("0x01"); - assertThat(encode(BigInteger.valueOf(128))).isEqualTo("0x0080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x01")).isEqualTo(BigInteger.ONE); - assertThat(decode("0x0080")).isEqualTo(BigInteger.valueOf(128)); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format(BigInteger.ONE)).isEqualTo("1"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isEqualTo(BigInteger.ONE); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a varint"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(BigInteger.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(BigInteger.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(BigInteger.ONE)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java deleted file mode 100644 index 17c78514127..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.type.DefaultVectorType; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.time.LocalTime; -import java.util.HashMap; -import org.apache.commons.lang3.ArrayUtils; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class VectorCodecTest { - - @DataProvider - public static Object[] dataProvider() { - HashMap map1 = new HashMap<>(); - map1.put(1, "a"); - HashMap map2 = new HashMap<>(); - map2.put(2, "b"); - return new TestDataContainer[] { - new TestDataContainer( - DataTypes.FLOAT, - new Float[] {1.0f, 2.5f}, - "[1.0, 2.5]", - Bytes.fromHexString("0x3f80000040200000")), - new TestDataContainer( - DataTypes.ASCII, - new String[] {"ab", "cde"}, - "['ab', 'cde']", - Bytes.fromHexString("0x02616203636465")), - new TestDataContainer( - DataTypes.BIGINT, - new Long[] {1L, 2L}, - "[1, 2]", - Bytes.fromHexString("0x00000000000000010000000000000002")), - new TestDataContainer( - DataTypes.BLOB, - new ByteBuffer[] {Bytes.fromHexString("0xCAFE"), Bytes.fromHexString("0xABCD")}, - "[0xcafe, 0xabcd]", - Bytes.fromHexString("0x02cafe02abcd")), - 
new TestDataContainer( - DataTypes.BOOLEAN, - new Boolean[] {true, false}, - "[true, false]", - Bytes.fromHexString("0x0100")), - new TestDataContainer( - DataTypes.TIME, - new LocalTime[] {LocalTime.ofNanoOfDay(1), LocalTime.ofNanoOfDay(2)}, - "['00:00:00.000000001', '00:00:00.000000002']", - Bytes.fromHexString("0x080000000000000001080000000000000002")), - new TestDataContainer( - DataTypes.mapOf(DataTypes.INT, DataTypes.ASCII), - new HashMap[] {map1, map2}, - "[{1:'a'}, {2:'b'}]", - Bytes.fromHexString( - "0x110000000100000004000000010000000161110000000100000004000000020000000162")), - new TestDataContainer( - DataTypes.vectorOf(DataTypes.INT, 1), - new CqlVector[] {CqlVector.newInstance(1), CqlVector.newInstance(2)}, - "[[1], [2]]", - Bytes.fromHexString("0x0000000100000002")), - new TestDataContainer( - DataTypes.vectorOf(DataTypes.TEXT, 1), - new CqlVector[] {CqlVector.newInstance("ab"), CqlVector.newInstance("cdef")}, - "[['ab'], ['cdef']]", - Bytes.fromHexString("0x03026162050463646566")), - new TestDataContainer( - DataTypes.vectorOf(DataTypes.vectorOf(DataTypes.FLOAT, 2), 1), - new CqlVector[] { - CqlVector.newInstance(CqlVector.newInstance(1.0f, 2.5f)), - CqlVector.newInstance(CqlVector.newInstance(3.0f, 4.5f)) - }, - "[[[1.0, 2.5]], [[3.0, 4.5]]]", - Bytes.fromHexString("0x3f800000402000004040000040900000")) - }; - } - - @UseDataProvider("dataProvider") - @Test - public void should_encode(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - CqlVector vector = CqlVector.newInstance(testData.getValues()); - assertThat(codec.encode(vector, ProtocolVersion.DEFAULT)).isEqualTo(testData.getBytes()); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_encode_with_too_few_elements(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThatThrownBy( - () -> - codec.encode( - CqlVector.newInstance(testData.getValues()[0]), ProtocolVersion.DEFAULT)) - 
.isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_encode_with_too_many_elements(TestDataContainer testData) { - Object[] doubled = ArrayUtils.addAll(testData.getValues(), testData.getValues()); - TypeCodec> codec = getCodec(testData.getDataType()); - assertThatThrownBy(() -> codec.encode(CqlVector.newInstance(doubled), ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_decode(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.decode(testData.getBytes(), ProtocolVersion.DEFAULT)) - .isEqualTo(CqlVector.newInstance(testData.getValues())); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_decode_if_too_few_bytes(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - int lastIndex = testData.getBytes().remaining() - 1; - assertThatThrownBy( - () -> - codec.decode( - (ByteBuffer) testData.getBytes().duplicate().limit(lastIndex), - ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_throw_on_decode_if_too_many_bytes(TestDataContainer testData) { - ByteBuffer doubled = ByteBuffer.allocate(testData.getBytes().remaining() * 2); - doubled.put(testData.getBytes().duplicate()).put(testData.getBytes().duplicate()).flip(); - TypeCodec> codec = getCodec(testData.getDataType()); - assertThatThrownBy(() -> codec.decode(doubled, ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - @Test - @UseDataProvider("dataProvider") - public void should_format(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - CqlVector vector = CqlVector.newInstance(testData.getValues()); - assertThat(codec.format(vector)).isEqualTo(testData.getFormatted()); - } - - @Test - 
@UseDataProvider("dataProvider") - public void should_parse(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.parse(testData.getFormatted())) - .isEqualTo(CqlVector.newInstance(testData.getValues())); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_data_type(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 2))).isTrue(); - assertThat(codec.accepts(new DefaultVectorType(DataTypes.custom("non-existent"), 2))).isFalse(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_vector_type_correct_dimension_only(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 0))).isFalse(); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 1))).isFalse(); - assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 3))).isFalse(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_generic_type(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(codec.getJavaType())).isTrue(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_raw_type(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - assertThat(codec.accepts(CqlVector.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - @UseDataProvider("dataProvider") - public void should_accept_object(TestDataContainer testData) { - TypeCodec> codec = getCodec(testData.getDataType()); - CqlVector vector = CqlVector.newInstance(testData.getValues()); - assertThat(codec.accepts(vector)).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } - - @Test - public void should_handle_null_and_empty() { - 
TypeCodec> codec = getCodec(DataTypes.FLOAT); - assertThat(codec.encode(null, ProtocolVersion.DEFAULT)).isNull(); - assertThat(codec.decode(Bytes.fromHexString("0x"), ProtocolVersion.DEFAULT)).isNull(); - assertThat(codec.format(null)).isEqualTo("NULL"); - assertThat(codec.parse("NULL")).isNull(); - assertThat(codec.parse("null")).isNull(); - assertThat(codec.parse("")).isNull(); - assertThat(codec.parse(null)).isNull(); - assertThatThrownBy(() -> codec.encode(CqlVector.newInstance(), ProtocolVersion.DEFAULT)) - .isInstanceOf(IllegalArgumentException.class); - } - - private static TypeCodec> getCodec(DataType dataType) { - return TypeCodecs.vectorOf( - DataTypes.vectorOf(dataType, 2), CodecRegistry.DEFAULT.codecFor(dataType)); - } - - private static class TestDataContainer { - private final DataType dataType; - private final Object[] values; - private final String formatted; - private final ByteBuffer bytes; - - public TestDataContainer( - DataType dataType, Object[] values, String formatted, ByteBuffer bytes) { - this.dataType = dataType; - this.values = values; - this.formatted = formatted; - this.bytes = bytes; - } - - public DataType getDataType() { - return dataType; - } - - public Object[] getValues() { - return values; - } - - public String getFormatted() { - return formatted; - } - - public ByteBuffer getBytes() { - return bytes; - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java deleted file mode 100644 index 745ba7a3aa8..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import java.util.Optional; -import org.junit.Before; -import org.junit.Test; - -public class OptionalCodecTest extends CodecTestBase> { - - @Before - public void setup() { - codec = ExtraTypeCodecs.optionalOf(TypeCodecs.INT); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(Optional.of(1))).isEqualTo("0x00000001"); - assertThat(encode(Optional.empty())).isNull(); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000001")).isPresent().contains(1); - assertThat(decode("0x")).isEmpty(); - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_format() { - assertThat(format(Optional.of(1))).isEqualTo("1"); - assertThat(format(Optional.empty())).isEqualTo("NULL"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("1")).isPresent().contains(1); - assertThat(parse("NULL")).isEmpty(); - assertThat(parse("null")).isEmpty(); - assertThat(parse("")).isEmpty(); - assertThat(parse(null)).isEmpty(); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.optionalOf(Integer.class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Optional.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(Optional.of(1))).isTrue(); - assertThat(codec.accepts(Optional.empty())).isTrue(); - assertThat(codec.accepts(Optional.of("foo"))).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java deleted file mode 100644 index 4a175cdf306..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class BooleanArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new boolean[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new boolean[] {true, false})) - .isEqualTo( - "0x" - + "00000002" // number of elements - + "00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "00" // contents of element 2 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000002" // number of elements - + 
"00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "00" // contents of element 2 - )) - .containsExactly(true, false); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new boolean[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new boolean[] {true, false})).isEqualTo("[true,false]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[true,false]")).containsExactly(true, false); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Boolean.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Boolean.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Boolean.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Boolean.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new boolean[] {true, false})).isTrue(); - assertThat(codec.accepts(new 
Boolean[] {true, false})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java deleted file mode 100644 index 761b568fcea..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class ByteArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.BYTE_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new byte[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new byte[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "02" // contents of element 2 - + "00000001" // size of element 3 - + "03" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000001" // size of element 1 - + "01" // contents of element 1 - + "00000001" // size of element 2 - + "02" // contents of element 2 - + "00000001" // size of element 3 - + "03" // contents of element 3 - )) - .containsExactly((byte) 1, (byte) 2, (byte) 3); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void 
should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new byte[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new byte[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly((byte) 1, (byte) 2, (byte) 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Byte.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Byte.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Byte.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Byte.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new byte[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Byte[] {1, 2, 3})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java deleted file mode 100644 index 8e951f8ed55..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class DoubleArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new double[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new double[] {1.1d, 2.2d, 3.3d})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "3ff199999999999a" // contents of element 1 - + "00000008" // size of element 2 - 
+ "400199999999999a" // contents of element 2 - + "00000008" // size of element 3 - + "400a666666666666" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "3ff199999999999a" // contents of element 1 - + "00000008" // size of element 2 - + "400199999999999a" // contents of element 2 - + "00000008" // size of element 3 - + "400a666666666666" // contents of element 3 - )) - .containsExactly(1.1d, 2.2d, 3.3d); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new double[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new double[] {1.1d, 2.2d, 3.3d})).isEqualTo("[1.1,2.2,3.3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1.1,2.2,3.3]")).containsExactly(1.1d, 2.2d, 3.3d); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Double.TYPE))).isTrue(); - 
assertThat(codec.accepts(GenericType.arrayOf(Double.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Double.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Double.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new double[] {1.1d, 2.2d, 3.3d})).isTrue(); - assertThat(codec.accepts(new Double[] {1.1d, 2.2d, 3.3d})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java deleted file mode 100644 index 77f3eafdcd7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class FloatArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new float[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new float[] {1.1f, 2.2f, 3.3f})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "3f8ccccd" // contents of element 1 - + "00000004" // size of element 2 - + "400ccccd" // contents of element 2 - + "00000004" // size of element 3 - + "40533333" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "3f8ccccd" // contents of element 1 - + "00000004" // size of element 2 - + "400ccccd" // contents of element 2 - + "00000004" // size of element 3 - + "40533333" // contents of element 3 - )) - .containsExactly(1.1f, 2.2f, 3.3f); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - 
@Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new float[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new float[] {1.1f, 2.2f, 3.3f})).isEqualTo("[1.1,2.2,3.3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1.1,2.2,3.3]")).containsExactly(1.1f, 2.2f, 3.3f); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Float.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Float.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Float.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Float.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new float[] {1.1f, 2.2f, 3.3f})).isTrue(); - assertThat(codec.accepts(new Float[] {1.1f, 2.2f, 3.3f})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java deleted file mode 100644 index ac00f1f8e1c..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class IntArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.INT_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new int[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new int[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "00000001" // contents of element 1 - + "00000004" // size of element 2 - + "00000002" // contents of 
element 2 - + "00000004" // size of element 3 - + "00000003" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000004" // size of element 1 - + "00000001" // contents of element 1 - + "00000004" // size of element 2 - + "00000002" // contents of element 2 - + "00000004" // size of element 3 - + "00000003" // contents of element 3 - )) - .containsExactly(1, 2, 3); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new int[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new int[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly(1, 2, 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Integer.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class))).isFalse(); - 
assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Integer.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new int[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Integer[] {1, 2, 3})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java deleted file mode 100644 index 737dcfae3c0..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class LongArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.LONG_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new long[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new long[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "0000000000000001" // contents of element 1 - + "00000008" // size of element 2 - + "0000000000000002" // contents of element 2 - + "00000008" // size of element 3 - + "0000000000000003" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000008" // size of element 1 - + "0000000000000001" // contents of element 1 - + "00000008" // size of element 2 - + "0000000000000002" // contents of element 2 - + "00000008" // size of element 3 - + "0000000000000003" // contents of element 3 - )) - .containsExactly(1L, 2L, 3L); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 
(-1 for null) - ); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new long[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new long[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly(1L, 2L, 3L); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Long.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Long.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Long.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Long.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new long[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Long[] {1L, 2L, 3L})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java deleted file mode 100644 index a2afc652002..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.protocol.internal.util.Bytes; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class ObjectArrayCodecTest extends CodecTestBase { - - @Mock private TypeCodec elementCodec; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(elementCodec.getCqlType()).thenReturn(DataTypes.TEXT); - when(elementCodec.getJavaType()).thenReturn(GenericType.STRING); - 
codec = ExtraTypeCodecs.listToArrayOf(elementCodec); - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new String[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - when(elementCodec.encode("hello", ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x68656c6c6f")); - when(elementCodec.encode("world", ProtocolVersion.DEFAULT)) - .thenReturn(Bytes.fromHexString("0x776f726c64")); - assertThat(encode(new String[] {"hello", "world"})) - .isEqualTo( - "0x" - + "00000002" // number of elements - + "00000005" // size of element 1 - + "68656c6c6f" // contents of element 1 - + "00000005" // size of element 2 - + "776f726c64" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - when(elementCodec.decode(Bytes.fromHexString("0x68656c6c6f"), ProtocolVersion.DEFAULT)) - .thenReturn("hello"); - when(elementCodec.decode(Bytes.fromHexString("0x776f726c64"), ProtocolVersion.DEFAULT)) - .thenReturn("world"); - assertThat( - decode( - "0x" - + "00000002" // number of elements - + "00000005" // size of element 1 - + "68656c6c6f" // contents of element 1 - + "00000005" // size of element 2 - + "776f726c64" // contents of element 3 - )) - .containsExactly("hello", "world"); - } - - @Test - public void should_decode_array_with_null_elements() { - when(elementCodec.decode(Bytes.fromHexString("0x68656c6c6f"), ProtocolVersion.DEFAULT)) - .thenReturn("hello"); - assertThat( - decode( - "0x" - + "00000002" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - + "00000005" // size of element 2 - + "68656c6c6f" // contents of element 2 - )) - 
.containsExactly(null, "hello"); - } - - @Test - public void should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new String[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - when(elementCodec.format("hello")).thenReturn("'hello'"); - when(elementCodec.format("world")).thenReturn("'world'"); - assertThat(format(new String[] {"hello", "world"})).isEqualTo("['hello','world']"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - when(elementCodec.parse("'hello'")).thenReturn("hello"); - when(elementCodec.parse("'world'")).thenReturn("world"); - assertThat(parse("['hello','world']")).containsExactly("hello", "world"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Integer.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new String[] {"hello", "world"})).isTrue(); - assertThat(codec.accepts(new Integer[] {1, 2, 3})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java 
deleted file mode 100644 index 3d489ada38f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.array; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class ShortArrayCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.SHORT_LIST_TO_ARRAY; - } - - @Test - public void should_encode_null() { - assertThat(encode(null)).isNull(); - } - - @Test - public void should_encode_empty_array() { - assertThat(encode(new short[] {})).isEqualTo("0x00000000"); - } - - @Test - public void should_encode_non_empty_array() { - assertThat(encode(new short[] {1, 2, 3})) - .isEqualTo( - "0x" - + "00000003" // number of elements - + "00000002" // size of element 1 - + "0001" // contents of element 1 - + "00000002" // size of element 2 - + "0002" // contents of element 2 - + "00000002" // size of element 3 - + "0003" // contents of element 3 - ); - } - - @Test - public void should_decode_null_as_empty_array() { - assertThat(decode(null)).isEmpty(); - } - - @Test - public void should_decode_empty_array() { - assertThat(decode("0x00000000")).isEmpty(); - } - - @Test - public void should_decode_non_empty_array() { - assertThat( - decode( - "0x" - + "00000003" // number of elements - + "00000002" // size of element 1 - + "0001" // contents of element 1 - + "00000002" // size of element 2 - + "0002" // contents of element 2 - + "00000002" // size of element 3 - + "0003" // contents of element 3 - )) - .containsExactly((short) 1, (short) 2, (short) 3); - } - - @Test(expected = NullPointerException.class) - public void should_not_decode_array_with_null_elements() { - decode( - "0x" - + "00000001" // number of elements - + "FFFFFFFF" // size of element 1 (-1 for null) - ); - } - - @Test - public void 
should_format_null_array() { - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_format_empty_array() { - assertThat(format(new short[] {})).isEqualTo("[]"); - } - - @Test - public void should_format_non_empty_array() { - assertThat(format(new short[] {1, 2, 3})).isEqualTo("[1,2,3]"); - } - - @Test - public void should_parse_null_or_empty_string() { - assertThat(parse(null)).isNull(); - assertThat(parse("")).isNull(); - } - - @Test - public void should_parse_empty_array() { - assertThat(parse("[]")).isEmpty(); - } - - @Test - public void should_parse_non_empty_array() { - assertThat(parse("[1,2,3]")).containsExactly((short) 1, (short) 2, (short) 3); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_malformed_array() { - parse("not an array"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.arrayOf(Short.TYPE))).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Short.class))).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(GenericType.arrayOf(Short.TYPE).getRawType())).isTrue(); - assertThat(codec.accepts(GenericType.arrayOf(Short.class).getRawType())).isFalse(); - assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(new short[] {1, 2, 3})).isTrue(); - assertThat(codec.accepts(new Short[] {1, 2, 3})).isFalse(); - assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java deleted file mode 100644 index 093ec8a0be8..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class EnumNameCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.enumNamesOf(DefaultProtocolVersion.class); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(DefaultProtocolVersion.V3)).isEqualTo("0x5633"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x5633")).isEqualTo(DefaultProtocolVersion.V3); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_invalid_name() { - decode("0x1234"); - } - - @Test - public void should_format() { - assertThat(format(DefaultProtocolVersion.V3)).isEqualTo("'V3'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'V3'")).isEqualTo(DefaultProtocolVersion.V3); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a valid enum constant"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(DefaultProtocolVersion.class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(DefaultProtocolVersion.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(DefaultProtocolVersion.V3)).isTrue(); - assertThat(codec.accepts(DseProtocolVersion.DSE_V1)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java deleted file mode 100644 index 7162bc51ff2..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.enums; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import org.junit.Before; -import org.junit.Test; - -public class EnumOrdinalCodecTest extends CodecTestBase { - - @Before - public void setup() { - codec = ExtraTypeCodecs.enumOrdinalsOf(DefaultProtocolVersion.class); - } - - @Test - public void should_encode() { - // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try - // a thousand different values. 
- assertThat(encode(DefaultProtocolVersion.values()[0])).isEqualTo("0x00000000"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode("0x00000000")).isEqualTo(DefaultProtocolVersion.values()[0]); - assertThat(decode("0x")).isNull(); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - decode("0x0000000000000000"); - } - - @Test - public void should_format() { - assertThat(format(DefaultProtocolVersion.values()[0])).isEqualTo("0"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("0")).isEqualTo(DefaultProtocolVersion.values()[0]); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not an int"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(DefaultProtocolVersion.class))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(DefaultProtocolVersion.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(DefaultProtocolVersion.V3)).isTrue(); - assertThat(codec.accepts(DseProtocolVersion.DSE_V1)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java deleted file mode 100644 index f9c37075b36..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java 
+++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.extras.json; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.Set; -import org.junit.Before; -import org.junit.Test; - -public class JsonCodecTest extends CodecTestBase> { - - private static final InetAddress V4_ADDRESS; - private static final InetAddress V6_ADDRESS; - private static final Set SET_OF_ADDRESSES; - - static { - try { - V4_ADDRESS = InetAddress.getByName("127.0.0.1"); - V6_ADDRESS = InetAddress.getByName("::1"); - SET_OF_ADDRESSES 
= ImmutableSet.of(V4_ADDRESS, V6_ADDRESS); - } catch (UnknownHostException e) { - fail("unexpected error", e); - throw new AssertionError(); // never reached - } - } - - @Before - public void setup() { - this.codec = ExtraTypeCodecs.json(GenericType.setOf(GenericType.INET_ADDRESS)); - } - - @Test - public void should_encode() { - assertThat(encode(SET_OF_ADDRESSES)) - .isEqualTo(encodeJson("[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]")); - assertThat(encode(Collections.emptySet())).isEqualTo(encodeJson("[]")); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode(encodeJson("[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]"))) - .isEqualTo(SET_OF_ADDRESSES); - assertThat(decode(encodeJson("[]"))).isEqualTo(Collections.emptySet()); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format(SET_OF_ADDRESSES)).isEqualTo("'[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]'"); - assertThat(format(Collections.emptySet())).isEqualTo("'[]'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse("'[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]'")).isEqualTo(SET_OF_ADDRESSES); - assertThat(parse("'[]'")).isEqualTo(Collections.emptySet()); - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_parse_invalid_input() { - parse("not a JSON string"); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.setOf(GenericType.INET_ADDRESS))).isTrue(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(Set.class)).isTrue(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(SET_OF_ADDRESSES)).isTrue(); - assertThat(codec.accepts(Collections.emptySet())).isTrue(); - 
assertThat(codec.accepts(Collections.singletonList(V4_ADDRESS))).isFalse(); - } - - private String encodeJson(String json) { - return Bytes.toHexString(TypeCodecs.TEXT.encode(json, ProtocolVersion.DEFAULT)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java deleted file mode 100644 index 7d87cbbba9f..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class LocalTimestampCodecTest extends CodecTestBase { - - @Test - public void should_encode() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC; - assertThat(encode(Instant.EPOCH.atZone(ZoneOffset.UTC).toLocalDateTime())) - .isEqualTo("0x0000000000000000"); - assertThat(encode(Instant.ofEpochMilli(128).atZone(ZoneOffset.UTC).toLocalDateTime())) - .isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC; - assertThat(decode("0x0000000000000000")) - .isEqualTo(Instant.EPOCH.atZone(ZoneOffset.UTC).toLocalDateTime()); - assertThat(decode("0x0000000000000080")) - .isEqualTo(Instant.ofEpochMilli(128).atZone(ZoneOffset.UTC).toLocalDateTime()); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - codec = 
ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - codec = ExtraTypeCodecs.localTimestampAt(ZoneOffset.ofHours(2)); - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. - assertThat(format(LocalDateTime.parse("2018-08-16T16:59:34.123"))) - .isEqualTo("'2018-08-16T16:59:34.123+02:00'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_parse(ZoneId defaultTimeZone) { - codec = ExtraTypeCodecs.localTimestampAt(defaultTimeZone); - - // Raw numbers - assertThat(parse("'0'")).isEqualTo(Instant.EPOCH.atZone(defaultTimeZone).toLocalDateTime()); - assertThat(parse("'-1'")) - .isEqualTo(Instant.EPOCH.minusMillis(1).atZone(defaultTimeZone).toLocalDateTime()); - assertThat(parse("1534463100000")) - .isEqualTo(Instant.ofEpochMilli(1534463100000L).atZone(defaultTimeZone).toLocalDateTime()); - - // Date formats - LocalDateTime expected; - - // date without time, without time zone - expected = LocalDate.parse("2017-01-01").atStartOfDay(); - assertThat(parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = - ZonedDateTime.parse("2018-08-16T00:00:00+02:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = LocalDateTime.parse("2018-08-16T23:45"); - assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without time zone - expected = 
LocalDateTime.parse("2019-12-31T16:08:38"); - assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - assertThat(parse("'2019-12-31 16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = LocalDateTime.parse("1950-02-28T12:00:59.230"); - assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = - ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = - ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = - 
ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00") - .withZoneSameInstant(defaultTimeZone) - .toLocalDateTime(); - assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThat(codec.accepts(GenericType.LOCAL_DATE_TIME)).isTrue(); - assertThat(codec.accepts(GenericType.INSTANT)).isFalse(); - } - - @Test - public void should_accept_raw_type() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThat(codec.accepts(LocalDateTime.class)).isTrue(); - assertThat(codec.accepts(Instant.class)).isFalse(); - } - - @Test - public void should_accept_object() { - codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; - assertThat(codec.accepts(LocalDateTime.now(ZoneId.systemDefault()))).isTrue(); - 
assertThat(codec.accepts(Instant.EPOCH)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java deleted file mode 100644 index 9bf1cac1007..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Before; -import org.junit.Test; - -public class PersistentZonedTimestampCodecTest extends CodecTestBase { - - private static final ZonedDateTime EPOCH_UTC = Instant.EPOCH.atZone(ZoneOffset.UTC); - - private static final ZonedDateTime EPOCH_MILLIS_CET = - Instant.ofEpochMilli(128).atZone(ZoneId.of("CET")); - - private static final ZonedDateTime EPOCH_MILLIS_OFFSET = - Instant.ofEpochMilli(128).atZone(ZoneOffset.ofHours(2)); - - private static final ZonedDateTime EPOCH_MILLIS_EUROPE_PARIS = - Instant.ofEpochMilli(-128).atZone(ZoneId.of("Europe/Paris")); - - private static final String EPOCH_UTC_ENCODED = - "0x" - + ("00000008" + "0000000000000000") // size and contents of timestamp - + ("00000001" + "5a"); // size and contents of zone ID - - private static final String EPOCH_MILLIS_CET_ENCODED = - "0x" - + ("00000008" + "0000000000000080") // size and contents of timestamp - + ("00000003" + "434554"); // size and contents of zone ID - - private static final String EPOCH_MILLIS_OFFSET_ENCODED = - "0x" - + ("00000008" + "0000000000000080") // size and contents of timestamp - + ("00000006" + "2b30323a3030"); // size and contents of zone ID - - private static final String EPOCH_MILLIS_EUROPE_PARIS_ENCODED = - "0x" - + ("00000008" + "ffffffffffffff80") // size and contents of timestamp - + ("0000000c" + "4575726f70652f5061726973"); // size and contents of zone ID - - private static final String EPOCH_UTC_FORMATTED = "('1970-01-01T00:00:00.000Z','Z')"; - - private static final String 
EPOCH_MILLIS_CET_FORMATTED = "('1970-01-01T00:00:00.128Z','CET')"; - - private static final String EPOCH_MILLIS_OFFSET_FORMATTED = - "('1970-01-01T00:00:00.128Z','+02:00')"; - - private static final String EPOCH_MILLIS_EUROPE_PARIS_FORMATTED = - "('1969-12-31T23:59:59.872Z','Europe/Paris')"; - - @Before - public void setup() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED; - } - - @Test - public void should_encode() { - assertThat(encode(EPOCH_UTC)).isEqualTo(EPOCH_UTC_ENCODED); - assertThat(encode(EPOCH_MILLIS_CET)).isEqualTo(EPOCH_MILLIS_CET_ENCODED); - assertThat(encode(EPOCH_MILLIS_OFFSET)).isEqualTo(EPOCH_MILLIS_OFFSET_ENCODED); - assertThat(encode(EPOCH_MILLIS_EUROPE_PARIS)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS_ENCODED); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - assertThat(decode(EPOCH_UTC_ENCODED)).isEqualTo(EPOCH_UTC); - assertThat(decode(EPOCH_MILLIS_CET_ENCODED)).isEqualTo(EPOCH_MILLIS_CET); - assertThat(decode(EPOCH_MILLIS_OFFSET_ENCODED)).isEqualTo(EPOCH_MILLIS_OFFSET); - assertThat(decode(EPOCH_MILLIS_EUROPE_PARIS_ENCODED)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS); - assertThat(decode(null)).isNull(); - } - - @Test - public void should_format() { - assertThat(format(EPOCH_UTC)).isEqualTo(EPOCH_UTC_FORMATTED); - assertThat(format(EPOCH_MILLIS_CET)).isEqualTo(EPOCH_MILLIS_CET_FORMATTED); - assertThat(format(EPOCH_MILLIS_OFFSET)).isEqualTo(EPOCH_MILLIS_OFFSET_FORMATTED); - assertThat(format(EPOCH_MILLIS_EUROPE_PARIS)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS_FORMATTED); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - public void should_parse() { - assertThat(parse(EPOCH_UTC_FORMATTED)).isEqualTo(EPOCH_UTC); - assertThat(parse(EPOCH_MILLIS_CET_FORMATTED)).isEqualTo(EPOCH_MILLIS_CET); - assertThat(parse(EPOCH_MILLIS_OFFSET_FORMATTED)).isEqualTo(EPOCH_MILLIS_OFFSET); - assertThat(parse(EPOCH_MILLIS_EUROPE_PARIS_FORMATTED)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS); - assertThat(parse("NULL")).isNull(); - 
assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_accept_generic_type() { - assertThat(codec.accepts(GenericType.of(ZonedDateTime.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - assertThat(codec.accepts(ZonedDateTime.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - assertThat(codec.accepts(ZonedDateTime.now(ZoneOffset.systemDefault()))).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java deleted file mode 100644 index 36ee71eebe6..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static java.time.ZoneOffset.ofHours; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class TimestampMillisCodecTest extends CodecTestBase { - - @Test - public void should_encode() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(encode(0L)).isEqualTo("0x0000000000000000"); - assertThat(encode(128L)).isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(decode("0x0000000000000000")).isEqualTo(0L); - assertThat(decode("0x0000000000000080")).isEqualTo(128L); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_too_many_bytes() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - codec = 
ExtraTypeCodecs.timestampMillisAt(ZoneOffset.ofHours(2)); - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. - assertThat(format(0L)).isEqualTo("'1970-01-01T02:00:00.000+02:00'"); - assertThat(format(1534435174123L)).isEqualTo("'2018-08-16T17:59:34.123+02:00'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_parse(ZoneId defaultTimeZone) { - codec = ExtraTypeCodecs.timestampMillisAt(defaultTimeZone); - - // Raw numbers - assertThat(parse("'0'")).isEqualTo(0L); - assertThat(parse("'-1'")).isEqualTo(-1L); - assertThat(parse("1534463100000")).isEqualTo(1534463100000L); - - // Date formats - long expected; - - // date without time, without time zone - expected = - LocalDate.parse("2017-01-01") - .atStartOfDay() - .atZone(defaultTimeZone) - .toInstant() - .toEpochMilli(); - assertThat(parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = - LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)).toInstant().toEpochMilli(); - assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = - LocalDateTime.parse("2018-08-16T23:45").atZone(defaultTimeZone).toInstant().toEpochMilli(); - assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without time zone - expected = - LocalDateTime.parse("2019-12-31T16:08:38") - .atZone(defaultTimeZone) - .toInstant() - .toEpochMilli(); - assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - assertThat(parse("'2019-12-31 
16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = - LocalDateTime.parse("1950-02-28T12:00:59.230") - .atZone(defaultTimeZone) - .toInstant() - .toEpochMilli(); - assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00").toInstant().toEpochMilli(); - assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00").toInstant().toEpochMilli(); - assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00").toInstant().toEpochMilli(); - assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - 
assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(codec.accepts(GenericType.LONG)).isTrue(); - assertThat(codec.accepts(GenericType.INSTANT)).isFalse(); - } - - @Test - public void should_accept_raw_type() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(codec.accepts(Long.class)).isTrue(); - assertThat(codec.accepts(Long.TYPE)).isTrue(); - assertThat(codec.accepts(Instant.class)).isFalse(); - } - - @Test - public void should_accept_object() { - codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; - assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); - assertThat(codec.accepts(Instant.EPOCH)).isFalse(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java deleted file mode 100644 index cd31d13d5ca..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.extras.time; - -import static java.time.ZoneOffset.ofHours; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; -import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class ZonedTimestampCodecTest extends CodecTestBase { - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_encode(ZoneId timeZone) { - codec = ExtraTypeCodecs.zonedTimestampAt(timeZone); - assertThat(encode(Instant.EPOCH.atZone(timeZone))).isEqualTo("0x0000000000000000"); - assertThat(encode(Instant.ofEpochMilli(128).atZone(timeZone))).isEqualTo("0x0000000000000080"); - assertThat(encode(null)).isNull(); - } - - @Test - public void should_decode() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_UTC; - assertThat(decode("0x0000000000000000").toInstant().toEpochMilli()).isEqualTo(0); - assertThat(decode("0x0000000000000080").toInstant().toEpochMilli()).isEqualTo(128); - assertThat(decode(null)).isNull(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_decode_if_not_enough_bytes() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; - decode("0x0000"); - } - - @Test(expected = IllegalArgumentException.class) - public void 
should_fail_to_decode_if_too_many_bytes() { - codec = ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; - decode("0x0000000000000000" + "0000"); - } - - @Test - public void should_format() { - codec = ExtraTypeCodecs.zonedTimestampAt(ZoneOffset.ofHours(2)); - // No need to test various values because the codec delegates directly to SimpleDateFormat, - // which we assume does its job correctly. - assertThat(format(Instant.EPOCH.atZone(ZoneOffset.UTC))) - .isEqualTo("'1970-01-01T02:00:00.000+02:00'"); - assertThat(format(ZonedDateTime.parse("2018-08-16T15:59:34.123Z"))) - .isEqualTo("'2018-08-16T17:59:34.123+02:00'"); - assertThat(format(null)).isEqualTo("NULL"); - } - - @Test - @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) - public void should_parse(ZoneId timeZone) { - codec = ExtraTypeCodecs.zonedTimestampAt(timeZone); - - // Raw numbers - assertThat(parse("'0'")).isEqualTo(Instant.EPOCH.atZone(timeZone)); - assertThat(parse("'-1'")).isEqualTo(Instant.EPOCH.minusMillis(1).atZone(timeZone)); - assertThat(parse("1534463100000")) - .isEqualTo(Instant.ofEpochMilli(1534463100000L).atZone(timeZone)); - - // Date formats - ZonedDateTime expected; - - // date without time, without time zone - expected = LocalDate.parse("2017-01-01").atStartOfDay().atZone(timeZone); - assertThat(parse("'2017-01-01'")).isEqualTo(expected); - - // date without time, with time zone - expected = LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)); - assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); - assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); - - // date with time, without time zone - expected = LocalDateTime.parse("2018-08-16T23:45").atZone(timeZone); - assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); - assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); - - // date with time + seconds, without 
time zone - expected = LocalDateTime.parse("2019-12-31T16:08:38").atZone(timeZone); - assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); - assertThat(parse("'2019-12-31 16:08:38'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, without time zone - expected = LocalDateTime.parse("1950-02-28T12:00:59.230").atZone(timeZone); - assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); - assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); - - // date with time, with time zone - expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00"); - assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); - assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); - - // date with time + seconds, with time zone - expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00"); - assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); - assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); - - // date with time + seconds + milliseconds, with time zone - expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00"); - 
assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); - assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); - - assertThat(parse("NULL")).isNull(); - assertThat(parse("null")).isNull(); - assertThat(parse("")).isNull(); - assertThat(parse(null)).isNull(); - } - - @Test - public void should_fail_to_parse_invalid_input() { - codec = new ZonedTimestampCodec(); - assertThatThrownBy(() -> parse("not a timestamp")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); - assertThatThrownBy(() -> parse("'not a timestamp'")) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); - } - - @Test - public void should_accept_generic_type() { - codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(GenericType.of(ZonedDateTime.class))).isTrue(); - assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); - } - - @Test - public void should_accept_raw_type() { - codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(ZonedDateTime.class)).isTrue(); - assertThat(codec.accepts(Integer.class)).isFalse(); - } - - @Test - public void should_accept_object() { - codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(ZonedDateTime.now(ZoneOffset.systemDefault()))).isTrue(); - assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java deleted file mode 100644 index 231f67a93e7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java +++ /dev/null @@ -1,657 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; -import com.datastax.oss.driver.internal.core.type.codec.IntCodec; -import com.datastax.oss.driver.internal.core.type.codec.ListCodec; -import com.datastax.oss.driver.internal.core.type.codec.registry.CachingCodecRegistryTest.TestCachingCodecRegistry.MockCache; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Period; -import java.util.List; -import org.junit.Before; -import org.junit.Test; -import 
org.junit.runner.RunWith; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class CachingCodecRegistryTest { - - @Mock private MockCache mockCache; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - @UseDataProvider( - value = "primitiveCodecs", - location = CachingCodecRegistryTestDataProviders.class) - public void should_find_primitive_codecs_for_types(TypeCodec codec) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - DataType cqlType = codec.getCqlType(); - GenericType javaType = codec.getJavaType(); - assertThat(registry.codecFor(cqlType, javaType)).isSameAs(codec); - assertThat(registry.codecFor(cqlType)).isSameAs(codec); - assertThat(javaType.__getToken().getType()).isInstanceOf(Class.class); - Class javaClass = (Class) javaType.__getToken().getType(); - assertThat(registry.codecFor(cqlType, javaClass)).isSameAs(codec); - // Primitive mappings never hit the cache - verifyZeroInteractions(mockCache); - } - - @Test - @UseDataProvider( - value = "primitiveCodecsWithValues", - location = CachingCodecRegistryTestDataProviders.class) - public void should_find_primitive_codecs_for_value(Object value, TypeCodec codec) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThat(registry.codecFor(value)).isEqualTo(codec); - verifyZeroInteractions(mockCache); - } - - @Test - @UseDataProvider( - value = "primitiveCodecsWithCqlTypesAndValues", - location = CachingCodecRegistryTestDataProviders.class) - public void should_find_primitive_codecs_for_cql_type_and_value( - DataType cqlType, Object value, TypeCodec codec) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThat(registry.codecFor(cqlType, value)).isEqualTo(codec); - verifyZeroInteractions(mockCache); - } - - @Test - public void should_find_user_codec_for_built_in_java_type() { - // 
int and String are built-in types, but int <-> String is not a built-in mapping - CqlIntToStringCodec intToStringCodec1 = new CqlIntToStringCodec(); - // register a second codec to also check that the first one is preferred - CqlIntToStringCodec intToStringCodec2 = new CqlIntToStringCodec(); - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(intToStringCodec1, intToStringCodec2); - verify(mockCache).lookup(DataTypes.INT, GenericType.STRING, false); - - // When the mapping is not ambiguous, the user type should be returned - assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec1); - assertThat(registry.codecFor(DataTypes.INT, String.class)).isSameAs(intToStringCodec1); - assertThat(registry.codecFor(DataTypes.INT, "123")).isSameAs(intToStringCodec1); - - // When there is an ambiguity with a built-in codec, the built-in codec should have priority - assertThat(registry.codecFor(DataTypes.INT)).isSameAs(TypeCodecs.INT); - assertThat(registry.codecFor("123")).isSameAs(TypeCodecs.TEXT); - - verifyZeroInteractions(mockCache); - } - - @Test - public void should_find_user_codec_for_custom_java_type() { - TextToPeriodCodec textToPeriodCodec1 = new TextToPeriodCodec(); - TextToPeriodCodec textToPeriodCodec2 = new TextToPeriodCodec(); - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(textToPeriodCodec1, textToPeriodCodec2); - verify(mockCache).lookup(DataTypes.TEXT, GenericType.of(Period.class), false); - - assertThat(registry.codecFor(DataTypes.TEXT, GenericType.of(Period.class))) - .isSameAs(textToPeriodCodec1); - assertThat(registry.codecFor(DataTypes.TEXT, Period.class)).isSameAs(textToPeriodCodec1); - assertThat(registry.codecFor(DataTypes.TEXT, Period.ofDays(1))).isSameAs(textToPeriodCodec1); - // Now even the search by Java value only is not ambiguous - assertThat(registry.codecFor(Period.ofDays(1))).isSameAs(textToPeriodCodec1); - - // 
The search by CQL type only still returns the built-in codec - assertThat(registry.codecFor(DataTypes.TEXT)).isSameAs(TypeCodecs.TEXT); - - verifyZeroInteractions(mockCache); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_and_java_types( - DataType cqlType, GenericType javaType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, javaType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaType, false); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_type( - DataType cqlType, GenericType javaType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_type_and_java_value( - DataType cqlType, GenericType javaType, GenericType javaTypeLookup, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - 
assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaTypeLookup, true); - } - - @Test - @UseDataProvider( - value = "collectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_java_value( - DataType cqlType, GenericType javaType, GenericType javaTypeLookup, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaTypeLookup, true); - } - - @Test - @UseDataProvider( - value = "emptyCollectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_empty_java_value( - DataType cqlType, - GenericType javaType, - DataType cqlTypeLookup, - GenericType javaTypeLookup, - Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isFalse(); - assertThat(codec.accepts(javaType)).isFalse(); - assertThat(codec.accepts(value)).isTrue(); - - // Note that empty collections without CQL type are a corner case, in that the registry returns - // a codec that does not accept cqlType, nor the value's declared Java type. 
- // The only requirement is that it can encode the value, which holds true: - codec.encode(value, ProtocolVersion.DEFAULT); - - inOrder.verify(mockCache).lookup(cqlTypeLookup, javaTypeLookup, true); - } - - @Test - @UseDataProvider( - value = "emptyCollectionsWithCqlAndJavaTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_collection_codec_for_cql_type_and_empty_java_value( - DataType cqlType, GenericType javaType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - // Verify that the codec can encode the value - codec.encode(value, ProtocolVersion.DEFAULT); - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - } - - @Test - @UseDataProvider( - value = "collectionsWithNullElements", - location = CachingCodecRegistryTestDataProviders.class) - public void should_throw_for_collection_containing_null_element(Object value, String expected) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThatThrownBy(() -> registry.codecFor(value)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage(expected); - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_cql_and_java_types(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, GenericType.TUPLE_VALUE); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - 
assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.TUPLE_VALUE, false); - // field codecs are only looked up when fields are accessed, so no cache hit for list now - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_cql_type(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_cql_type_and_java_value( - DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultTupleValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - @UseDataProvider( - value = "tuplesWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_tuple_codec_for_java_value(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new 
TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); - assertThat(codec.accepts(TupleValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultTupleValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_cql_and_java_types(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, GenericType.UDT_VALUE); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.UDT_VALUE, false); - // field codecs are only looked up when fields are accessed, so no cache hit for list now - - } - - @Test - @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_cql_type(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - } - - @Test 
- @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_cql_type_and_java_value(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultUdtValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - @UseDataProvider( - value = "udtsWithCqlTypes", - location = CachingCodecRegistryTestDataProviders.class) - public void should_create_udt_codec_for_java_value(DataType cqlType, Object value) { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - TypeCodec codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); - assertThat(codec.accepts(UdtValue.class)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultUdtValue.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_not_find_codec_if_java_type_unknown() { - try { - CodecRegistry.DEFAULT.codecFor(StringBuilder.class); - fail("Should not have found a codec for ANY <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - CodecRegistry.DEFAULT.codecFor(DataTypes.TEXT, StringBuilder.class); - fail("Should not have found a codec for varchar <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - try { - 
CodecRegistry.DEFAULT.codecFor(new StringBuilder()); - fail("Should not have found a codec for ANY <-> StringBuilder"); - } catch (CodecNotFoundException e) { - // expected - } - } - - @Test - public void should_not_allow_covariance_for_lookups_by_java_type() { - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(new ACodec()); - InOrder inOrder = inOrder(mockCache); - - // covariance not allowed - - assertThatThrownBy(() -> registry.codecFor(B.class)) - .isInstanceOf(CodecNotFoundException.class) - .hasMessage("Codec not found for requested operation: [null <-> %s]", B.class.getName()); - // because of invariance, the custom A codec doesn't match so we try the cache - inOrder.verify(mockCache).lookup(null, GenericType.of(B.class), false); - inOrder.verifyNoMoreInteractions(); - - assertThatThrownBy(() -> registry.codecFor(GenericType.listOf(B.class))) - .isInstanceOf(CodecNotFoundException.class); - inOrder.verify(mockCache).lookup(null, GenericType.listOf(B.class), false); - inOrder.verify(mockCache).lookup(null, GenericType.of(B.class), false); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_allow_covariance_for_lookups_by_cql_type_and_value() { - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(new ACodec()); - InOrder inOrder = inOrder(mockCache); - inOrder.verify(mockCache).lookup(DataTypes.INT, GenericType.of(A.class), false); - - // covariance allowed - - assertThat(registry.codecFor(DataTypes.INT, new B())).isInstanceOf(ACodec.class); - // no cache hit since we find the custom codec directly - inOrder.verifyNoMoreInteractions(); - - // note: in Java, type parameters are always invariant, so List is not a subtype of List; - // but in practice, a codec for List is capable of encoding a List, so we allow it (even - // if in driver 3.x that was forbidden). 
- List list = Lists.newArrayList(new B()); - ListType cqlType = DataTypes.listOf(DataTypes.INT); - TypeCodec> actual = registry.codecFor(cqlType, list); - assertThat(actual).isInstanceOf(ListCodec.class); - assertThat(actual.getJavaType()).isEqualTo(GenericType.listOf(A.class)); - assertThat(actual.accepts(list)).isTrue(); - // accepts(GenericType) remains invariant, so it returns false for List - assertThat(actual.accepts(GenericType.listOf(B.class))).isFalse(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.listOf(B.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_allow_covariance_for_lookups_by_value() { - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(new ACodec()); - InOrder inOrder = inOrder(mockCache); - inOrder.verify(mockCache).lookup(DataTypes.INT, GenericType.of(A.class), false); - - // covariance allowed - - assertThat(registry.codecFor(new B())).isInstanceOf(ACodec.class); - // no cache hit since we find the custom codec directly - inOrder.verifyNoMoreInteractions(); - - // note: in Java, type parameters are always invariant, so List is not a subtype of List; - // but in practice, a codec for List is capable of encoding a List, so we allow it (even - // if in driver 3.x that was forbidden). 
- List list = Lists.newArrayList(new B()); - TypeCodec> actual = registry.codecFor(list); - assertThat(actual).isInstanceOf(ListCodec.class); - assertThat(actual.getJavaType()).isEqualTo(GenericType.listOf(A.class)); - assertThat(actual.accepts(list)).isTrue(); - // accepts(GenericType) remains invariant, so it returns false for List - assertThat(actual.accepts(GenericType.listOf(B.class))).isFalse(); - inOrder.verify(mockCache).lookup(null, GenericType.listOf(B.class), true); - inOrder.verifyNoMoreInteractions(); - } - - @Test - public void should_register_user_codec_at_runtime() { - CqlIntToStringCodec intToStringCodec = new CqlIntToStringCodec(); - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - registry.register(intToStringCodec); - // register checks the cache for collisions - verify(mockCache).lookup(DataTypes.INT, GenericType.STRING, false); - - // When the mapping is not ambiguous, the user type should be returned - assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec); - assertThat(registry.codecFor(DataTypes.INT, String.class)).isSameAs(intToStringCodec); - assertThat(registry.codecFor(DataTypes.INT, "123")).isSameAs(intToStringCodec); - - // When there is an ambiguity with a built-in codec, the built-in codec should have priority - assertThat(registry.codecFor(DataTypes.INT)).isSameAs(TypeCodecs.INT); - assertThat(registry.codecFor("123")).isSameAs(TypeCodecs.TEXT); - - verifyZeroInteractions(mockCache); - } - - @Test - public void should_ignore_user_codec_if_collides_with_builtin_codec() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - - IntCodec userIntCodec = new IntCodec(); - registry.register(userIntCodec); - - assertThat(registry.codecFor(DataTypes.INT, Integer.class)).isNotSameAs(userIntCodec); - } - - @Test - public void should_ignore_user_codec_if_collides_with_other_user_codec() { - TestCachingCodecRegistry registry = new 
TestCachingCodecRegistry(mockCache); - CqlIntToStringCodec intToStringCodec1 = new CqlIntToStringCodec(); - CqlIntToStringCodec intToStringCodec2 = new CqlIntToStringCodec(); - - registry.register(intToStringCodec1, intToStringCodec2); - - assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec1); - } - - @Test - public void should_ignore_user_codec_if_collides_with_generated_codec() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - - TypeCodec> userListOfIntCodec = TypeCodecs.listOf(TypeCodecs.INT); - registry.register(userListOfIntCodec); - - assertThat( - registry.codecFor(DataTypes.listOf(DataTypes.INT), GenericType.listOf(Integer.class))) - .isNotSameAs(userListOfIntCodec); - } - - // Our intent is not to test Guava cache, so we don't need an actual cache here. - // The only thing we want to check in our tests is if getCachedCodec was called. - public static class TestCachingCodecRegistry extends CachingCodecRegistry { - private final MockCache cache; - - TestCachingCodecRegistry(MockCache cache) { - super("test", CodecRegistryConstants.PRIMITIVE_CODECS); - this.cache = cache; - } - - @Override - protected TypeCodec getCachedCodec( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { - cache.lookup(cqlType, javaType, isJavaCovariant); - return createCodec(cqlType, javaType, isJavaCovariant); - } - - public interface MockCache { - void lookup( - @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant); - } - } - - public static class TextToPeriodCodec implements TypeCodec { - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(Period.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Override - public ByteBuffer encode(Period value, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("not implemented for this test"); 
- } - - @Override - public Period decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("not implemented for this test"); - } - - @NonNull - @Override - public String format(Period value) { - throw new UnsupportedOperationException("not implemented for this test"); - } - - @Override - public Period parse(String value) { - throw new UnsupportedOperationException("not implemented for this test"); - } - } - - private static class A {} - - private static class B extends A {} - - private static class ACodec implements TypeCodec { - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(A.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INT; - } - - @Override - public ByteBuffer encode(A value, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public A decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public String format(A value) { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public A parse(String value) { - throw new UnsupportedOperationException("irrelevant"); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java deleted file mode 100644 index 4c0298bafad..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java +++ /dev/null @@ -1,639 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec.registry; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; -import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.tngtech.java.junit.dataprovider.DataProvider; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.Inet4Address; -import java.net.InetAddress; -import 
java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.Collections; -import java.util.UUID; - -@SuppressWarnings("unused") -public class CachingCodecRegistryTestDataProviders { - - @DataProvider - public static Object[][] primitiveCodecs() { - return new Object[][] { - {TypeCodecs.BOOLEAN}, - {TypeCodecs.TINYINT}, - {TypeCodecs.DOUBLE}, - {TypeCodecs.COUNTER}, - {TypeCodecs.FLOAT}, - {TypeCodecs.INT}, - {TypeCodecs.BIGINT}, - {TypeCodecs.SMALLINT}, - {TypeCodecs.TIMESTAMP}, - {TypeCodecs.DATE}, - {TypeCodecs.TIME}, - {TypeCodecs.BLOB}, - {TypeCodecs.TEXT}, - {TypeCodecs.ASCII}, - {TypeCodecs.VARINT}, - {TypeCodecs.DECIMAL}, - {TypeCodecs.UUID}, - {TypeCodecs.TIMEUUID}, - {TypeCodecs.INET}, - {TypeCodecs.DURATION}, - }; - } - - @DataProvider - public static Object[][] primitiveCodecsWithValues() throws UnknownHostException { - return new Object[][] { - {true, TypeCodecs.BOOLEAN}, - {(byte) 0, TypeCodecs.TINYINT}, - {0.0, TypeCodecs.DOUBLE}, - {0.0f, TypeCodecs.FLOAT}, - {0, TypeCodecs.INT}, - {0L, TypeCodecs.BIGINT}, - {(short) 0, TypeCodecs.SMALLINT}, - {Instant.EPOCH, TypeCodecs.TIMESTAMP}, - {LocalDate.MIN, TypeCodecs.DATE}, - {LocalTime.MIDNIGHT, TypeCodecs.TIME}, - {ByteBuffer.allocate(0), TypeCodecs.BLOB}, - {"", TypeCodecs.TEXT}, - {BigInteger.ONE, TypeCodecs.VARINT}, - {BigDecimal.ONE, TypeCodecs.DECIMAL}, - {new UUID(2L, 1L), TypeCodecs.UUID}, - {InetAddress.getByName("127.0.0.1"), TypeCodecs.INET}, - {CqlDuration.newInstance(1, 2, 3), TypeCodecs.DURATION}, - }; - } - - @DataProvider - public static Object[][] primitiveCodecsWithCqlTypesAndValues() throws UnknownHostException { - return new Object[][] { - {DataTypes.BOOLEAN, true, TypeCodecs.BOOLEAN}, - {DataTypes.TINYINT, (byte) 0, TypeCodecs.TINYINT}, - {DataTypes.DOUBLE, 0.0, TypeCodecs.DOUBLE}, - {DataTypes.FLOAT, 0.0f, TypeCodecs.FLOAT}, - {DataTypes.INT, 0, TypeCodecs.INT}, - {DataTypes.BIGINT, 
0L, TypeCodecs.BIGINT}, - {DataTypes.SMALLINT, (short) 0, TypeCodecs.SMALLINT}, - {DataTypes.TIMESTAMP, Instant.EPOCH, TypeCodecs.TIMESTAMP}, - {DataTypes.DATE, LocalDate.MIN, TypeCodecs.DATE}, - {DataTypes.TIME, LocalTime.MIDNIGHT, TypeCodecs.TIME}, - {DataTypes.BLOB, ByteBuffer.allocate(0), TypeCodecs.BLOB}, - {DataTypes.TEXT, "", TypeCodecs.TEXT}, - {DataTypes.VARINT, BigInteger.ONE, TypeCodecs.VARINT}, - {DataTypes.DECIMAL, BigDecimal.ONE, TypeCodecs.DECIMAL}, - {DataTypes.UUID, new UUID(2L, 1L), TypeCodecs.UUID}, - {DataTypes.INET, InetAddress.getByName("127.0.0.1"), TypeCodecs.INET}, - {DataTypes.DURATION, CqlDuration.newInstance(1, 2, 3), TypeCodecs.DURATION}, - }; - } - - @DataProvider - public static Object[][] collectionsWithCqlAndJavaTypes() - throws UnknownHostException, ClassNotFoundException { - TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleValue tupleValue = tupleType.newValue(); - UserDefinedType userType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UdtValue udtValue = userType.newValue(); - return new Object[][] { - // lists - { - DataTypes.listOf(DataTypes.INT), - GenericType.listOf(Integer.class), - GenericType.listOf(Integer.class), - ImmutableList.of(1) - }, - { - DataTypes.listOf(DataTypes.TEXT), - GenericType.listOf(String.class), - GenericType.listOf(String.class), - ImmutableList.of("foo") - }, - { - DataTypes.listOf(DataTypes.BLOB), - GenericType.listOf(ByteBuffer.class), - GenericType.listOf(Class.forName("java.nio.HeapByteBuffer")), - ImmutableList.of(ByteBuffer.wrap(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.listOf(DataTypes.INET), - GenericType.listOf(InetAddress.class), - GenericType.listOf(Inet4Address.class), - ImmutableList.of(InetAddress.getByAddress(new 
byte[] {127, 0, 0, 1})) - }, - { - DataTypes.listOf(tupleType), - GenericType.listOf(TupleValue.class), - GenericType.listOf(DefaultTupleValue.class), - ImmutableList.of(tupleValue) - }, - { - DataTypes.listOf(userType), - GenericType.listOf(UdtValue.class), - GenericType.listOf(DefaultUdtValue.class), - ImmutableList.of(udtValue) - }, - { - DataTypes.listOf(DataTypes.listOf(DataTypes.INT)), - GenericType.listOf(GenericType.listOf(Integer.class)), - GenericType.listOf(GenericType.listOf(Integer.class)), - ImmutableList.of(ImmutableList.of(1)) - }, - { - DataTypes.listOf(DataTypes.listOf(tupleType)), - GenericType.listOf(GenericType.listOf(TupleValue.class)), - GenericType.listOf(GenericType.listOf(DefaultTupleValue.class)), - ImmutableList.of(ImmutableList.of(tupleValue)) - }, - { - DataTypes.listOf(DataTypes.listOf(userType)), - GenericType.listOf(GenericType.listOf(UdtValue.class)), - GenericType.listOf(GenericType.listOf(DefaultUdtValue.class)), - ImmutableList.of(ImmutableList.of(udtValue)) - }, - // sets - { - DataTypes.setOf(DataTypes.INT), - GenericType.setOf(Integer.class), - GenericType.setOf(Integer.class), - ImmutableSet.of(1) - }, - { - DataTypes.setOf(DataTypes.TEXT), - GenericType.setOf(String.class), - GenericType.setOf(String.class), - ImmutableSet.of("foo") - }, - { - DataTypes.setOf(DataTypes.BLOB), - GenericType.setOf(ByteBuffer.class), - GenericType.setOf(Class.forName("java.nio.HeapByteBuffer")), - ImmutableSet.of(ByteBuffer.wrap(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.setOf(DataTypes.INET), - GenericType.setOf(InetAddress.class), - GenericType.setOf(Inet4Address.class), - ImmutableSet.of(InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.setOf(tupleType), - GenericType.setOf(TupleValue.class), - GenericType.setOf(DefaultTupleValue.class), - ImmutableSet.of(tupleValue) - }, - { - DataTypes.setOf(userType), - GenericType.setOf(UdtValue.class), - GenericType.setOf(DefaultUdtValue.class), - ImmutableSet.of(udtValue) 
- }, - { - DataTypes.setOf(DataTypes.setOf(DataTypes.INT)), - GenericType.setOf(GenericType.setOf(Integer.class)), - GenericType.setOf(GenericType.setOf(Integer.class)), - ImmutableSet.of(ImmutableSet.of(1)) - }, - { - DataTypes.setOf(DataTypes.setOf(tupleType)), - GenericType.setOf(GenericType.setOf(TupleValue.class)), - GenericType.setOf(GenericType.setOf(DefaultTupleValue.class)), - ImmutableSet.of(ImmutableSet.of(tupleValue)) - }, - { - DataTypes.setOf(DataTypes.setOf(userType)), - GenericType.setOf(GenericType.setOf(UdtValue.class)), - GenericType.setOf(GenericType.setOf(DefaultUdtValue.class)), - ImmutableSet.of(ImmutableSet.of(udtValue)) - }, - // maps - { - DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), - GenericType.mapOf(Integer.class, String.class), - GenericType.mapOf(Integer.class, String.class), - ImmutableMap.of(1, "foo") - }, - { - DataTypes.mapOf(DataTypes.BLOB, DataTypes.INET), - GenericType.mapOf(ByteBuffer.class, InetAddress.class), - GenericType.mapOf(Class.forName("java.nio.HeapByteBuffer"), Inet4Address.class), - ImmutableMap.of( - ByteBuffer.wrap(new byte[] {127, 0, 0, 1}), - InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) - }, - { - DataTypes.mapOf(tupleType, tupleType), - GenericType.mapOf(TupleValue.class, TupleValue.class), - GenericType.mapOf(DefaultTupleValue.class, DefaultTupleValue.class), - ImmutableMap.of(tupleValue, tupleValue) - }, - { - DataTypes.mapOf(userType, userType), - GenericType.mapOf(UdtValue.class, UdtValue.class), - GenericType.mapOf(DefaultUdtValue.class, DefaultUdtValue.class), - ImmutableMap.of(udtValue, udtValue) - }, - { - DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), - ImmutableMap.of(UUID.randomUUID(), ImmutableMap.of(1, "foo")) - }, - { - DataTypes.mapOf(DataTypes.mapOf(userType, userType), 
DataTypes.mapOf(tupleType, tupleType)), - GenericType.mapOf( - GenericType.mapOf(UdtValue.class, UdtValue.class), - GenericType.mapOf(TupleValue.class, TupleValue.class)), - GenericType.mapOf( - GenericType.mapOf(DefaultUdtValue.class, DefaultUdtValue.class), - GenericType.mapOf(DefaultTupleValue.class, DefaultTupleValue.class)), - ImmutableMap.of( - ImmutableMap.of(udtValue, udtValue), ImmutableMap.of(tupleValue, tupleValue)) - }, - // vectors - { - DataTypes.vectorOf(DataTypes.INT, 1), - GenericType.vectorOf(Integer.class), - GenericType.vectorOf(Integer.class), - CqlVector.newInstance(1) - }, - { - DataTypes.vectorOf(DataTypes.BIGINT, 1), - GenericType.vectorOf(Long.class), - GenericType.vectorOf(Long.class), - CqlVector.newInstance(1l) - }, - { - DataTypes.vectorOf(DataTypes.SMALLINT, 1), - GenericType.vectorOf(Short.class), - GenericType.vectorOf(Short.class), - CqlVector.newInstance((short) 1) - }, - { - DataTypes.vectorOf(DataTypes.TINYINT, 1), - GenericType.vectorOf(Byte.class), - GenericType.vectorOf(Byte.class), - CqlVector.newInstance((byte) 1) - }, - { - DataTypes.vectorOf(DataTypes.FLOAT, 1), - GenericType.vectorOf(Float.class), - GenericType.vectorOf(Float.class), - CqlVector.newInstance(1.0f) - }, - { - DataTypes.vectorOf(DataTypes.DOUBLE, 1), - GenericType.vectorOf(Double.class), - GenericType.vectorOf(Double.class), - CqlVector.newInstance(1.0d) - }, - { - DataTypes.vectorOf(DataTypes.DECIMAL, 1), - GenericType.vectorOf(BigDecimal.class), - GenericType.vectorOf(BigDecimal.class), - CqlVector.newInstance(BigDecimal.ONE) - }, - { - DataTypes.vectorOf(DataTypes.VARINT, 1), - GenericType.vectorOf(BigInteger.class), - GenericType.vectorOf(BigInteger.class), - CqlVector.newInstance(BigInteger.ONE) - }, - // vector with arbitrary types - { - DataTypes.vectorOf(DataTypes.TEXT, 2), - GenericType.vectorOf(String.class), - GenericType.vectorOf(String.class), - CqlVector.newInstance("abc", "de") - }, - { - DataTypes.vectorOf(DataTypes.TIME, 2), - 
GenericType.vectorOf(LocalTime.class), - GenericType.vectorOf(LocalTime.class), - CqlVector.newInstance(LocalTime.MIDNIGHT, LocalTime.NOON) - }, - { - DataTypes.vectorOf(DataTypes.vectorOf(DataTypes.TINYINT, 2), 2), - GenericType.vectorOf(GenericType.vectorOf(Byte.class)), - GenericType.vectorOf(GenericType.vectorOf(Byte.class)), - CqlVector.newInstance( - CqlVector.newInstance((byte) 1, (byte) 2), CqlVector.newInstance((byte) 3, (byte) 4)) - }, - }; - } - - @DataProvider - public static Object[][] emptyCollectionsWithCqlAndJavaTypes() { - TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - UserDefinedType userType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - return new Object[][] { - // lists - { - DataTypes.listOf(DataTypes.INT), - GenericType.listOf(Integer.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.TEXT), - GenericType.listOf(String.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.BLOB), - GenericType.listOf(ByteBuffer.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.INET), - GenericType.listOf(InetAddress.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(tupleType), - GenericType.listOf(TupleValue.class), - DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(userType), - GenericType.listOf(UdtValue.class), - 
DataTypes.listOf(DataTypes.BOOLEAN), - GenericType.listOf(Boolean.class), - Collections.emptyList() - }, - { - DataTypes.listOf(DataTypes.listOf(DataTypes.INT)), - GenericType.listOf(GenericType.listOf(Integer.class)), - DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), - GenericType.listOf(GenericType.listOf(Boolean.class)), - ImmutableList.of(Collections.emptyList()) - }, - { - DataTypes.listOf(DataTypes.listOf(tupleType)), - GenericType.listOf(GenericType.listOf(TupleValue.class)), - DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), - GenericType.listOf(GenericType.listOf(Boolean.class)), - ImmutableList.of(Collections.emptyList()) - }, - { - DataTypes.listOf(DataTypes.listOf(userType)), - GenericType.listOf(GenericType.listOf(UdtValue.class)), - DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), - GenericType.listOf(GenericType.listOf(Boolean.class)), - ImmutableList.of(Collections.emptyList()) - }, - // sets - { - DataTypes.setOf(DataTypes.INT), - GenericType.setOf(Integer.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.TEXT), - GenericType.setOf(String.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.BLOB), - GenericType.setOf(ByteBuffer.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.INET), - GenericType.setOf(InetAddress.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(tupleType), - GenericType.setOf(TupleValue.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - Collections.emptySet() - }, - { - DataTypes.setOf(userType), - GenericType.setOf(UdtValue.class), - DataTypes.setOf(DataTypes.BOOLEAN), - GenericType.setOf(Boolean.class), - 
Collections.emptySet() - }, - { - DataTypes.setOf(DataTypes.setOf(DataTypes.INT)), - GenericType.setOf(GenericType.setOf(Integer.class)), - DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), - GenericType.setOf(GenericType.setOf(Boolean.class)), - ImmutableSet.of(Collections.emptySet()) - }, - { - DataTypes.setOf(DataTypes.setOf(tupleType)), - GenericType.setOf(GenericType.setOf(TupleValue.class)), - DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), - GenericType.setOf(GenericType.setOf(Boolean.class)), - ImmutableSet.of(Collections.emptySet()) - }, - { - DataTypes.setOf(DataTypes.setOf(userType)), - GenericType.setOf(GenericType.setOf(UdtValue.class)), - DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), - GenericType.setOf(GenericType.setOf(Boolean.class)), - ImmutableSet.of(Collections.emptySet()) - }, - // maps - { - DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), - GenericType.mapOf(Integer.class, String.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(DataTypes.BLOB, DataTypes.INET), - GenericType.mapOf(ByteBuffer.class, InetAddress.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(tupleType, tupleType), - GenericType.mapOf(TupleValue.class, TupleValue.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(userType, userType), - GenericType.mapOf(UdtValue.class, UdtValue.class), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - GenericType.mapOf(Boolean.class, Boolean.class), - Collections.emptyMap() - }, - { - DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), - 
DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN)), - GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Boolean.class, Boolean.class)), - ImmutableMap.of(UUID.randomUUID(), Collections.emptyMap()) - }, - { - DataTypes.mapOf(DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), DataTypes.UUID), - GenericType.mapOf(GenericType.mapOf(Integer.class, String.class), GenericType.UUID), - DataTypes.mapOf(DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), DataTypes.UUID), - GenericType.mapOf(GenericType.mapOf(Boolean.class, Boolean.class), GenericType.UUID), - ImmutableMap.of(Collections.emptyMap(), UUID.randomUUID()) - }, - { - DataTypes.mapOf(DataTypes.mapOf(userType, userType), DataTypes.mapOf(tupleType, tupleType)), - GenericType.mapOf( - GenericType.mapOf(UdtValue.class, UdtValue.class), - GenericType.mapOf(TupleValue.class, TupleValue.class)), - DataTypes.mapOf( - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), - DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN)), - GenericType.mapOf( - GenericType.mapOf(Boolean.class, Boolean.class), - GenericType.mapOf(Boolean.class, Boolean.class)), - ImmutableMap.of(Collections.emptyMap(), Collections.emptyMap()) - }, - }; - } - - @DataProvider - public static Object[][] collectionsWithNullElements() { - return new Object[][] { - { - Collections.singletonList(null), - "Can't infer list codec because the first element is null " - + "(note that CQL does not allow null values in collections)" - }, - { - Collections.singleton(null), - "Can't infer set codec because the first element is null " - + "(note that CQL does not allow null values in collections)" - }, - { - Collections.singletonMap("foo", null), - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)" - }, - { - Collections.singletonMap(null, "foo"), - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not 
allow null values in collections)" - }, - { - Collections.singletonMap(null, null), - "Can't infer map codec because the first key and/or value is null " - + "(note that CQL does not allow null values in collections)" - }, - }; - } - - @DataProvider - public static Object[][] tuplesWithCqlTypes() { - TupleType tupleType1 = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); - TupleType tupleType2 = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleType tupleType3 = DataTypes.tupleOf(DataTypes.mapOf(tupleType1, tupleType2)); - TupleValue tupleValue1 = tupleType1.newValue(42, "foo"); - TupleValue tupleValue2 = tupleType2.newValue(42, ImmutableList.of("foo", "bar")); - return new Object[][] { - {tupleType1, tupleType1.newValue()}, - {tupleType1, tupleValue1}, - {tupleType2, tupleType2.newValue()}, - {tupleType2, tupleValue2}, - {tupleType3, tupleType3.newValue()}, - {tupleType3, tupleType3.newValue(ImmutableMap.of(tupleValue1, tupleValue2))}, - }; - } - - @DataProvider - public static Object[][] udtsWithCqlTypes() { - UserDefinedType userType1 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) - .build(); - UserDefinedType userType2 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.setOf(DataTypes.BIGINT)) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UserDefinedType userType3 = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.mapOf(userType1, userType2)) - .build(); - UdtValue userValue1 = userType1.newValue(42, "foo"); - UdtValue userValue2 = - userType2.newValue(ImmutableSet.of(24L, 
43L), ImmutableList.of("foo", "bar")); - return new Object[][] { - {userType1, userType1.newValue()}, - {userType1, userValue1}, - {userType2, userType2.newValue()}, - {userType2, userValue2}, - {userType3, userType3.newValue()}, - {userType3, userType3.newValue(ImmutableMap.of(userValue1, userValue2))}, - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java deleted file mode 100644 index b85d6d66844..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.type.util; - -import static org.junit.Assert.assertEquals; - -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class VIntCodingTest { - @DataProvider - public static Object[] roundTripTestValues() { - return new Integer[] { - Integer.MAX_VALUE + 1, - Integer.MAX_VALUE, - Integer.MAX_VALUE - 1, - Integer.MIN_VALUE, - Integer.MIN_VALUE + 1, - Integer.MIN_VALUE - 1, - 0, - -1, - 1 - }; - }; - - private static final long[] LONGS = - new long[] { - 53L, - 10201L, - 1097151L, - 168435455L, - 33251130335L, - 3281283447775L, - 417672546086779L, - 52057592037927932L, - 72057594037927937L - }; - - @Test - public void should_compute_unsigned_vint_size() { - for (int i = 0; i < LONGS.length; i++) { - long val = LONGS[i]; - assertEquals(i + 1, VIntCoding.computeUnsignedVIntSize(val)); - } - } - - @Test - @UseDataProvider("roundTripTestValues") - public void should_write_and_read_unsigned_vint_32(int value) { - ByteBuffer bb = ByteBuffer.allocate(9); - - VIntCoding.writeUnsignedVInt32(value, bb); - bb.flip(); - assertEquals(value, VIntCoding.getUnsignedVInt32(bb, 0)); - } - - @Test - @UseDataProvider("roundTripTestValues") - public void should_write_and_read_unsigned_vint(int value) { - ByteBuffer bb = ByteBuffer.allocate(9); - - VIntCoding.writeUnsignedVInt(value, bb); - bb.flip(); - assertEquals(value, VIntCoding.getUnsignedVInt(bb, 0)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java deleted file mode 100644 index c2df6449fdb..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java +++ 
/dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.util.Random; -import org.junit.Test; - -public class ArrayUtilsTest { - - @Test - public void should_swap() { - String[] array = {"a", "b", "c"}; - ArrayUtils.swap(array, 0, 2); - assertThat(array).containsExactly("c", "b", "a"); - } - - @Test - public void should_swap_with_same_index() { - String[] array = {"a", "b", "c"}; - ArrayUtils.swap(array, 0, 0); - assertThat(array).containsExactly("a", "b", "c"); - } - - @Test - public void should_bubble_up() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleUp(array, 3, 1); - assertThat(array).containsExactly("a", "d", "b", "c", "e"); - } - - @Test - public void should_bubble_up_to_same_index() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleUp(array, 3, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_not_bubble_up_when_target_index_higher() { - String[] 
array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleUp(array, 3, 5); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_bubble_down() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleDown(array, 1, 3); - assertThat(array).containsExactly("a", "c", "d", "b", "e"); - } - - @Test - public void should_bubble_down_to_same_index() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleDown(array, 3, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_not_bubble_down_when_target_index_lower() { - String[] array = {"a", "b", "c", "d", "e"}; - ArrayUtils.bubbleDown(array, 4, 2); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_shuffle_head() { - String[] array = {"a", "b", "c", "d", "e"}; - Random random = mock(Random.class); - when(random.nextInt(anyInt())) - .thenAnswer( - (invocation) -> { - int i = invocation.getArgument(0); - // shifts elements by 1 to the right - return i - 2; - }); - ArrayUtils.shuffleHead(array, 3, random); - assertThat(array[0]).isEqualTo("c"); - assertThat(array[1]).isEqualTo("a"); - assertThat(array[2]).isEqualTo("b"); - // Tail elements should not move - assertThat(array[3]).isEqualTo("d"); - assertThat(array[4]).isEqualTo("e"); - } - - @Test(expected = ArrayIndexOutOfBoundsException.class) - public void should_fail_to_shuffle_head_when_count_is_too_high() { - ArrayUtils.shuffleHead(new String[] {"a", "b", "c"}, 5); - } - - @Test - public void should_rotate() { - String[] array = {"a", "b", "c", "d", "e"}; - - ArrayUtils.rotate(array, 1, 3, 1); - assertThat(array).containsExactly("a", "c", "d", "b", "e"); - - ArrayUtils.rotate(array, 0, 4, 2); - assertThat(array).containsExactly("d", "b", "a", "c", "e"); - - ArrayUtils.rotate(array, 2, 3, 10); - assertThat(array).containsExactly("d", "b", "c", "e", "a"); - } - - @Test - public void 
should_not_rotate_when_amount_multiple_of_range_size() { - String[] array = {"a", "b", "c", "d", "e"}; - - ArrayUtils.rotate(array, 1, 3, 9); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } - - @Test - public void should_not_rotate_when_range_is_singleton_or_empty() { - String[] array = {"a", "b", "c", "d", "e"}; - - ArrayUtils.rotate(array, 1, 1, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - - ArrayUtils.rotate(array, 1, 0, 3); - assertThat(array).containsExactly("a", "b", "c", "d", "e"); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java deleted file mode 100644 index f526e2f12d4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import java.nio.ByteBuffer; - -/** Helper class to create {@link io.netty.buffer.ByteBuf} instances in tests. 
*/ -public class ByteBufs { - public static ByteBuf wrap(int... bytes) { - ByteBuf bb = ByteBufAllocator.DEFAULT.buffer(bytes.length); - for (int b : bytes) { - bb.writeByte(b); - } - return bb; - } - - public static ByteBuf fromHexString(String hexString) { - ByteBuffer tmp = Bytes.fromHexString(hexString); - ByteBuf target = ByteBufAllocator.DEFAULT.buffer(tmp.remaining()); - target.writeBytes(tmp); - return target; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java deleted file mode 100644 index 5a95e7f3b74..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.List; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class CollectionsUtilsTest { - @Test - @UseDataProvider("listsProvider") - public void should_combine_two_lists_by_index( - List firstList, List secondList, Map expected) { - - // when - Map result = - CollectionsUtils.combineListsIntoOrderedMap(firstList, secondList); - - // then - assertThat(result).isEqualTo(expected); - } - - @Test - public void should_throw_if_lists_have_not_matching_size() { - // given - List list1 = ImmutableList.of(1); - List list2 = ImmutableList.of(1, 2); - - // when - assertThatThrownBy(() -> CollectionsUtils.combineListsIntoOrderedMap(list1, list2)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageMatching("Cannot combine lists with not matching sizes"); - } - - @DataProvider - public static Object[][] listsProvider() { - - return new Object[][] { - {ImmutableList.of(1), ImmutableList.of(1), ImmutableMap.of(1, 1)}, - {ImmutableList.of(1, 10, 5), ImmutableList.of(1, 10, 5), ImmutableMap.of(1, 1, 10, 10, 5, 5)}, - {ImmutableList.of(1, 1), ImmutableList.of(1, 2), ImmutableMap.of(1, 2)} - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java deleted file mode 100644 index 1b37a5e5b19..00000000000 --- 
a/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.Test; - -public class DirectedGraphTest { - - @Test - public void should_sort_empty_graph() { - DirectedGraph g = new DirectedGraph<>(); - assertThat(g.topologicalSort()).isEmpty(); - } - - @Test - public void should_sort_graph_with_one_node() { - DirectedGraph g = new DirectedGraph<>("A"); - assertThat(g.topologicalSort()).containsExactly("A"); - } - - @Test - public void should_sort_complex_graph() { - // H G - // / \ /\ - // F | E - // \ / / - // D / - // / \/ - // B C - // | - // A - DirectedGraph g = new DirectedGraph<>("A", "B", "C", "D", "E", "F", "G", "H"); - g.addEdge("H", "F"); - g.addEdge("G", "E"); - g.addEdge("H", "D"); - g.addEdge("F", "D"); - g.addEdge("G", "D"); - g.addEdge("D", "C"); - g.addEdge("E", "C"); - g.addEdge("D", "B"); - g.addEdge("B", "A"); - - // The graph uses linked hash maps internally, so this order will be consistent across JVMs - assertThat(g.topologicalSort()).containsExactly("G", 
"H", "E", "F", "D", "C", "B", "A"); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_sort_if_graph_has_a_cycle() { - DirectedGraph g = new DirectedGraph<>("A", "B", "C"); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "B"); - - g.topologicalSort(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_to_sort_if_graph_is_a_cycle() { - DirectedGraph g = new DirectedGraph<>("A", "B", "C"); - g.addEdge("A", "B"); - g.addEdge("B", "C"); - g.addEdge("C", "A"); - - g.topologicalSort(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java deleted file mode 100644 index eec3669efca..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util; - -import static org.mockito.Mockito.mock; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import org.mockito.ArgumentCaptor; -import org.slf4j.LoggerFactory; - -public class LoggerTest { - public static LoggerSetup setupTestLogger(Class clazz, Level levelToCapture) { - @SuppressWarnings("unchecked") - Appender appender = (Appender) mock(Appender.class); - - ArgumentCaptor loggingEventCaptor = ArgumentCaptor.forClass(ILoggingEvent.class); - Logger logger = (Logger) LoggerFactory.getLogger(clazz); - Level originalLoggerLevel = logger.getLevel(); - logger.setLevel(levelToCapture); - logger.addAppender(appender); - return new LoggerSetup(appender, originalLoggerLevel, logger, loggingEventCaptor); - } - - public static class LoggerSetup { - - private final Level originalLoggerLevel; - public final Appender appender; - public final Logger logger; - public ArgumentCaptor loggingEventCaptor; - - private LoggerSetup( - Appender appender, - Level originalLoggerLevel, - Logger logger, - ArgumentCaptor loggingEventCaptor) { - this.appender = appender; - this.originalLoggerLevel = originalLoggerLevel; - this.logger = logger; - this.loggingEventCaptor = loggingEventCaptor; - } - - public void close() { - logger.detachAppender(appender); - logger.setLevel(originalLoggerLevel); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java deleted file mode 100644 index f2614775be4..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverConfig; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy; -import com.typesafe.config.ConfigFactory; -import java.util.Map; -import org.junit.Test; - -public class ReflectionTest { - - @Test - public void should_build_policies_per_profile() { - String configSource = - "advanced.speculative-execution-policy {\n" - + " class = ConstantSpeculativeExecutionPolicy\n" - + " max-executions = 3\n" - + " delay = 100 milliseconds\n" - + "}\n" - + "profiles {\n" - // Inherits from default profile - + " profile1 {}\n" - // Inherits but changes one option - + " profile2 { \n" - + " 
advanced.speculative-execution-policy.max-executions = 2" - + " }\n" - // Same as previous profile, should share the same policy instance - + " profile3 { \n" - + " advanced.speculative-execution-policy.max-executions = 2" - + " }\n" - // Completely overrides default profile - + " profile4 { \n" - + " advanced.speculative-execution-policy.class = NoSpeculativeExecutionPolicy\n" - + " }\n" - + "}\n"; - InternalDriverContext context = mock(InternalDriverContext.class); - TypesafeDriverConfig config = new TypesafeDriverConfig(ConfigFactory.parseString(configSource)); - when(context.getConfig()).thenReturn(config); - - Map policies = - Reflection.buildFromConfigProfiles( - context, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY, - SpeculativeExecutionPolicy.class, - "com.datastax.oss.driver.internal.core.specex"); - - assertThat(policies).hasSize(5); - SpeculativeExecutionPolicy defaultPolicy = policies.get(DriverExecutionProfile.DEFAULT_NAME); - SpeculativeExecutionPolicy policy1 = policies.get("profile1"); - SpeculativeExecutionPolicy policy2 = policies.get("profile2"); - SpeculativeExecutionPolicy policy3 = policies.get("profile3"); - SpeculativeExecutionPolicy policy4 = policies.get("profile4"); - assertThat(defaultPolicy) - .isInstanceOf(ConstantSpeculativeExecutionPolicy.class) - .isSameAs(policy1); - assertThat(policy2).isInstanceOf(ConstantSpeculativeExecutionPolicy.class).isSameAs(policy3); - assertThat(policy4).isInstanceOf(NoSpeculativeExecutionPolicy.class); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java deleted file mode 100644 index d5cc9dae161..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.TestDataProviders; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class StringsTest { - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "locales") - public void should_report_cql_keyword(Locale locale) { - Locale def = Locale.getDefault(); - try { - Locale.setDefault(locale); - - assertThat(Strings.isReservedCqlKeyword(null)).isFalse(); - assertThat(Strings.isReservedCqlKeyword("NOT A RESERVED KEYWORD")).isFalse(); - - assertThat(Strings.isReservedCqlKeyword("add")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("allow")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("alter")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("and")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("apply")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("asc")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("authorize")).isTrue(); - 
assertThat(Strings.isReservedCqlKeyword("batch")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("begin")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("by")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("columnfamily")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("create")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("default")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("delete")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("desc")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("describe")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("drop")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("entries")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("execute")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("from")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("full")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("grant")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("if")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("in")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("index")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("infinity")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("insert")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("into")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("is")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("keyspace")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("limit")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("materialized")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("mbean")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("mbeans")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("modify")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("nan")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("norecursive")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("not")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("null")).isTrue(); 
- assertThat(Strings.isReservedCqlKeyword("of")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("on")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("or")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("order")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("primary")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("rename")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("replace")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("revoke")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("schema")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("select")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("set")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("table")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("to")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("token")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("truncate")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("unlogged")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("unset")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("update")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("use")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("using")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("view")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("where")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("with")).isTrue(); - - assertThat(Strings.isReservedCqlKeyword("ALLOW")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ALTER")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("AND")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("APPLY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ASC")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("AUTHORIZE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("BATCH")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("BEGIN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("BY")).isTrue(); - 
assertThat(Strings.isReservedCqlKeyword("COLUMNFAMILY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("CREATE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DEFAULT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DELETE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DESC")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DESCRIBE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("DROP")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ENTRIES")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("EXECUTE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("FROM")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("FULL")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("GRANT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("IF")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("IN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INDEX")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INFINITY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INSERT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("INTO")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("IS")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("KEYSPACE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("LIMIT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MATERIALIZED")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MBEAN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MBEANS")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("MODIFY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NAN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NORECURSIVE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NOT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("NULL")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("OF")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("ON")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("OR")).isTrue(); - 
assertThat(Strings.isReservedCqlKeyword("ORDER")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("PRIMARY")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("RENAME")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("REPLACE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("REVOKE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("SCHEMA")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("SELECT")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("SET")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TABLE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TO")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TOKEN")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("TRUNCATE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("UNLOGGED")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("UNSET")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("UPDATE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("USE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("USING")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("VIEW")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("WHERE")).isTrue(); - assertThat(Strings.isReservedCqlKeyword("WITH")).isTrue(); - } finally { - Locale.setDefault(def); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java deleted file mode 100644 index 1adc06a79d3..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class CompositeQueryPlanTest extends QueryPlanTestBase { - - @Override - protected QueryPlan newQueryPlan(Node... nodes) { - Object[] n1 = new Object[nodes.length / 2]; - Object[] n2 = new Object[nodes.length - n1.length]; - System.arraycopy(nodes, 0, n1, 0, n1.length); - System.arraycopy(nodes, n1.length, n2, 0, n2.length); - return new CompositeQueryPlan( - new SimpleQueryPlan(n1), - new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - return n2; - } - }); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java deleted file mode 100644 index 99c72bace06..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class LazyQueryPlanTest extends QueryPlanTestBase { - - @Override - protected QueryPlan newQueryPlan(Node... nodes) { - return new LazyQueryPlan() { - @Override - protected Object[] computeNodes() { - return nodes; - } - }; - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java deleted file mode 100644 index 8689c282117..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Comparator; -import java.util.Iterator; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public abstract class QueryPlanTestBase { - - @Mock private Node node1; - @Mock private Node node2; - @Mock private Node node3; - - @Test - public void should_poll_elements() { - QueryPlan queryPlan = newQueryPlan(node1, node2, node3); - assertThat(queryPlan.poll()).isSameAs(node1); - assertThat(queryPlan.poll()).isSameAs(node2); - assertThat(queryPlan.poll()).isSameAs(node3); - assertThat(queryPlan.poll()).isNull(); - assertThat(queryPlan.poll()).isNull(); - } - - @Test - public void should_poll_elements_concurrently() throws InterruptedException { - for (int runs = 0; runs < 5; runs++) { - Node[] nodes = new Node[1000]; - for (int i = 0; i < 1000; i++) { - nodes[i] = mock(Node.class, "node" + i); - when(nodes[i].getOpenConnections()).thenReturn(i); - } - QueryPlan queryPlan = newQueryPlan(nodes); - Set actual = - new ConcurrentSkipListSet<>(Comparator.comparingInt(Node::getOpenConnections)); - Thread[] threads = new Thread[5]; - for (int i = 0; i < 5; i++) { - threads[i] = - new Thread( 
- () -> { - while (true) { - Node node = queryPlan.poll(); - if (node == null) { - return; - } - actual.add(node); - } - }); - } - for (Thread thread : threads) { - thread.start(); - } - for (Thread thread : threads) { - thread.join(); - } - assertThat(actual).hasSize(1000); - Iterator iterator = actual.iterator(); - for (int i = 0; iterator.hasNext(); i++) { - Node node = iterator.next(); - assertThat(node.getOpenConnections()).isEqualTo(i); - } - } - } - - @Test - public void should_return_size() { - QueryPlan queryPlan = newQueryPlan(node1, node2, node3); - assertThat(queryPlan.size()).isEqualTo(3); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(2); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(1); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(0); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(0); - } - - @Test - public void should_return_iterator() { - QueryPlan queryPlan = newQueryPlan(node1, node2, node3); - Iterator iterator3 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator2 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator1 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator0 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator00 = queryPlan.iterator(); - - assertThat(iterator3).toIterable().containsExactly(node1, node2, node3); - assertThat(iterator2).toIterable().containsExactly(node2, node3); - assertThat(iterator1).toIterable().containsExactly(node3); - assertThat(iterator0).toIterable().isEmpty(); - assertThat(iterator00).toIterable().isEmpty(); - } - - protected abstract QueryPlan newQueryPlan(Node... 
nodes); -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java deleted file mode 100644 index 31e3e1006d7..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import com.datastax.oss.driver.api.core.metadata.Node; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class SimpleQueryPlanTest extends QueryPlanTestBase { - - @Override - protected QueryPlan newQueryPlan(Node... 
nodes) { - return new SimpleQueryPlan((Object[]) nodes); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java deleted file mode 100644 index 88cdfa80104..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.internal.core.util.concurrent; - -import io.netty.util.Timeout; -import io.netty.util.Timer; -import io.netty.util.TimerTask; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Implementation of Netty's {@link io.netty.util.Timer Timer} interface to capture scheduled {@link - * io.netty.util.Timeout Timeouts} instead of running them, so they can be run manually in tests. 
- */ -public class CapturingTimer implements Timer { - - private final ArrayBlockingQueue timeoutQueue = new ArrayBlockingQueue<>(16); - - @Override - public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { - // delay and unit are not needed as the Timeout's TimerTask will be run manually - CapturedTimeout timeout = new CapturedTimeout(task, this, delay, unit); - // add the timeout to the queue - timeoutQueue.add(timeout); - return timeout; - } - - /** - * Retrieves the next scheduled Timeout. In tests, this will usually be a request timeout or a - * speculative execution. Tests will need be able to predict the ordering as it is not easy to - * tell from the returned Timeout itself. - */ - public CapturedTimeout getNextTimeout() { - return timeoutQueue.poll(); - } - - @Override - public Set stop() { - if (timeoutQueue.isEmpty()) { - return Collections.emptySet(); - } - Set timeoutsRemaining = new HashSet<>(timeoutQueue.size()); - for (Timeout t : timeoutQueue) { - if (t != null) { - t.cancel(); - timeoutsRemaining.add(t); - } - } - return timeoutsRemaining; - } - - /** - * Implementation of Netty's {@link io.netty.util.Timeout Timeout} interface. It is just a simple - * class that keeps track of the {@link io.netty.util.TimerTask TimerTask} and the {@link - * io.netty.util.Timer Timer} implementation that should only be used in tests. The intended use - * is to call the {@link io.netty.util.TimerTask#run(io.netty.util.Timeout) run()} method on the - * TimerTask when you want to execute the task (so you don't have to depend on a real timer). - * - *

Example: - * - *

{@code
-   * // get the next timeout from the timer
-   * Timeout t = timer.getNextTimeout();
-   * // run the TimerTask associated with the timeout
-   * t.task.run(t);
-   * }
- */ - public static class CapturedTimeout implements Timeout { - - private final TimerTask task; - private final CapturingTimer timer; - private final long delay; - private final TimeUnit unit; - private final AtomicBoolean cancelled = new AtomicBoolean(false); - - private CapturedTimeout(TimerTask task, CapturingTimer timer, long delay, TimeUnit unit) { - this.task = task; - this.timer = timer; - this.delay = delay; - this.unit = unit; - } - - @Override - public Timer timer() { - return timer; - } - - @Override - public TimerTask task() { - return task; - } - - public long getDelay(TimeUnit targetUnit) { - return targetUnit.convert(delay, unit); - } - - @Override - public boolean isExpired() { - return false; - } - - @Override - public boolean isCancelled() { - return cancelled.get(); - } - - @Override - public boolean cancel() { - return cancelled.compareAndSet(false, true); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java deleted file mode 100644 index 04f96f185fd..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.fail; - -import java.util.Arrays; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.Test; - -public class CompletableFuturesTest { - @Test - public void should_not_suppress_identical_exceptions() throws Exception { - RuntimeException error = new RuntimeException(); - CompletableFuture future1 = new CompletableFuture<>(); - future1.completeExceptionally(error); - CompletableFuture future2 = new CompletableFuture<>(); - future2.completeExceptionally(error); - try { - // if timeout exception is thrown, it indicates that CompletableFutures.allSuccessful() - // did not complete the returned future and potentially caller will wait infinitely - CompletableFutures.allSuccessful(Arrays.asList(future1, future2)) - .toCompletableFuture() - .get(1, TimeUnit.SECONDS); - fail(); - } catch (ExecutionException e) { - assertThat(e.getCause()).isEqualTo(error); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java deleted file mode 100644 index 74e0801ff61..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ThreadFactoryBuilder; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import org.junit.Test; - -public class CycleDetectorTest { - - @Test - public void should_detect_cycle_within_same_thread() { - CycleDetector checker = new CycleDetector("Detected cycle", true); - CyclicContext context = new CyclicContext(checker, false); - try { - context.a.get(); - fail("Expected an exception"); - } catch (Exception e) { - assertThat(e) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Detected cycle"); - } - } - - @Test - public void should_detect_cycle_between_different_threads() throws Throwable { - CycleDetector checker = new 
CycleDetector("Detected cycle", true); - CyclicContext context = new CyclicContext(checker, true); - ExecutorService executor = - Executors.newFixedThreadPool( - 3, new ThreadFactoryBuilder().setNameFormat("thread%d").build()); - Future futureA = executor.submit(() -> context.a.get()); - Future futureB = executor.submit(() -> context.b.get()); - Future futureC = executor.submit(() -> context.c.get()); - context.latchA.countDown(); - context.latchB.countDown(); - context.latchC.countDown(); - for (Future future : ImmutableList.of(futureA, futureB, futureC)) { - try { - Uninterruptibles.getUninterruptibly(future); - } catch (ExecutionException e) { - assertThat(e.getCause()) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Detected cycle"); - } - } - } - - private static class CyclicContext { - private LazyReference a; - private LazyReference b; - private LazyReference c; - private CountDownLatch latchA; - private CountDownLatch latchB; - private CountDownLatch latchC; - - private CyclicContext(CycleDetector checker, boolean enableLatches) { - this.a = new LazyReference<>("a", this::buildA, checker); - this.b = new LazyReference<>("b", this::buildB, checker); - this.c = new LazyReference<>("c", this::buildC, checker); - if (enableLatches) { - this.latchA = new CountDownLatch(1); - this.latchB = new CountDownLatch(1); - this.latchC = new CountDownLatch(1); - } - } - - private String buildA() { - maybeAwaitUninterruptibly(latchA); - b.get(); - return "a"; - } - - private String buildB() { - maybeAwaitUninterruptibly(latchB); - c.get(); - return "b"; - } - - private String buildC() { - maybeAwaitUninterruptibly(latchC); - a.get(); - return "c"; - } - - private static void maybeAwaitUninterruptibly(CountDownLatch latch) { - if (latch != null) { - try { - latch.await(); - } catch (InterruptedException e) { - fail("interrupted", e); - } - } - } - } -} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java deleted file mode 100644 index 71c844e7051..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ScheduledFuture; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -public class DebouncerTest { - - private static final Duration DEFAULT_WINDOW = Duration.ofSeconds(1); - private static final int DEFAULT_MAX_EVENTS = 10; - - @Mock private EventExecutor adminExecutor; - @Mock private ScheduledFuture scheduledFuture; - private List results; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - when(adminExecutor.inEventLoop()).thenReturn(true); - when(adminExecutor.schedule( - any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS))) - .thenAnswer((i) -> scheduledFuture); - results = new ArrayList<>(); - } - - private String coalesce(List events) { - return Joiner.on(",").join(events); - } - - private void flush(String result) { - results.add(result); - } - - @Test - public void should_flush_synchronously_if_window_is_zero() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, Duration.ZERO, DEFAULT_MAX_EVENTS); - - debouncer.receive(1); - debouncer.receive(2); - - verify(adminExecutor, never()).schedule(any(Runnable.class), 
anyLong(), any(TimeUnit.class)); - - assertThat(results).containsExactly("1", "2"); - } - - @Test - public void should_flush_synchronously_if_max_events_is_one() { - Debouncer debouncer = - new Debouncer<>(adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, 1); - - debouncer.receive(1); - debouncer.receive(2); - - verify(adminExecutor, never()).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); - - assertThat(results).containsExactly("1", "2"); - } - - @Test - public void should_debounce_after_time_window_if_no_other_event() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - debouncer.receive(1); - - // a task should have been scheduled, run it - ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); - verify(adminExecutor) - .schedule(captor.capture(), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - captor.getValue().run(); - - // the element should have been flushed - assertThat(results).containsExactly("1"); - } - - @Test - public void should_reset_time_window_when_new_event() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - debouncer.receive(1); - debouncer.receive(2); - - InOrder inOrder = inOrder(adminExecutor, scheduledFuture); - - // a first task should have been scheduled, and then cancelled - inOrder - .verify(adminExecutor) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - inOrder.verify(scheduledFuture).cancel(true); - - // a second task should have been scheduled, run it - ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); - inOrder - .verify(adminExecutor) - .schedule(captor.capture(), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - captor.getValue().run(); - - // both elements should have been flushed together - assertThat(results).containsExactly("1,2"); - } - - @Test - public void 
should_force_flush_after_max_events() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - for (int i = 0; i < 10; i++) { - debouncer.receive(i); - } - verify(adminExecutor, times(9)) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - verify(scheduledFuture, times(9)).cancel(true); - assertThat(results).containsExactly("0,1,2,3,4,5,6,7,8,9"); - } - - @Test - public void should_cancel_next_flush_when_stopped() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - - debouncer.receive(1); - verify(adminExecutor) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - - debouncer.stop(); - verify(scheduledFuture).cancel(true); - } - - @Test - public void should_ignore_new_events_when_flushed() { - Debouncer debouncer = - new Debouncer<>( - adminExecutor, this::coalesce, this::flush, DEFAULT_WINDOW, DEFAULT_MAX_EVENTS); - debouncer.stop(); - - debouncer.receive(1); - verify(adminExecutor, never()) - .schedule(any(Runnable.class), eq(DEFAULT_WINDOW.toNanos()), eq(TimeUnit.NANOSECONDS)); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java deleted file mode 100644 index 45d0239b604..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; - -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.ImmediateEventExecutor; -import io.netty.util.concurrent.Promise; -import java.io.IOException; -import org.junit.Test; - -public class PromiseCombinerTest { - - private final EventExecutor executor = ImmediateEventExecutor.INSTANCE; - - @Test - public void should_complete_normally_if_all_parents_complete_normally() { - // given - Promise promise = executor.newPromise(); - Promise parent1 = executor.newPromise(); - Promise parent2 = executor.newPromise(); - // when - PromiseCombiner.combine(promise, parent1, parent2); - parent1.setSuccess(null); - parent2.setSuccess(null); - // then - assertThat(promise.isSuccess()).isTrue(); - } - - @Test - public void should_complete_exceptionally_if_any_parent_completes_exceptionally() { - // given - Promise promise = executor.newPromise(); - Promise parent1 = executor.newPromise(); - Promise parent2 = executor.newPromise(); - Promise parent3 = executor.newPromise(); - NullPointerException npe = new NullPointerException(); - IOException ioe = new IOException(); - // when - PromiseCombiner.combine(promise, parent1, parent2, parent3); - parent1.setSuccess(null); - parent2.setFailure(npe); - parent3.setFailure(ioe); - // then - 
assertThat(promise.isSuccess()).isFalse(); - assertThat(promise.cause()).isSameAs(npe).hasSuppressedException(ioe); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java deleted file mode 100644 index 0e541c13f92..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.TestDataProviders; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy.ReconnectionSchedule; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.util.concurrent.EventExecutor; -import java.time.Duration; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(DataProviderRunner.class) -public class ReconnectionTest { - - @Mock private ReconnectionSchedule reconnectionSchedule; - @Mock private Runnable onStartCallback; - @Mock private Runnable onStopCallback; - private EmbeddedChannel channel; - - private MockReconnectionTask reconnectionTask; - private Reconnection reconnection; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - - // Unfortunately Netty does not expose EmbeddedEventLoop, so we have to go through a channel - channel = new EmbeddedChannel(); - EventExecutor eventExecutor = channel.eventLoop(); - - reconnectionTask = new MockReconnectionTask(); - reconnection = - new Reconnection( - "test", - eventExecutor, - () -> reconnectionSchedule, - reconnectionTask, - onStartCallback, - onStopCallback); - } - - @Test - public void should_start_out_not_running() { - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void 
should_schedule_first_attempt_on_start() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(1)); - - // When - reconnection.start(); - - // Then - verify(reconnectionSchedule).nextDelay(); - assertThat(reconnection.isRunning()).isTrue(); - verify(onStartCallback).run(); - } - - @Test - public void should_ignore_start_if_already_started() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - verify(onStartCallback).run(); - - // When - reconnection.start(); - - // Then - verifyNoMoreInteractions(reconnectionSchedule, onStartCallback); - } - - @Test - public void should_stop_if_first_attempt_succeeds() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - - // When - // the reconnection task is scheduled: - runPendingTasks(); - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // the reconnection task completes: - reconnectionTask.complete(true); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isFalse(); - verify(onStopCallback).run(); - } - - @Test - public void should_reschedule_if_first_attempt_fails() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - - // When - // the reconnection task is scheduled: - runPendingTasks(); - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // the reconnection task completes: - reconnectionTask.complete(false); - runPendingTasks(); - - // Then - // schedule was called again - verify(reconnectionSchedule, times(2)).nextDelay(); - runPendingTasks(); - // task was called again - assertThat(reconnectionTask.callCount()).isEqualTo(2); - // still running - assertThat(reconnection.isRunning()).isTrue(); - - // When - // second attempt completes - 
reconnectionTask.complete(true); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isFalse(); - verify(onStopCallback).run(); - } - - @Test - public void should_reconnect_now_if_next_attempt_not_started() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - reconnection.start(); - verify(reconnectionSchedule).nextDelay(); - - // When - reconnection.reconnectNow(false); - runPendingTasks(); - - // Then - // reconnection task was run immediately - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // if that attempt fails, another reconnection should be scheduled - reconnectionTask.complete(false); - runPendingTasks(); - verify(reconnectionSchedule, times(2)).nextDelay(); - } - - @Test - public void should_reconnect_now_if_stopped_and_forced() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofDays(1)); - assertThat(reconnection.isRunning()).isFalse(); - - // When - reconnection.reconnectNow(true); - runPendingTasks(); - - // Then - // reconnection task was run immediately - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // if that attempt failed, another reconnection was scheduled - reconnectionTask.complete(false); - runPendingTasks(); - verify(reconnectionSchedule).nextDelay(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "booleans") - public void should_reconnect_now_when_attempt_in_progress(boolean force) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - runPendingTasks(); - // the next scheduled attempt has started, but not completed yet - assertThat(reconnectionTask.callCount()).isEqualTo(1); - - // When - reconnection.reconnectNow(force); - runPendingTasks(); - - // Then - // reconnection task should not have been called again - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // should still run until current attempt completes - 
assertThat(reconnection.isRunning()).isTrue(); - reconnectionTask.complete(true); - runPendingTasks(); - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void should_not_reconnect_now_if_stopped_and_not_forced() { - // Given - assertThat(reconnection.isRunning()).isFalse(); - - // When - reconnection.reconnectNow(false); - runPendingTasks(); - - // Then - assertThat(reconnectionTask.callCount()).isEqualTo(0); - } - - @Test - public void should_stop_between_attempts() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(10)); - reconnection.start(); - runPendingTasks(); - verify(reconnectionSchedule).nextDelay(); - - // When - reconnection.stop(); - runPendingTasks(); - - // Then - verify(onStopCallback).run(); - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void should_restart_after_stopped_between_attempts() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofSeconds(10)); - reconnection.start(); - runPendingTasks(); - verify(reconnectionSchedule).nextDelay(); - reconnection.stop(); - runPendingTasks(); - assertThat(reconnection.isRunning()).isFalse(); - - // When - reconnection.start(); - runPendingTasks(); - - // Then - verify(reconnectionSchedule, times(2)).nextDelay(); - assertThat(reconnection.isRunning()).isTrue(); - } - - @Test - @UseDataProvider(location = TestDataProviders.class, value = "booleans") - public void should_stop_while_attempt_in_progress(boolean outcome) { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - runPendingTasks(); - // the next scheduled attempt has started, but not completed yet - assertThat(reconnectionTask.callCount()).isEqualTo(1); - verify(onStartCallback).run(); - - // When - reconnection.stop(); - runPendingTasks(); - - // Then - // should let the current attempt complete (whatever its outcome), and become stopped only then - 
assertThat(reconnection.isRunning()).isTrue(); - verifyNoMoreInteractions(onStopCallback); - reconnectionTask.complete(outcome); - runPendingTasks(); - verify(onStopCallback).run(); - assertThat(reconnection.isRunning()).isFalse(); - } - - @Test - public void should_restart_after_stopped_while_attempt_in_progress() { - // Given - when(reconnectionSchedule.nextDelay()).thenReturn(Duration.ofNanos(1)); - reconnection.start(); - runPendingTasks(); - // the next scheduled attempt has started, but not completed yet - assertThat(reconnectionTask.callCount()).isEqualTo(1); - verify(onStartCallback).run(); - // now stop - reconnection.stop(); - runPendingTasks(); - assertThat(reconnection.isRunning()).isTrue(); - - // When - reconnection.start(); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isTrue(); - // still waiting on the same attempt, should not have called the task again - assertThat(reconnectionTask.callCount()).isEqualTo(1); - // because we were still in progress all the time, to the outside it's as if the stop/restart - // had never happened - verifyNoMoreInteractions(onStartCallback); - verifyNoMoreInteractions(onStopCallback); - - // When - reconnectionTask.complete(true); - runPendingTasks(); - - // Then - assertThat(reconnection.isRunning()).isFalse(); - verify(onStopCallback).run(); - } - - private void runPendingTasks() { - channel.runPendingTasks(); - } - - private static class MockReconnectionTask implements Callable> { - private volatile CompletableFuture nextResult; - private final AtomicInteger callCount = new AtomicInteger(); - - @Override - public CompletionStage call() throws Exception { - assertThat(nextResult == null || nextResult.isDone()).isTrue(); - callCount.incrementAndGet(); - nextResult = new CompletableFuture<>(); - return nextResult; - } - - private void complete(boolean outcome) { - assertThat(nextResult != null || !nextResult.isDone()).isTrue(); - nextResult.complete(outcome); - nextResult = null; - } - - 
private int callCount() { - return callCount.get(); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java deleted file mode 100644 index 65a2ee69b76..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.util.ArrayList; -import java.util.List; -import org.junit.Before; -import org.junit.Test; - -public class ReplayingEventFilterTest { - private ReplayingEventFilter filter; - private List filteredEvents; - - @Before - public void setup() { - filteredEvents = new ArrayList<>(); - filter = new ReplayingEventFilter<>(filteredEvents::add); - } - - @Test - public void should_discard_events_until_started() { - filter.accept(1); - filter.accept(2); - assertThat(filteredEvents).isEmpty(); - } - - @Test - public void should_accumulate_events_when_started() { - filter.accept(1); - filter.accept(2); - filter.start(); - filter.accept(3); - filter.accept(4); - assertThat(filter.recordedEvents()).containsExactly(3, 4); - } - - @Test - public void should_flush_accumulated_events_when_ready() { - filter.accept(1); - filter.accept(2); - filter.start(); - filter.accept(3); - filter.accept(4); - filter.markReady(); - assertThat(filteredEvents).containsExactly(3, 4); - filter.accept(5); - filter.accept(6); - assertThat(filteredEvents).containsExactly(3, 4, 5, 6); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java deleted file mode 100644 index 295fa545c76..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.channel.DefaultEventLoop; -import io.netty.channel.EventLoopGroup; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Extend Netty's default event loop to capture scheduled tasks instead of running them. The tasks - * can be checked later, and run manually. - * - *

Tasks submitted with {@link #execute(Runnable)} or {@link #submit(Callable)} are still - * executed normally. - * - *

This is used to make unit tests independent of time. - */ -@SuppressWarnings("FunctionalInterfaceClash") // does not matter for test code -public class ScheduledTaskCapturingEventLoop extends DefaultEventLoop { - - private final BlockingQueue> capturedTasks = new ArrayBlockingQueue<>(100); - - public ScheduledTaskCapturingEventLoop(EventLoopGroup parent) { - super(parent); - } - - @NonNull - @Override - public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { - CapturedTask task = new CapturedTask<>(callable, delay, unit); - boolean added = capturedTasks.offer(task); - assertThat(added).isTrue(); - return task.scheduledFuture; - } - - @NonNull - @Override - public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { - return schedule( - () -> { - command.run(); - return null; - }, - delay, - unit); - } - - @NonNull - @Override - public ScheduledFuture scheduleAtFixedRate( - Runnable command, long initialDelay, long period, TimeUnit unit) { - CapturedTask task = - new CapturedTask<>( - () -> { - command.run(); - return null; - }, - initialDelay, - period, - unit); - boolean added = capturedTasks.offer(task); - assertThat(added).isTrue(); - return task.scheduledFuture; - } - - @NonNull - @Override - public ScheduledFuture scheduleWithFixedDelay( - Runnable command, long initialDelay, long delay, TimeUnit unit) { - throw new UnsupportedOperationException("Not supported yet"); - } - - public CapturedTask nextTask() { - try { - return capturedTasks.poll(100, TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - fail("Unexpected interruption", e); - throw new AssertionError(); - } - } - - /** - * Wait for any pending non-scheduled task (submitted with {@code submit}, {@code execute}, etc.) - * to complete. 
- */ - public void waitForNonScheduledTasks() { - ScheduledFuture f = super.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 1, TimeUnit.SECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } - - public class CapturedTask { - private final FutureTask futureTask; - private final long initialDelay; - private final long period; - private final TimeUnit unit; - - @SuppressWarnings("unchecked") - private final ScheduledFuture scheduledFuture = mock(ScheduledFuture.class); - - CapturedTask(Callable task, long initialDelay, TimeUnit unit) { - this(task, initialDelay, -1, unit); - } - - CapturedTask(Callable task, long initialDelay, long period, TimeUnit unit) { - this.futureTask = new FutureTask<>(task); - this.initialDelay = initialDelay; - this.period = period; - this.unit = unit; - - // If the code under test cancels the scheduled future, cancel our task - when(scheduledFuture.cancel(anyBoolean())) - .thenAnswer(invocation -> futureTask.cancel(invocation.getArgument(0))); - - // Delegate methods of the scheduled future to our task (to be extended to more methods if - // needed) - when(scheduledFuture.isDone()).thenAnswer(invocation -> futureTask.isDone()); - when(scheduledFuture.isCancelled()).thenAnswer(invocation -> futureTask.isCancelled()); - } - - public void run() { - submit(futureTask); - waitForNonScheduledTasks(); - } - - public boolean isCancelled() { - // futureTask.isCancelled() can create timing issues in CI environments, so give the - // cancellation a short time to complete instead: - try { - futureTask.get(3, TimeUnit.SECONDS); - } catch (CancellationException e) { - return true; - } catch (Exception e) { - // ignore - } - return false; - } - - public long getInitialDelay(TimeUnit targetUnit) { - return targetUnit.convert(initialDelay, unit); - } - - /** By convention, 
non-recurring tasks have a negative period */ - public long getPeriod(TimeUnit targetUnit) { - return targetUnit.convert(period, unit); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java deleted file mode 100644 index cf0314cc335..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop.CapturedTask; -import io.netty.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import org.junit.Test; - -public class ScheduledTaskCapturingEventLoopTest { - - @Test - public void should_capture_task_and_let_test_complete_it_manually() { - ScheduledTaskCapturingEventLoop eventLoop = new ScheduledTaskCapturingEventLoop(null); - final AtomicBoolean ran = new AtomicBoolean(); - ScheduledFuture future = eventLoop.schedule(() -> ran.set(true), 1, TimeUnit.NANOSECONDS); - - assertThat(future.isDone()).isFalse(); - assertThat(future.isCancelled()).isFalse(); - assertThat(ran.get()).isFalse(); - - CapturedTask task = eventLoop.nextTask(); - assertThat(task.getInitialDelay(TimeUnit.NANOSECONDS)).isEqualTo(1); - - task.run(); - - assertThat(future.isDone()).isTrue(); - assertThat(future.isCancelled()).isFalse(); - assertThat(ran.get()).isTrue(); - } - - @Test - public void should_let_tested_code_cancel_future() { - ScheduledTaskCapturingEventLoop eventLoop = new ScheduledTaskCapturingEventLoop(null); - final AtomicBoolean ran = new AtomicBoolean(); - ScheduledFuture future = eventLoop.schedule(() -> ran.set(true), 1, TimeUnit.NANOSECONDS); - - assertThat(future.isDone()).isFalse(); - assertThat(future.isCancelled()).isFalse(); - assertThat(ran.get()).isFalse(); - - future.cancel(true); - - assertThat(future.isDone()).isTrue(); - assertThat(future.isCancelled()).isTrue(); - assertThat(ran.get()).isFalse(); - } -} diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md b/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md deleted file mode 100644 index 9ff9b622e5b..00000000000 --- 
a/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# How to create cert stores for ReloadingKeyManagerFactoryTest - -Need the following cert stores: -- `server.keystore` -- `client-original.keystore` -- `client-alternate.keystore` -- `server.truststore`: trusts `client-original.keystore` and `client-alternate.keystore` -- `client.truststore`: trusts `server.keystore` - -We shouldn't need any signing requests or chains of trust, since truststores are just including certs directly. - -First create the three keystores: -``` -$ keytool -genkeypair -keyalg RSA -alias server -keystore server.keystore -dname "CN=server" -storepass changeit -keypass changeit -$ keytool -genkeypair -keyalg RSA -alias client-original -keystore client-original.keystore -dname "CN=client-original" -storepass changeit -keypass changeit -$ keytool -genkeypair -keyalg RSA -alias client-alternate -keystore client-alternate.keystore -dname "CN=client-alternate" -storepass changeit -keypass changeit -``` - -Note that we need to use `-keyalg RSA` because keytool's default keyalg is DSA, which TLS 1.3 doesn't support. If DSA is -used, the handshake will fail due to the server not being able to find any authentication schemes compatible with its -x509 certificate ("Unavailable authentication scheme"). 
- -Then export all the certs: -``` -$ keytool -exportcert -keystore server.keystore -alias server -file server.cert -storepass changeit -$ keytool -exportcert -keystore client-original.keystore -alias client-original -file client-original.cert -storepass changeit -$ keytool -exportcert -keystore client-alternate.keystore -alias client-alternate -file client-alternate.cert -storepass changeit -``` - -Then create the server.truststore that trusts the two client certs: -``` -$ keytool -import -file client-original.cert -alias client-original -keystore server.truststore -storepass changeit -$ keytool -import -file client-alternate.cert -alias client-alternate -keystore server.truststore -storepass changeit -``` - -Then create the client.truststore that trusts the server cert: -``` -$ keytool -import -file server.cert -alias server -keystore client.truststore -storepass changeit -``` diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore deleted file mode 100644 index 91cee636a0b..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore and /dev/null differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore deleted file mode 100644 index 74e31f7bc6f..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore and /dev/null differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore deleted file mode 100644 index 3ce9a720dbc..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore and /dev/null differ diff --git 
a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore deleted file mode 100644 index 7d279638a34..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore and /dev/null differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore deleted file mode 100644 index c9b06b5fbe1..00000000000 Binary files a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore and /dev/null differ diff --git a/core/src/test/resources/application.conf b/core/src/test/resources/application.conf deleted file mode 100644 index efea37cc078..00000000000 --- a/core/src/test/resources/application.conf +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -datastax-java-driver { - basic.request.timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} -} diff --git a/core/src/test/resources/config/cloud/creds.zip b/core/src/test/resources/config/cloud/creds.zip deleted file mode 100644 index 3b5d1cb1cbd..00000000000 Binary files a/core/src/test/resources/config/cloud/creds.zip and /dev/null differ diff --git a/core/src/test/resources/config/cloud/identity.jks b/core/src/test/resources/config/cloud/identity.jks deleted file mode 100644 index bac5bbaa965..00000000000 Binary files a/core/src/test/resources/config/cloud/identity.jks and /dev/null differ diff --git a/core/src/test/resources/config/cloud/metadata.json b/core/src/test/resources/config/cloud/metadata.json deleted file mode 100644 index 35aa26f67f1..00000000000 --- a/core/src/test/resources/config/cloud/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"region":"local","contact_info":{"type":"sni_proxy","local_dc":"dc1","contact_points":["4ac06655-f861-49f9-881e-3fee22e69b94","2af7c253-3394-4a0d-bfac-f1ad81b5154d","b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"],"sni_proxy_address":"localhost:30002"}} diff --git a/core/src/test/resources/config/cloud/trustStore.jks b/core/src/test/resources/config/cloud/trustStore.jks deleted file mode 100644 index 8ee03f97da0..00000000000 Binary files a/core/src/test/resources/config/cloud/trustStore.jks and /dev/null differ diff --git a/core/src/test/resources/config/customApplication.conf b/core/src/test/resources/config/customApplication.conf deleted file mode 100644 index c3e3dc7b468..00000000000 --- a/core/src/test/resources/config/customApplication.conf +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -datastax-java-driver { - // Check that references to other options in `reference.conf` are correctly resolved - basic.request.timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} - - advanced.continuous-paging.max-pages = 10 -} diff --git a/core/src/test/resources/config/customApplication.json b/core/src/test/resources/config/customApplication.json deleted file mode 100644 index 4988a72cd9a..00000000000 --- a/core/src/test/resources/config/customApplication.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "datastax-java-driver": { - "basic": { - "request": { - "page-size": "2000" - } - }, - "advanced": { - "continuous-paging": { - "page-size": 2000 - } - } - } -} diff --git a/core/src/test/resources/config/customApplication.properties b/core/src/test/resources/config/customApplication.properties deleted file mode 100644 index 4c1d1ea0647..00000000000 --- a/core/src/test/resources/config/customApplication.properties +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -datastax-java-driver.basic.request.consistency=ONE -datastax-java-driver.advanced.continuous-paging.max-enqueued-pages = 8 \ No newline at end of file diff --git a/core/src/test/resources/insights/duplicate-dependencies.txt b/core/src/test/resources/insights/duplicate-dependencies.txt deleted file mode 100644 index a808dff3f57..00000000000 --- a/core/src/test/resources/insights/duplicate-dependencies.txt +++ /dev/null @@ -1,2 +0,0 @@ -io.netty:netty-handler:jar:4.0.56.Final:compile -io.netty:netty-handler:jar:4.1.2.Final:compile \ No newline at end of file diff --git a/core/src/test/resources/insights/malformed-pom.properties b/core/src/test/resources/insights/malformed-pom.properties deleted file mode 100644 index 0a503062fbd..00000000000 --- a/core/src/test/resources/insights/malformed-pom.properties +++ /dev/null @@ -1,22 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -#Created by Apache Maven 3.5.0 -#no version -groupId=io.netty -artifactId=netty-handler \ No newline at end of file diff --git a/core/src/test/resources/insights/netty-dependency-optional.txt b/core/src/test/resources/insights/netty-dependency-optional.txt deleted file mode 100644 index 2bd0cd21a0c..00000000000 --- a/core/src/test/resources/insights/netty-dependency-optional.txt +++ /dev/null @@ -1 +0,0 @@ -io.netty:netty-handler:jar:4.0.0.Final:compile (optional) \ No newline at end of file diff --git a/core/src/test/resources/insights/netty-dependency.txt b/core/src/test/resources/insights/netty-dependency.txt deleted file mode 100644 index 69c350c30e8..00000000000 --- a/core/src/test/resources/insights/netty-dependency.txt +++ /dev/null @@ -1 +0,0 @@ -io.netty:netty-handler:jar:4.0.0.Final:runtime \ No newline at end of file diff --git a/core/src/test/resources/insights/ordered-dependencies.txt b/core/src/test/resources/insights/ordered-dependencies.txt deleted file mode 100644 index a5518f89736..00000000000 --- a/core/src/test/resources/insights/ordered-dependencies.txt +++ /dev/null @@ -1,3 +0,0 @@ -b-org.com:art1:jar:1.0:compile -a-org.com:art1:jar:2.0:compile -c-org.com:art1:jar:3.0:compile \ No newline at end of file diff --git a/core/src/test/resources/insights/pom.properties b/core/src/test/resources/insights/pom.properties deleted file mode 100644 index e68a31c8fc7..00000000000 --- a/core/src/test/resources/insights/pom.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -#Created by Apache Maven 3.5.0 -version=4.0.56.Final -groupId=io.netty -artifactId=netty-handler - diff --git a/core/src/test/resources/insights/test-dependencies.txt b/core/src/test/resources/insights/test-dependencies.txt deleted file mode 100644 index e9186a35e6b..00000000000 --- a/core/src/test/resources/insights/test-dependencies.txt +++ /dev/null @@ -1,31 +0,0 @@ - -The following files have been resolved: - com.github.jnr:jffi:jar:1.2.16:compile - org.ow2.asm:asm:jar:5.0.3:compile - com.github.jnr:jnr-constants:jar:0.9.9:compile - com.esri.geometry:esri-geometry-api:jar:1.2.1:compile - com.google.guava:guava:jar:19.0:compile - com.fasterxml.jackson.core:jackson-annotations:jar:2.8.11:compile - com.github.jnr:jnr-posix:jar:3.0.44:compile - org.codehaus.jackson:jackson-core-asl:jar:1.9.12:compile - io.netty:netty-handler:jar:4.0.56.Final:compile - org.ow2.asm:asm-commons:jar:5.0.3:compile - org.ow2.asm:asm-util:jar:5.0.3:compile - org.xerial.snappy:snappy-java:jar:1.1.2.6:compile (optional) - io.netty:netty-buffer:jar:4.0.56.Final:compile - com.github.jnr:jnr-ffi:jar:2.1.7:compile - com.fasterxml.jackson.core:jackson-core:jar:2.8.11:compile - org.hdrhistogram:HdrHistogram:jar:2.1.10:compile (optional) - org.ow2.asm:asm-tree:jar:5.0.3:compile - at.yawk.lz4:lz4-java:jar:1.10.1:compile (optional) - io.netty:netty-transport:jar:4.0.56.Final:compile - io.dropwizard.metrics:metrics-core:jar:3.2.2:compile - io.netty:netty-common:jar:4.0.56.Final:compile - com.fasterxml.jackson.core:jackson-databind:jar:2.7.9.3:compile - 
org.slf4j:slf4j-api:jar:1.7.25:compile - io.netty:netty-transport-native-epoll:jar:4.0.56.Final:compile (optional) - org.ow2.asm:asm-analysis:jar:5.0.3:compile - com.github.jnr:jnr-x86asm:jar:1.0.2:compile - io.netty:netty-codec:jar:4.0.56.Final:compile - org.json:json:jar:20090211:compile - com.github.jnr:jffi:jar:native:1.2.16:runtime \ No newline at end of file diff --git a/core/src/test/resources/logback-test.xml b/core/src/test/resources/logback-test.xml deleted file mode 100644 index 1424331a31d..00000000000 --- a/core/src/test/resources/logback-test.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - diff --git a/core/src/test/resources/project.properties b/core/src/test/resources/project.properties deleted file mode 100644 index 66eab90b6e4..00000000000 --- a/core/src/test/resources/project.properties +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -project.basedir=${basedir} \ No newline at end of file diff --git a/distribution-source/pom.xml b/distribution-source/pom.xml deleted file mode 100644 index 4c1f11e53a8..00000000000 --- a/distribution-source/pom.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-distribution-source - pom - Apache Cassandra Java Driver - source distribution - - apache-cassandra-java-driver-${project.version}-source - - - maven-jar-plugin - - - - default-jar - none - - - - - maven-source-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.revapi - revapi-maven-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - - - release - - - - maven-assembly-plugin - - - assemble-source-tarball - package - - single - - - - - false - - src/assembly/source-tarball.xml - - posix - - - - net.nicoulaj.maven.plugins - checksum-maven-plugin - 1.7 - - - - artifacts - - - - - true - - sha256 - sha512 - - - - - - - - diff --git a/distribution-source/src/assembly/source-tarball.xml b/distribution-source/src/assembly/source-tarball.xml deleted file mode 100644 index b3e2d0f463a..00000000000 --- a/distribution-source/src/assembly/source-tarball.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - source-tarball - - tar.gz - - - - .. - . 
- true - - - **/*.iml - **/.classpath - **/.project - **/.java-version - **/.flattened-pom.xml - **/dependency-reduced-pom.xml - **/${project.build.directory}/** - - - - diff --git a/distribution-tests/pom.xml b/distribution-tests/pom.xml deleted file mode 100644 index 9cef313f8a5..00000000000 --- a/distribution-tests/pom.xml +++ /dev/null @@ -1,122 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-distribution-tests - Apache Cassandra Java Driver - distribution tests - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-test-infra - test - - - org.apache.cassandra - java-driver-query-builder - test - - - org.apache.cassandra - java-driver-mapper-processor - test - - - org.apache.cassandra - java-driver-mapper-runtime - test - - - org.apache.cassandra - java-driver-core - test - - - org.apache.cassandra - java-driver-metrics-micrometer - test - - - org.apache.cassandra - java-driver-metrics-microprofile - test - - - junit - junit - test - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testing.jvm}/bin/java - ${mockitoopens.argline} - 1 - - - - org.revapi - revapi-maven-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java deleted file mode 100644 index 16952e3d771..00000000000 --- a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.internal.core.util.Reflection; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessor; -import com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory; -import com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory; -import org.junit.Test; - -public class DriverDependencyTest { - @Test - public void should_include_core_jar() { - assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.core.session.Session")) - .isEqualTo(Session.class); - } - - @Test - public void should_include_query_builder_jar() { - assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.querybuilder.QueryBuilder")) - .isEqualTo(QueryBuilder.class); - } - - @Test - public void should_include_mapper_processor_jar() { - assertThat( - Reflection.loadClass( - null, "com.datastax.oss.driver.internal.mapper.processor.MapperProcessor")) - 
.isEqualTo(MapperProcessor.class); - } - - @Test - public void should_include_mapper_runtime_jar() { - assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.mapper.MapperBuilder")) - .isEqualTo(MapperBuilder.class); - } - - @Test - public void should_include_metrics_micrometer_jar() { - assertThat( - Reflection.loadClass( - null, - "com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory")) - .isEqualTo(MicrometerMetricsFactory.class); - } - - @Test - public void should_include_metrics_microprofile_jar() { - assertThat( - Reflection.loadClass( - null, - "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory")) - .isEqualTo(MicroProfileMetricsFactory.class); - } - - @Test - public void should_include_test_infra_jar() { - assertThat( - Reflection.loadClass( - null, "com.datastax.oss.driver.api.testinfra.CassandraResourceRule")) - .isEqualTo(CassandraResourceRule.class); - } -} diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java deleted file mode 100644 index 28626413487..00000000000 --- a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.util.Dependency; -import com.datastax.oss.driver.internal.core.util.Reflection; -import org.junit.Test; - -public class OptionalDependencyTest { - @Test - public void should_not_include_snappy_jar() { - Dependency.SNAPPY - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } - - @Test - public void should_not_include_l4z_jar() { - Dependency.LZ4 - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } - - @Test - public void should_not_include_esri_jar() { - Dependency.ESRI - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } - - @Test - public void should_not_include_tinkerpop_jar() { - Dependency.TINKERPOP - .classes() - .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); - } -} diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java deleted file mode 100644 index 1070bbc2fb1..00000000000 --- a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.internal.core.util.Reflection; -import org.junit.Test; - -public class ProvidedDependencyTest { - @Test - public void should_not_include_graal_sdk_jar() { - assertThat(Reflection.loadClass(null, "org.graalvm.nativeimage.VMRuntime")).isNull(); - } - - @Test - public void should_not_include_spotbugs_annotations_jar() { - assertThat(Reflection.loadClass(null, "edu.umd.cs.findbugs.annotations.NonNull")).isNull(); - } - - @Test - public void should_not_include_jicp_annotations_jar() { - assertThat(Reflection.loadClass(null, "net.jcip.annotations.ThreadSafe")).isNull(); - } - - @Test - public void should_not_include_blockhound_jar() { - assertThat(Reflection.loadClass(null, "reactor.blockhound.BlockHoundRuntime")).isNull(); - } -} diff --git a/distribution/pom.xml b/distribution/pom.xml deleted file mode 100644 index 20b9afc1bcd..00000000000 --- a/distribution/pom.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-distribution - - jar - Apache Cassandra Java Driver - binary distribution - - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - 
import - - - - - - ${project.groupId} - java-driver-core - ${project.version} - - - ${project.groupId} - java-driver-query-builder - ${project.version} - - - ${project.groupId} - java-driver-mapper-runtime - ${project.version} - - - ${project.groupId} - java-driver-mapper-processor - ${project.version} - - - - apache-cassandra-java-driver-${project.version} - - - maven-jar-plugin - - - - default-jar - none - - - - - maven-source-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.revapi - revapi-maven-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - - - release - - - - maven-assembly-plugin - - - assemble-binary-tarball - package - - single - - - - - false - - src/assembly/binary-tarball.xml - - posix - - - - net.nicoulaj.maven.plugins - checksum-maven-plugin - 1.7 - - - - artifacts - - - - - true - - sha256 - sha512 - - - - - - - - diff --git a/distribution/src/assembly/binary-tarball.xml b/distribution/src/assembly/binary-tarball.xml deleted file mode 100644 index b6294a25340..00000000000 --- a/distribution/src/assembly/binary-tarball.xml +++ /dev/null @@ -1,176 +0,0 @@ - - - - binary-tarball - - tar.gz - - true - - - - true - - org.apache.cassandra:java-driver-core - - - lib/core - false - - - lib/core - - - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-mapper-processor - - true - - - - - - - true - - org.apache.cassandra:java-driver-query-builder - - - lib/query-builder - false - - - - org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-mapper-processor - org.apache.cassandra:java-driver-guava-shaded - - com.github.stephenc.jcip:jcip-annotations - com.github.spotbugs:spotbugs-annotations - - true - - - - - - - true - - org.apache.cassandra:java-driver-mapper-runtime - - - lib/mapper-runtime - false - - - - 
org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-processor - org.apache.cassandra:java-driver-guava-shaded - - com.github.stephenc.jcip:jcip-annotations - com.github.spotbugs:spotbugs-annotations - - true - - - - - - - true - - org.apache.cassandra:java-driver-mapper-processor - - - lib/mapper-processor - false - - - - org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-guava-shaded - - com.github.stephenc.jcip:jcip-annotations - com.github.spotbugs:spotbugs-annotations - - true - - - - - - - true - - org.apache.cassandra:java-driver-core - org.apache.cassandra:java-driver-query-builder - org.apache.cassandra:java-driver-mapper-runtime - org.apache.cassandra:java-driver-mapper-processor - - - false - sources - ${module.artifactId}-${module.version}-src.zip - src - - * - - - - - - - target/apidocs - apidocs - - - .. - . - - README* - LICENSE_binary - NOTICE_binary.txt - - - - ../changelog - - - ../faq - - - ../manual - - - ../upgrade_guide - - - diff --git a/docs.yaml b/docs.yaml index 7c679a0f47e..ea99505fe26 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,24 +1,8 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - title: Java Driver summary: Java Driver for Apache Cassandra® -homepage: http://docs.datastax.com/en/developer/java-driver +homepage: http://docs.datastax.com/en/developer/java-driver/latest theme: datastax +swiftype_drivers: javadrivers sections: - title: Manual prefix: /manual @@ -30,6 +14,14 @@ sections: sources: - type: rst files: 'manual/core/configuration/reference/*.rst' + # The 'manual' section was called 'features' in older releases. Leave both + # definitions and Documentor will pick up whichever exists and ignore the + # other. + - title: Features + prefix: /features + sources: + - type: markdown + files: 'features/**/*.md' - title: Changelog prefix: /changelog sources: @@ -49,12 +41,152 @@ links: - title: Code href: https://github.com/datastax/java-driver/ - title: Docs - href: http://docs.datastax.com/en/developer/java-driver + href: http://docs.datastax.com/en/developer/java-driver/ - title: Issues href: https://datastax-oss.atlassian.net/browse/JAVA/ - title: Mailing List href: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user - title: Releases - href: https://github.com/datastax/java-driver/releases + href: https://downloads.datastax.com/#datastax-drivers api_docs: + '4.17': 'http://docs.datastax.com/en/drivers/java/4.17' + '4.16': 'http://docs.datastax.com/en/drivers/java/4.16' + '4.15': 'http://docs.datastax.com/en/drivers/java/4.15' + '4.14': 'http://docs.datastax.com/en/drivers/java/4.14' + '4.13': 'http://docs.datastax.com/en/drivers/java/4.13' + '4.12': 'http://docs.datastax.com/en/drivers/java/4.12' + '4.11': 'http://docs.datastax.com/en/drivers/java/4.11' + '4.10': 'http://docs.datastax.com/en/drivers/java/4.10' + 4.9: http://docs.datastax.com/en/drivers/java/4.9 + 4.8: http://docs.datastax.com/en/drivers/java/4.8 + 4.7: http://docs.datastax.com/en/drivers/java/4.7 + 4.6: 
http://docs.datastax.com/en/drivers/java/4.6 + 4.5: http://docs.datastax.com/en/drivers/java/4.5 + 4.4: http://docs.datastax.com/en/drivers/java/4.4 + 4.3: http://docs.datastax.com/en/drivers/java/4.3 + 4.2: http://docs.datastax.com/en/drivers/java/4.2 4.0: http://docs.datastax.com/en/drivers/java/4.0 + '3.11': 'http://docs.datastax.com/en/drivers/java/3.11' + '3.10': 'http://docs.datastax.com/en/drivers/java/3.10' + 3.9: http://docs.datastax.com/en/drivers/java/3.9 + 3.8: http://docs.datastax.com/en/drivers/java/3.8 + 3.7: http://docs.datastax.com/en/drivers/java/3.7 + 3.6: http://docs.datastax.com/en/drivers/java/3.6 + 3.5: http://docs.datastax.com/en/drivers/java/3.5 + 3.4: http://docs.datastax.com/en/drivers/java/3.4 + 3.3: http://docs.datastax.com/en/drivers/java/3.3 + 3.2: http://docs.datastax.com/en/drivers/java/3.2 + 3.1: http://docs.datastax.com/en/drivers/java/3.1 + 3.0: http://docs.datastax.com/en/drivers/java/3.0 + 2.1: http://docs.datastax.com/en/drivers/java/2.1 + 2.0: http://docs.datastax.com/en/drivers/java/2.0 +versions: + - name: '4.17' + ref: '4.17.0' + - name: '4.16' + ref: '4.16.0' + - name: '4.15' + ref: '4.15.0' + - name: '4.14' + ref: '4.14.1' + - name: '4.13' + ref: '2a1e37a21' + - name: '4.12' + ref: '4.12.1' + - name: '4.11' + ref: '4.11.3' + - name: '4.10' + ref: '346db0460' + - name: '4.9' + ref: '4.9.0' + - name: '4.8' + ref: '4.8.0_fixes' + - name: '4.7' + ref: '4.7.0' + - name: '4.6' + ref: '4.6.1_fixes' + - name: '4.5' + ref: '4.5.0_fixes' + - name: '4.4' + ref: '4.4.0_fixes' + - name: '4.3' + ref: '4.3.1' + - name: '4.2' + ref: '4.2.0' + - name: '4.0' + ref: '4.0.1' + - name: '3.11' + ref: '3.11.5' + - name: '3.10' + ref: '3.10.2_fixes' + - name: '3.9' + ref: '3.9.0' + - name: '3.8' + ref: 'c6de8beb5' + - name: '3.7' + ref: '3.7.2' + - name: '3.6' + ref: 'b10ea1828' + - name: '3.5' + ref: '3.5.x' + - name: '3.4' + ref: '3.4.x' + - name: '3.3' + ref: '3.3.x' + - name: '3.2' + ref: '3.2.x' + - name: '3.1' + ref: '3.1.x' + - name: 
'3.0' + ref: '3.0.x' + - name: '2.1' + ref: '2.1' + - name: '2.0' + ref: '2.0' +checks: + external_links: + exclude: + - 'https://twitter.com/dsJavaDriver' + - 'https://twitter.com/datastaxeng' + - 'https://twitter.com/datastax' + - 'https://projectreactor.io' + - 'https://docs.datastax.com/en/drivers/java/4.[0-9]+/com/datastax/oss/driver/internal/' + - 'http://www.planetcassandra.org/blog/user-defined-functions-in-cassandra-3-0/' + - 'http://www.planetcassandra.org/making-the-change-from-thrift-to-cql/' + - 'https://academy.datastax.com/slack' + - 'https://community.datastax.com/index.html' + - 'https://micrometer.io/docs' + - 'http://datastax.github.io/java-driver/features/shaded_jar/' + internal_links: + exclude: + - 'netty_pipeline/' + - '../core/' + - '%5Bguava%20eviction%5D' + +rewrites: + - search: 'https://(helpdocs|docs).datastax.com/(en/astra/)?(aws|gcp)/(dscloud/apollo|doc/dscloud/astra)/dscloudGettingStarted.html' + replace: 'https://docs.datastax.com/en/astra-serverless/docs/getting-started/getting-started.html' + - search: 'https://(helpdocs|docs).datastax.com/(en/astra/)?(aws|gcp)/(dscloud/apollo|doc/dscloud/astra)/dscloudObtainingCredentials.html' + replace: 'https://docs.datastax.com/en/astra-serverless/docs/connect/secure-connect-bundle.html' + - search: 'https://(helpdocs|docs).datastax.com/(en/astra/)?(aws|gcp)/(dscloud/apollo|doc/dscloud/astra)/dscloudShareClusterDetails.html' + replace: 'https://docs.datastax.com/en/astra-serverless/docs/manage/org/manage-tokens.html' + - search: 'http://downloads.datastax.com/java-driver/' + replace: 'https://downloads.datastax.com/#datastax-drivers' + - search: 'https://docs.datastax.com/en/drivers/java/(4.[0-9]+)/com/datastax/oss/driver/api/mapper/EntityHelper.html' + replace: 'https://docs.datastax.com/en/drivers/java/\1/com/datastax/oss/driver/api/mapper/entity/EntityHelper.html' + - search: '(http|https)://www.datastax.com/drivers/java/' + replace: 'https://docs.datastax.com/en/drivers/java/' + - search: 
'http://docs.datastax.com/en/drivers/java' + replace: 'https://docs.datastax.com/en/drivers/java' + - search: 'https://docs.astra.datastax.com/docs/creating-your-astra-database' + replace: 'https://docs.datastax.com/en/astra-serverless/docs/getting-started/create-db-choices.html' + - search: 'https://docs.astra.datastax.com/docs/obtaining-database-credentials' + replace: 'https://docs.datastax.com/en/astra-serverless/docs/connect/secure-connect-bundle.html' + - search: 'https://docs.datastax.com/en/drivers/java/(4.[0-9]+)/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html%60' + replace: 'https://docs.datastax.com/en/drivers/java/\1/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html' + - search: 'https://code.google.com/p/snappy/' + replace: 'https://google.github.io/snappy/' + - search: 'https://code.google.com/p/guava-libraries/wiki/ListenableFutureExplained' + replace: 'https://github.com/google/guava/wiki/ListenableFutureExplained' + - search: 'https://community.datastax.com/index.html' + replace: 'https://www.datastax.com/workshops' diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index 9d2210d8a4a..00000000000 --- a/examples/README.md +++ /dev/null @@ -1,28 +0,0 @@ - - -# Java Driver for Apache Cassandra(R) - Examples - -This module contains examples of how to use the Java Driver for -Apache Cassandra(R). - -## Usage - -Unless otherwise stated, all examples assume that you have a single-node Cassandra 3.0 cluster -listening on localhost:9042. diff --git a/examples/pom.xml b/examples/pom.xml deleted file mode 100644 index 12e42dfdf53..00000000000 --- a/examples/pom.xml +++ /dev/null @@ -1,217 +0,0 @@ - - - - 4.0.0 - - java-driver-parent - org.apache.cassandra - 4.19.3-SNAPSHOT - - java-driver-examples - Apache Cassandra Java Driver - examples. - A collection of examples to demonstrate Java Driver for Apache Cassandra(R). 
- - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - - ${project.groupId} - java-driver-core - - - ${project.groupId} - java-driver-query-builder - - - ${project.groupId} - java-driver-mapper-runtime - - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.jaxrs - jackson-jaxrs-base - ${jackson.version} - true - - - com.fasterxml.jackson.jaxrs - jackson-jaxrs-json-provider - ${jackson.version} - true - - - - javax.json - javax.json-api - true - - - org.glassfish - javax.json - true - runtime - - - - javax.ws.rs - javax.ws.rs-api - true - - - - org.glassfish.jersey.core - jersey-server - true - - - org.glassfish.jersey.media - jersey-media-json-jackson - true - - - org.glassfish.jersey.containers - jersey-container-jdk-http - true - - - - org.glassfish.hk2 - hk2-api - true - - - org.glassfish.jersey.inject - jersey-hk2 - true - - - - javax.inject - javax.inject - true - - - javax.annotation - javax.annotation-api - true - - - - ch.qos.logback - logback-classic - runtime - - - at.favre.lib - bcrypt - 0.8.0 - - - - io.projectreactor - reactor-core - - - com.github.spotbugs - spotbugs-annotations - provided - - - - - - maven-compiler-plugin - - 1.8 - 1.8 - - - org.apache.cassandra - java-driver-mapper-processor - ${project.version} - - - - - - maven-jar-plugin - - - - com.datastax.oss.driver.examples - - - - - - org.revapi - revapi-maven-plugin - - true - - - - maven-javadoc-plugin - - true - - - - maven-gpg-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/astra/AstraReadCassandraVersion.java b/examples/src/main/java/com/datastax/oss/driver/examples/astra/AstraReadCassandraVersion.java deleted file mode 100644 index ff8dc6d96f4..00000000000 --- 
a/examples/src/main/java/com/datastax/oss/driver/examples/astra/AstraReadCassandraVersion.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.astra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import java.nio.file.Paths; - -/** - * Connects to a DataStax Astra cluster and extracts basic information from it. - * - *

Preconditions: - * - *

    - *
  • A DataStax Astra cluster is running and accessible. - *
  • A DataStax Astra secure connect bundle for the running cluster. - *
- * - *

Side effects: none. - * - * @see - * Creating an Astra Database (GCP) - * @see - * Providing access to Astra databases (GCP) - * @see - * Obtaining Astra secure connect bundle (GCP) - * @see Java Driver online - * manual - */ -public class AstraReadCassandraVersion { - - public static void main(String[] args) { - - // The Session is what you use to execute queries. It is thread-safe and should be - // reused. - try (CqlSession session = - CqlSession.builder() - // Change the path here to the secure connect bundle location (see javadocs above) - .withCloudSecureConnectBundle(Paths.get("/path/to/secure-connect-database_name.zip")) - // Change the user_name and password here for the Astra instance - .withAuthCredentials("user_name", "fakePasswordForTests") - // Uncomment the next line to use a specific keyspace - // .withKeyspace("keyspace_name") - .build()) { - - // We use execute to send a query to Cassandra. This returns a ResultSet, which - // is essentially a collection of Row objects. - ResultSet rs = session.execute("select release_version from system.local"); - // Extract the first row (which is the only one in this case). - Row row = rs.one(); - - // Extract the value of the first (and only) column from the row. - assert row != null; - String releaseVersion = row.getString("release_version"); - System.out.printf("Cassandra version is: %s%n", releaseVersion); - } - // The try-with-resources block automatically close the session after we’re done with it. - // This step is important because it frees underlying resources (TCP connections, thread - // pools...). In a real application, you would typically do this at shutdown - // (for example, when undeploying your webapp). 
- } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java b/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java deleted file mode 100644 index 3dcfa702041..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.basic; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; - -/** - * Creates a keyspace and tables, and loads some data into them. - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: - * - *

    - *
  • creates a new keyspace "simplex" in the session. If a keyspace with this name already - * exists, it will be reused; - *
  • creates two tables "simplex.songs" and "simplex.playlists". If they exist already, they - * will be reused; - *
  • inserts a row in each table. - *
- * - * @see Java Driver online - * manual - */ -@SuppressWarnings("CatchAndPrintStackTrace") -public class CreateAndPopulateKeyspace { - - public static void main(String[] args) { - - CreateAndPopulateKeyspace client = new CreateAndPopulateKeyspace(); - - try { - client.connect(); - client.createSchema(); - client.loadData(); - client.querySchema(); - - } catch (Exception ex) { - ex.printStackTrace(); - } finally { - client.close(); - } - } - - private CqlSession session; - - /** Initiates a connection to the session specified by the application.conf. */ - public void connect() { - - session = CqlSession.builder().build(); - - System.out.printf("Connected session: %s%n", session.getName()); - } - - /** Creates the schema (keyspace) and tables for this example. */ - public void createSchema() { - - session.execute( - "CREATE KEYSPACE IF NOT EXISTS simplex WITH replication " - + "= {'class':'SimpleStrategy', 'replication_factor':1};"); - - session.execute( - "CREATE TABLE IF NOT EXISTS simplex.songs (" - + "id uuid PRIMARY KEY," - + "title text," - + "album text," - + "artist text," - + "tags set," - + "data blob" - + ");"); - - session.execute( - "CREATE TABLE IF NOT EXISTS simplex.playlists (" - + "id uuid," - + "title text," - + "album text, " - + "artist text," - + "song_id uuid," - + "PRIMARY KEY (id, title, album, artist)" - + ");"); - } - - /** Inserts data into the tables. 
*/ - public void loadData() { - - session.execute( - "INSERT INTO simplex.songs (id, title, album, artist, tags) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'La Petite Tonkinoise'," - + "'Bye Bye Blackbird'," - + "'Joséphine Baker'," - + "{'jazz', '2013'})" - + ";"); - - session.execute( - "INSERT INTO simplex.playlists (id, song_id, title, album, artist) " - + "VALUES (" - + "2cc9ccb7-6221-4ccb-8387-f22b6a1b354d," - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'La Petite Tonkinoise'," - + "'Bye Bye Blackbird'," - + "'Joséphine Baker'" - + ");"); - } - - /** Queries and displays data. */ - public void querySchema() { - - ResultSet results = - session.execute( - "SELECT * FROM simplex.playlists " - + "WHERE id = 2cc9ccb7-6221-4ccb-8387-f22b6a1b354d;"); - - System.out.printf("%-30s\t%-20s\t%-20s%n", "title", "album", "artist"); - System.out.println( - "-------------------------------+-----------------------+--------------------"); - - for (Row row : results) { - - System.out.printf( - "%-30s\t%-20s\t%-20s%n", - row.getString("title"), row.getString("album"), row.getString("artist")); - } - } - - /** Closes the session. */ - public void close() { - if (session != null) { - session.close(); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java b/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java deleted file mode 100644 index 63804ebfece..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.basic; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; - -/** - * Connects to a Cassandra cluster and extracts basic information from it. - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: none. - * - * @see Java Driver online - * manual - */ -public class ReadCassandraVersion { - - public static void main(String[] args) { - - // The Session is what you use to execute queries. It is thread-safe and should be - // reused. - try (CqlSession session = CqlSession.builder().build()) { - // We use execute to send a query to Cassandra. This returns a ResultSet, which - // is essentially a collection of Row objects. - ResultSet rs = session.execute("select release_version from system.local"); - // Extract the first row (which is the only one in this case). - Row row = rs.one(); - - // Extract the value of the first (and only) column from the row. - assert row != null; - String releaseVersion = row.getString("release_version"); - System.out.printf("Cassandra version is: %s%n", releaseVersion); - } - // The try-with-resources block automatically close the session after we’re done with it. - // This step is important because it frees underlying resources (TCP connections, thread - // pools...). In a real application, you would typically do this at shutdown - // (for example, when undeploying your webapp). - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java b/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java deleted file mode 100644 index d61911d19b9..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.basic; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; - -/** - * Gathers information about a Cassandra cluster's topology (which nodes belong to the cluster) and - * schema (what keyspaces, tables, etc. exist in this cluster). - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: none. - * - * @see Java Driver online - * manual - */ -public class ReadTopologyAndSchemaMetadata { - - public static void main(String[] args) { - - try (CqlSession session = CqlSession.builder().build()) { - - Metadata metadata = session.getMetadata(); - System.out.printf("Connected session: %s%n", session.getName()); - - for (Node node : metadata.getNodes().values()) { - System.out.printf( - "Datatacenter: %s; Host: %s; Rack: %s%n", - node.getDatacenter(), node.getEndPoint(), node.getRack()); - } - - for (KeyspaceMetadata keyspace : metadata.getKeyspaces().values()) { - for (TableMetadata table : keyspace.getTables().values()) { - System.out.printf("Keyspace: %s; Table: %s%n", keyspace.getName(), table.getName()); - } - } - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustom.java b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustom.java deleted file mode 100644 index 444d4f406b7..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustom.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.concurrent; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Creates a keyspace and table, and loads data using a multi-threaded approach. - * - *

This example makes usage of a {@link CqlSession#execute(String)} method, which is responsible - * for executing requests in a blocking way. It uses {@link ExecutorService} to limit number of - * concurrent request to {@code CONCURRENCY_LEVEL}. It leverages {@link CompletableFuture} to - * achieve concurrency. It maintains at most {@code IN_FLIGHT_REQUESTS} using {@link Semaphore}. - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contact points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: - * - *

    - *
  • creates a new keyspace "examples" in the session. If a keyspace with this name already - * exists, it will be reused; - *
  • creates a table "examples.tbl_sample_kv". If it exists already, it will be reused; - *
  • inserts a TOTAL_NUMBER_OF_INSERTS of rows into the table. - *
- * - * @see Java Driver online - * manual - */ -@SuppressWarnings("CatchAndPrintStackTrace") -public class LimitConcurrencyCustom { - private static final int CONCURRENCY_LEVEL = 32; - private static final int TOTAL_NUMBER_OF_INSERTS = 10_000; - private static final int IN_FLIGHT_REQUESTS = 500; - // Semaphore for limiting number of in-flight requests. - private static final Semaphore SEMAPHORE = new Semaphore(IN_FLIGHT_REQUESTS); - - // Create CountDownLatch that wait for completion of all pending requests - private static final CountDownLatch REQUEST_LATCH = new CountDownLatch(TOTAL_NUMBER_OF_INSERTS); - - public static void main(String[] args) throws InterruptedException { - - try (CqlSession session = new CqlSessionBuilder().build()) { - createSchema(session); - insertConcurrent(session); - } - } - - private static void insertConcurrent(CqlSession session) throws InterruptedException { - PreparedStatement pst = - session.prepare( - insertInto("examples", "tbl_sample_kv") - .value("id", bindMarker("id")) - .value("value", bindMarker("value")) - .build()); - - // Used to track number of total inserts - AtomicInteger insertsCounter = new AtomicInteger(); - - // Executor service with CONCURRENCY_LEVEL number of threads that states an upper limit - // on number of request in progress. - ExecutorService executor = Executors.newFixedThreadPool(CONCURRENCY_LEVEL); - - // For every i we will insert a record to db - for (int i = 0; i < TOTAL_NUMBER_OF_INSERTS; i++) { - // Before submitting a request, we need to acquire 1 permit. - // If there is no permits available it blocks caller thread. 
- SEMAPHORE.acquire(); - // Copy to final variable for usage in a separate thread - final int counter = i; - - // We are running CqlSession.execute in a separate thread pool (executor) - executor.submit( - () -> { - try { - session.execute(pst.bind().setUuid("id", Uuids.random()).setInt("value", counter)); - insertsCounter.incrementAndGet(); - } catch (Throwable t) { - // On production you should leverage logger and use logger.error() method. - t.printStackTrace(); - } finally { - // Signal that processing of this request finishes - REQUEST_LATCH.countDown(); - // Once the request is executed, we release 1 permit. - // By doing so we allow caller thread to submit another async request. - SEMAPHORE.release(); - } - }); - } - // Await for execution of TOTAL_NUMBER_OF_INSERTS - REQUEST_LATCH.await(); - - System.out.println( - String.format( - "Finished executing %s queries with a concurrency level of %s.", - insertsCounter.get(), CONCURRENCY_LEVEL)); - // Shutdown executor to free resources - executor.shutdown(); - executor.awaitTermination(10, TimeUnit.SECONDS); - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - - session.execute( - "CREATE TABLE IF NOT EXISTS examples.tbl_sample_kv (id uuid, value int, PRIMARY KEY (id))"); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustomAsync.java b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustomAsync.java deleted file mode 100644 index bec26eb7e81..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustomAsync.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.concurrent; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.BiConsumer; - -/** - * Creates a keyspace and table, and loads data using an async API. - * - *

This example makes usage of a {@link CqlSession#executeAsync(String)} method, which is - * responsible for executing requests in a non-blocking way. It uses {@link CompletableFuture} to - * limit number of concurrent request to {@code CONCURRENCY_LEVEL}. - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contact points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: - * - *

    - *
  • creates a new keyspace "examples" in the session. If a keyspace with this name already - * exists, it will be reused; - *
  • creates a table "examples.tbl_sample_kv". If it exist already, it will be reused; - *
  • inserts a TOTAL_NUMBER_OF_INSERTS of rows into the table. - *
- * - * @see Java Driver online manual - */ -public class LimitConcurrencyCustomAsync { - private static final int CONCURRENCY_LEVEL = 32; - private static final int TOTAL_NUMBER_OF_INSERTS = 10_000; - // Used to track number of total inserts - private static final AtomicInteger INSERTS_COUNTER = new AtomicInteger(); - - public static void main(String[] args) throws InterruptedException, ExecutionException { - - try (CqlSession session = new CqlSessionBuilder().build()) { - createSchema(session); - insertConcurrent(session); - } - } - - private static void insertConcurrent(CqlSession session) - throws InterruptedException, ExecutionException { - PreparedStatement pst = - session.prepare( - insertInto("examples", "tbl_sample_kv") - .value("id", bindMarker("id")) - .value("value", bindMarker("value")) - .build()); - - // Construct CONCURRENCY_LEVEL number of ranges. - // Each range will be executed independently. - List ranges = createRanges(CONCURRENCY_LEVEL, TOTAL_NUMBER_OF_INSERTS); - - // List of pending CONCURRENCY_LEVEL features that we will wait for at the end of the program. - List> pending = new ArrayList<>(); - - // Every range will have dedicated CompletableFuture handling the execution. - for (Range range : ranges) { - pending.add(executeOneAtATime(session, pst, range)); - } - - // Wait for completion of all CONCURRENCY_LEVEL pending CompletableFeatures - CompletableFuture.allOf(pending.toArray(new CompletableFuture[0])).get(); - - System.out.println( - String.format( - "Finished executing %s queries with a concurrency level of %s.", - INSERTS_COUNTER.get(), CONCURRENCY_LEVEL)); - } - - private static CompletableFuture executeOneAtATime( - CqlSession session, PreparedStatement pst, Range range) { - - CompletableFuture lastFeature = null; - for (int i = range.getFrom(); i < range.getTo(); i++) { - int counter = i; - // If this is a first request init the lastFeature. 
- if (lastFeature == null) { - lastFeature = executeInsert(session, pst, counter); - } else { - // If lastFeature is already created, chain next async action. - // The next action will execute only after the lastFeature will finish. - // If the lastFeature finishes with failure, the subsequent chained executions - // will not be invoked. If you wish to alter that behaviour and recover from failure - // add the exceptionally() call after whenComplete() of lastFeature. - lastFeature = lastFeature.thenCompose((ignored) -> executeInsert(session, pst, counter)); - } - } - return lastFeature; - } - - private static CompletableFuture executeInsert( - CqlSession session, PreparedStatement pst, int counter) { - - return session - .executeAsync(pst.bind().setUuid("id", Uuids.random()).setInt("value", counter)) - .toCompletableFuture() - .whenComplete( - (BiConsumer) - (asyncResultSet, throwable) -> { - if (throwable == null) { - // When the Feature completes and there is no exception - increment counter. - INSERTS_COUNTER.incrementAndGet(); - } else { - // On production you should leverage logger and use logger.error() method. - throwable.printStackTrace(); - } - }); - } - - private static List createRanges(int concurrencyLevel, int totalNumberOfInserts) { - ArrayList ranges = new ArrayList<>(); - int numberOfElementsInRange = totalNumberOfInserts / concurrencyLevel; - // Create concurrencyLevel number of Ranges. - for (int i = 0; i < concurrencyLevel; i++) { - // If this is a last range give it all remaining elements. - // It may be longer than numberOfElementsInRange in case of - // totalNumberOfInserts / concurrencyLevel will return floating point number. - if (i == concurrencyLevel - 1) { - ranges.add(new Range(i * numberOfElementsInRange, totalNumberOfInserts)); - } else { - // Construct Ranges with numberOfElementsInRange elements. 
- ranges.add(new Range(i * numberOfElementsInRange, (i + 1) * numberOfElementsInRange)); - } - } - return ranges; - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - - session.execute( - "CREATE TABLE IF NOT EXISTS examples.tbl_sample_kv (id uuid, value int, PRIMARY KEY (id))"); - } - - private static class Range { - private final int from; - private final int to; - - private Range(int from, int to) { - this.from = from; - this.to = to; - } - - public int getFrom() { - return from; - } - - public int getTo() { - return to; - } - - @Override - public String toString() { - return "Range{" + "from=" + from + ", to=" + to + '}'; - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java deleted file mode 100644 index 87293e50907..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.concurrent; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; - -/** - * Creates a keyspace and tables, and loads data using Async API into them. - * - *

This example makes usage of a {@link CqlSession#executeAsync(String)} method, which is - * responsible for executing requests in a non-blocking way. It uses {@link - * ConcurrencyLimitingRequestThrottler} to limit number of concurrent requests to 32. It uses - * advanced.throttler configuration to limit async concurrency (max-concurrent-requests = 32) The - * max-queue-size is set to 10000 to buffer {@code TOTAL_NUMBER_OF_INSERTS} in a queue in a case of - * initial delay. (see application.conf) - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: - * - *

    - *
  • creates a new keyspace "examples" in the session. If a keyspace with this name already - * exists, it will be reused; - *
  • creates a table "examples.tbl_sample_kv". If it exists already, it will be reused; - *
  • inserts {@code TOTAL_NUMBER_OF_INSERTS} rows into the table. - *
- * - * @see Java - * driver online manual: Request throttling - */ -public class LimitConcurrencyRequestThrottler { - private static final int TOTAL_NUMBER_OF_INSERTS = 10_000; - - public static void main(String[] args) throws InterruptedException, ExecutionException { - - try (CqlSession session = CqlSession.builder().build()) { - createSchema(session); - insertConcurrent(session); - } - } - - private static void insertConcurrent(CqlSession session) - throws InterruptedException, ExecutionException { - PreparedStatement pst = - session.prepare( - insertInto("examples", "tbl_sample_kv") - .value("id", bindMarker("id")) - .value("value", bindMarker("value")) - .build()); - // Create list of pending CompletableFutures. - // We will add every operation returned from executeAsync. - // Next, we will wait for completion of all TOTAL_NUMBER_OF_INSERTS - List> pending = new ArrayList<>(); - - // For every i we will insert a record to db - for (int i = 0; i < TOTAL_NUMBER_OF_INSERTS; i++) { - pending.add( - session - .executeAsync(pst.bind().setUuid("id", Uuids.random()).setInt("value", i)) - // Transform CompletionState toCompletableFuture to be able to wait for execution of - // all using CompletableFuture.allOf - .toCompletableFuture()); - } - - // Wait for completion of all TOTAL_NUMBER_OF_INSERTS pending requests - CompletableFuture.allOf(pending.toArray(new CompletableFuture[0])).get(); - - System.out.println( - String.format( - "Finished executing %s queries with a concurrency level of %s.", - pending.size(), - session - .getContext() - .getConfig() - .getDefaultProfile() - .getInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS))); - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - - session.execute( - "CREATE TABLE IF NOT EXISTS examples.tbl_sample_kv (id uuid, value int, PRIMARY KEY (id))"); - 
} -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java deleted file mode 100644 index 750ee49f685..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.datatypes; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.ByteUtils; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -/** - * Inserts and retrieves values in BLOB columns. - * - *

By default, the Java Driver maps this type to {@link java.nio.ByteBuffer}. The ByteBuffer API - * is a bit tricky to use at times, so we will show common pitfalls as well. We strongly recommend - * that you read the {@link java.nio.Buffer} and {@link ByteBuffer} API docs and become familiar - * with the capacity, limit and position properties. This tutorial might also help. - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
  • FILE references an existing file. - *
- * - *

Side effects: - * - *

    - *
  • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
  • creates a table "examples.blobs". If it already exists, it will be reused; - *
  • inserts data in the table. - *
- */ -public class Blobs { - - private static File FILE = new File(Blobs.class.getResource("/cassandra_logo.png").getFile()); - - public static void main(String[] args) throws IOException { - - try (CqlSession session = CqlSession.builder().build()) { - - createSchema(session); - allocateAndInsert(session); - retrieveSimpleColumn(session); - retrieveMapColumn(session); - insertConcurrent(session); - retrieveFromFileAndInsertInto(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.blobs(k int PRIMARY KEY, b blob, m map)"); - } - - private static void allocateAndInsert(CqlSession session) { - // One way to get a byte buffer is to allocate it and fill it yourself: - ByteBuffer buffer = ByteBuffer.allocate(16); - while (buffer.hasRemaining()) { - buffer.put((byte) 0xFF); - } - - // Don't forget to flip! The driver expects a buffer that is ready for reading. That is, it will - // consider all the data between buffer.position() and buffer.limit(). - // Right now we are positioned at the end because we just finished writing, so if we passed the - // buffer as-is it would appear to be empty: - assert buffer.limit() - buffer.position() == 0; - - buffer.flip(); - // Now position is back to the beginning, so the driver will see all 16 bytes. 
- assert buffer.limit() - buffer.position() == 16; - - Map map = new HashMap(); - map.put("test", buffer); - - PreparedStatement prepared = - session.prepare("INSERT INTO examples.blobs (k, b, m) VALUES (1, ?, ?)"); - - session.execute(prepared.bind(buffer, map)); - } - - private static void retrieveSimpleColumn(CqlSession session) { - Row row = session.execute("SELECT b, m FROM examples.blobs WHERE k = 1").one(); - - assert row != null; - ByteBuffer buffer = row.getByteBuffer("b"); - - // The driver always returns buffers that are ready for reading. - assert buffer != null; - assert buffer.limit() - buffer.position() == 16; - - // One way to read from the buffer is to use absolute getters. Do NOT start reading at index 0, - // as the buffer might start at a different position (we'll see an example of that later). - for (int i = buffer.position(); i < buffer.limit(); i++) { - byte b = buffer.get(i); - assert b == (byte) 0xFF; - } - - // Another way is to use relative getters. - while (buffer.hasRemaining()) { - byte b = buffer.get(); - assert b == (byte) 0xFF; - } - // Note that relative getters change the position, - // so when we're done reading we're at the end again. - assert buffer.position() == buffer.limit(); - - // Reset the position for the next operation. - buffer.flip(); - - // Yet another way is to convert the buffer to a byte array. 
- // Do NOT use buffer.array(), because it returns the - // buffer's *backing array*, which is not the same thing as its contents: - // - not all byte buffers have backing arrays - // - even then, the backing array might be larger than the buffer's contents - // - // The driver provides a utility method that handles those details for you: - byte[] array = ByteUtils.getArray(buffer); - assert array.length == 16; - for (byte b : array) { - assert b == (byte) 0xFF; - } - } - - @SuppressWarnings("ByteBufferBackingArray") - private static void retrieveMapColumn(CqlSession session) { - Row row = session.execute("SELECT b, m FROM examples.blobs WHERE k = 1").one(); - - // The map columns illustrates the pitfalls with position() and array(). - assert row != null; - Map m = row.getMap("m", String.class, ByteBuffer.class); - assert m != null; - ByteBuffer buffer = m.get("test"); - - // We did get back a buffer that contains 16 bytes as expected. - assert buffer.limit() - buffer.position() == 16; - // However, it is not positioned at 0. And you can also see that its backing array contains more - // than 16 bytes. - // What happens is that the buffer is a "view" of the last 16 of a 32-byte array. - // This is an implementation detail and you shouldn't have to worry about it if you process the - // buffer correctly (don't iterate from 0, use Bytes.getArray()). - assert buffer.position() == 16; - assert buffer.array().length == 32; - } - - private static void insertConcurrent(CqlSession session) { - PreparedStatement preparedStatement = - session.prepare("INSERT INTO examples.blobs (k, b) VALUES (1, :b)"); - - // This is another convenient utility provided by the driver. It's useful for tests. - ByteBuffer buffer = ByteUtils.fromHexString("0xffffff"); - - // When you pass a byte buffer to a bound statement, it creates a shallow copy internally with - // the buffer.duplicate() method. 
- BoundStatement boundStatement = preparedStatement.bind().setByteBuffer("b", buffer); - - // This means you can now move in the original buffer, without affecting the insertion if it - // happens later. - buffer.position(buffer.limit()); - - session.execute(boundStatement); - Row row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); - assert row != null; - assert Objects.equals(ByteUtils.toHexString(row.getByteBuffer("b")), "0xffffff"); - - buffer.flip(); - - // HOWEVER duplicate() only performs a shallow copy. The two buffers still share the same - // contents. So if you modify the contents of the original buffer, - // this will affect another execution of the bound statement. - buffer.put(0, (byte) 0xaa); - session.execute(boundStatement); - row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); - assert row != null; - assert Objects.equals(ByteUtils.toHexString(row.getByteBuffer("b")), "0xaaffff"); - - // This will also happen if you use the async API, e.g. create the bound statement, call - // executeAsync() on it and reuse the buffer immediately. - - // If you reuse buffers concurrently and want to avoid those issues, perform a deep copy of the - // buffer before passing it to the bound statement. - int startPosition = buffer.position(); - ByteBuffer buffer2 = ByteBuffer.allocate(buffer.limit() - startPosition); - buffer2.put(buffer); - buffer.position(startPosition); - buffer2.flip(); - boundStatement = boundStatement.setByteBuffer("b", buffer2); - session.execute(boundStatement); - - // Note: unlike BoundStatement, SimpleStatement does not duplicate its arguments, so even the - // position will be affected if you change it before executing the statement. - // Again, resort to deep copies if required. 
- } - - private static void retrieveFromFileAndInsertInto(CqlSession session) throws IOException { - ByteBuffer buffer = readAll(FILE); - PreparedStatement prepared = session.prepare("INSERT INTO examples.blobs (k, b) VALUES (1, ?)"); - session.execute(prepared.bind(buffer)); - - File tmpFile = File.createTempFile("blob", ".png"); - System.out.printf("Writing retrieved buffer to %s%n", tmpFile.getAbsoluteFile()); - - Row row = session.execute("SELECT b FROM examples.blobs WHERE k = 1").one(); - assert row != null; - writeAll(row.getByteBuffer("b"), tmpFile); - } - - // Note: - // - This can be improved by using new-io - // - this reads the whole file in memory in one go. If your file does not fit in memory you should - // probably not insert it into Cassandra either ;) - private static ByteBuffer readAll(File file) throws IOException { - try (FileInputStream inputStream = new FileInputStream(file)) { - FileChannel channel = inputStream.getChannel(); - ByteBuffer buffer = ByteBuffer.allocate((int) channel.size()); - channel.read(buffer); - buffer.flip(); - return buffer; - } - } - - private static void writeAll(ByteBuffer buffer, File file) throws IOException { - try (FileOutputStream outputStream = new FileOutputStream(file)) { - FileChannel channel = outputStream.getChannel(); - channel.write(buffer); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/CustomCodecs.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/CustomCodecs.java deleted file mode 100644 index a6d4c508a2e..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/CustomCodecs.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.datatypes; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.ByteUtils; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.time.ZonedDateTime; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -/** - * Inserts and retrieves values using a few custom codecs. - * - *

Preconditions: - * - *

    - *
  • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
- * - *

Side effects: - * - *

    - *
  • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
  • creates a table "examples.videos". If it already exists, it will be reused; - *
  • inserts data in the table. - *
- * - * More examples of custom codecs can be found in the following examples: - * - *
    - *
  1. Codecs for tuples and UDTs: - *
      - *
    • {@link TuplesSimple} - *
    • {@link TuplesMapped} - *
    • {@link UserDefinedTypesSimple} - *
    • {@link UserDefinedTypesMapped} - *
    - *
  2. Json codecs: - *
      - *
    • {@link com.datastax.oss.driver.examples.json.jackson.JacksonJsonColumn} - *
    • {@link com.datastax.oss.driver.examples.json.jackson.JacksonJsonFunction} - *
    • {@link com.datastax.oss.driver.examples.json.jackson.JacksonJsonRow} - *
    • {@link com.datastax.oss.driver.examples.json.jsr.Jsr353JsonColumn} - *
    • {@link com.datastax.oss.driver.examples.json.jsr.Jsr353JsonFunction} - *
    • {@link com.datastax.oss.driver.examples.json.jsr.Jsr353JsonRow} - *
    - * - * @see driver - * documentation on custom codecs - */ -public class CustomCodecs { - - public static final GenericType> OPTIONAL_OF_INET = - GenericType.optionalOf(InetAddress.class); - - /** A dummy codec converting CQL ints into Java strings. */ - public static class CqlIntToStringCodec extends MappingCodec { - - public CqlIntToStringCodec() { - super(TypeCodecs.INT, GenericType.STRING); - } - - @Nullable - @Override - protected String innerToOuter(@Nullable Integer value) { - return value == null ? null : value.toString(); - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable String value) { - return value == null ? null : Integer.parseInt(value); - } - } - - public enum WeekDay { - MONDAY, - TUESDAY, - WEDNESDAY, - THURSDAY, - FRIDAY, - SATURDAY, - SUNDAY - } - - public static void main(String[] args) { - CqlSessionBuilder builder = CqlSession.builder(); - builder = registerCodecs(builder); - try (CqlSession session = builder.build()) { - createSchema(session); - insertData(session); - retrieveData(session); - } - } - - private static CqlSessionBuilder registerCodecs(CqlSessionBuilder builder) { - return builder.addTypeCodecs( - ExtraTypeCodecs.BLOB_TO_ARRAY, // blob <-> byte[] - ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED, // tuple <-> ZonedDateTime - ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT), // list <-> String[] - ExtraTypeCodecs.enumNamesOf(WeekDay.class), // text <-> MyEnum - ExtraTypeCodecs.optionalOf(TypeCodecs.INET), // uuid <-> Optional - new CqlIntToStringCodec() // custom codec, int <-> String - ); - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.videos(" - + "pk int PRIMARY KEY, " - + "contents blob, " - + "uploaded tuple, " - + "tags list, " - + "week_day text, " - + "ip inet" - + ")"); - } - - private 
static void insertData(CqlSession session) { - // prepare the INSERT statement - PreparedStatement prepared = - session.prepare( - "INSERT INTO examples.videos (pk, contents, uploaded, tags, week_day, ip) " - + "VALUES (:pk, :contents, :uploaded, :tags, :week_day, :ip)"); - - byte[] contents = new byte[] {1, 2, 3, 4}; - ZonedDateTime uploaded = ZonedDateTime.parse("2020-03-21T15:03:45.123+01:00[Europe/Paris]"); - String[] tags = new String[] {"comedy", "US"}; - WeekDay weekDay = WeekDay.SATURDAY; - Optional maybeIp = Optional.empty(); - - // Create a BoundStatement and set values - BoundStatement boundStatement = - prepared - .bind() - .setString("pk", "1") // will use CqlIntToStringCodec - .set("contents", contents, byte[].class) // will use TypeCodecs.BLOB_SIMPLE - .set( - "uploaded", - uploaded, - ZonedDateTime.class) // will use TypeCodecs.ZONED_TIMESTAMP_PERSISTED - .set("tags", tags, String[].class) // will use TypeCodecs.arrayOf(TypeCodecs.TEXT) - .set( - "week_day", - weekDay, - WeekDay.class) // will use TypeCodecs.enumNamesOf(WeekDay.class) - .set( - "ip", maybeIp, OPTIONAL_OF_INET); // will use TypeCodecs.optionalOf(TypeCodecs.INET) - - // execute the insertion - session.execute(boundStatement); - } - - private static void retrieveData(CqlSession session) { - // Execute the SELECT query and retrieve the single row in the result set - SimpleStatement statement = - SimpleStatement.newInstance( - "SELECT pk, contents, uploaded, tags, week_day, ip FROM examples.videos WHERE pk = ?", - // Here, the primary key must be provided as an int, not as a String, because it is not - // possible to use custom codecs in simple statements, only driver built-in codecs. - // If this is an issue, use prepared statements. 
- 1); - Row row = session.execute(statement).one(); - assert row != null; - - { - // Retrieve values from row using custom codecs - String pk = row.getString("pk"); // will use CqlIntToStringCodec - byte[] contents = row.get("contents", byte[].class); // will use TypeCodecs.BLOB_SIMPLE - ZonedDateTime uploaded = - row.get("uploaded", ZonedDateTime.class); // will use TypeCodecs.ZONED_TIMESTAMP_PERSISTED - String[] tags = - row.get("tags", String[].class); // will use TypeCodecs.arrayOf(TypeCodecs.TEXT) - WeekDay weekDay = - row.get("week_day", WeekDay.class); // will use TypeCodecs.enumNamesOf(WeekDay.class) - Optional maybeIp = - row.get("ip", OPTIONAL_OF_INET); // will use TypeCodecs.optionalOf(TypeCodecs.INET) - - System.out.println("pk: " + pk); - System.out.println("contents: " + Arrays.toString(contents)); - System.out.println("uploaded: " + uploaded); - System.out.println("tags: " + Arrays.toString(tags)); - System.out.println("week day: " + weekDay); - System.out.println("ip: " + maybeIp); - } - - System.out.println("------------------"); - - { - // It is still possible to retrieve the same values from row using driver built-in codecs - int pk = row.getInt("pk"); - ByteBuffer contents = row.getByteBuffer("contents"); - TupleValue uploaded = row.getTupleValue("uploaded"); - List tags = row.getList("tags", String.class); - String weekDay = row.getString("week_day"); - InetAddress ip = row.getInetAddress("ip"); - - System.out.println("pk: " + pk); - System.out.println("contents: " + ByteUtils.toHexString(contents)); - System.out.println( - "uploaded: " + (uploaded == null ? 
null : uploaded.getFormattedContents())); - System.out.println("tags: " + tags); - System.out.println("week day: " + weekDay); - System.out.println("ip: " + ip); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesMapped.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesMapped.java deleted file mode 100644 index 1d06fc447ce..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesMapped.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.datatypes; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; - -/** - * Inserts and retrieves values in columns of tuples. - * - *

    By default, the Java Driver maps tuples to {@link TupleValue}. This example goes beyond that - * and shows how to map tuples to arbitrary Java types, leveraging the special {@link MappingCodec}. - * - *

    A simpler example of usage of tuples can be found in {@link TuplesSimple}. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.tuples". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see TuplesSimple - * @see MappingCodec - * @see driver - * documentation on custom codecs - */ -public class TuplesMapped { - - /** The Java Pojo that will be mapped to the tuple "coordinates". */ - public static class Coordinates { - - private final int x; - private final int y; - - public Coordinates(int x, int y) { - this.x = x; - this.y = y; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } else if (!(o instanceof Coordinates)) { - return false; - } else { - Coordinates that = (Coordinates) o; - return x == that.x && y == that.y; - } - } - - @Override - public int hashCode() { - return Objects.hash(x, y); - } - - @Override - public String toString() { - return "(" + x + ',' + y + ')'; - } - } - - /** The custom codec that will convert to and from {@link Coordinates}. */ - public static class CoordinatesCodec extends MappingCodec { - - public CoordinatesCodec(@NonNull TypeCodec innerCodec) { - super(innerCodec, GenericType.of(Coordinates.class)); - } - - @NonNull - @Override - public TupleType getCqlType() { - return (TupleType) super.getCqlType(); - } - - @Nullable - @Override - protected Coordinates innerToOuter(@Nullable TupleValue value) { - return value == null ? null : new Coordinates(value.getInt(0), value.getInt(1)); - } - - @Nullable - @Override - protected TupleValue outerToInner(@Nullable Coordinates value) { - return value == null - ? 
null - : this.getCqlType().newValue().setInt(0, value.x).setInt(1, value.y); - } - } - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().build()) { - createSchema(session); - registerCoordinatesCodec(session); - insertData(session); - retrieveData(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.tuples(k int PRIMARY KEY, c tuple)"); - } - - private static void registerCoordinatesCodec(CqlSession session) { - // retrieve the codec registry - MutableCodecRegistry codecRegistry = - (MutableCodecRegistry) session.getContext().getCodecRegistry(); - // create the tuple metadata - TupleType coordinatesType = DataTypes.tupleOf(DataTypes.INT, DataTypes.INT); - // retrieve the driver built-in codec for the tuple "coordinates" - TypeCodec innerCodec = codecRegistry.codecFor(coordinatesType); - // create a custom codec to map the "coordinates" tuple to the Coordinates class - CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec); - // register the new codec - codecRegistry.register(coordinatesCodec); - } - - private static void insertData(CqlSession session) { - // prepare the INSERT statement - PreparedStatement prepared = - session.prepare("INSERT INTO examples.tuples (k, c) VALUES (?, ?)"); - - // bind the parameters in one pass - Coordinates coordinates1 = new Coordinates(12, 34); - BoundStatement boundStatement1 = prepared.bind(1, coordinates1); - // execute the insertion - session.execute(boundStatement1); - - // alternate method: bind the parameters one by one - Coordinates coordinates2 = new Coordinates(56, 78); - BoundStatement boundStatement2 = - prepared.bind().setInt("k", 2).set("c", coordinates2, Coordinates.class); - // execute the insertion - session.execute(boundStatement2); - } - 
- private static void retrieveData(CqlSession session) { - for (int k = 1; k <= 2; k++) { - // Execute the SELECT query and retrieve the single row in the result set - SimpleStatement statement = - SimpleStatement.newInstance("SELECT c FROM examples.tuples WHERE k = ?", k); - Row row = session.execute(statement).one(); - assert row != null; - - // Retrieve the value for column c - Coordinates coordinatesValue = row.get("c", Coordinates.class); - assert coordinatesValue != null; - - // Display the contents of the Coordinates instance - System.out.println("found coordinate: " + coordinatesValue); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesSimple.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesSimple.java deleted file mode 100644 index 6aee96bac7f..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesSimple.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.datatypes; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.TupleType; - -/** - * Inserts and retrieves values in columns of tuple types. - * - *

    By default, the Java Driver maps tuples to {@link TupleValue}. This example shows how to - * create instances of {@link TupleValue}, how to insert them in the database, and how to retrieve - * such instances from the database. - * - *

    For a more complex example showing how to map tuples to arbitrary Java types, see {@link - * TuplesMapped}. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.tuples". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see driver - * documentation on custom codecs - * @see TuplesMapped - */ -public class TuplesSimple { - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().build()) { - createSchema(session); - insertData(session); - retrieveData(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.tuples(k int PRIMARY KEY, c tuple)"); - } - - private static void insertData(CqlSession session) { - // prepare the INSERT statement - PreparedStatement prepared = - session.prepare("INSERT INTO examples.tuples (k, c) VALUES (?, ?)"); - - // create the tuple metadata - TupleType coordinatesType = DataTypes.tupleOf(DataTypes.INT, DataTypes.INT); - - // bind the parameters in one pass - TupleValue coordinates1 = coordinatesType.newValue(12, 34); - BoundStatement boundStatement1 = prepared.bind(1, coordinates1); - // execute the insertion - session.execute(boundStatement1); - - // alternate method: bind the parameters one by one - TupleValue coordinates2 = coordinatesType.newValue(56, 78); - BoundStatement boundStatement2 = - prepared.bind().setInt("k", 2).setTupleValue("c", coordinates2); - // execute the insertion - session.execute(boundStatement2); - } - - private static void retrieveData(CqlSession session) { - for (int k = 1; k <= 2; k++) { - // Execute the SELECT query and retrieve the single row in the result set - SimpleStatement statement = - SimpleStatement.newInstance("SELECT c FROM examples.tuples WHERE k = ?", k); - Row row = session.execute(statement).one(); - assert row != null; - - // Retrieve the value for column c - TupleValue coordinatesValue = row.getTupleValue("c"); - assert coordinatesValue != null; - - // Display the contents of the tuple - System.out.printf( - "found coordinate: (%d,%d)%n", 
coordinatesValue.getInt(0), coordinatesValue.getInt(1)); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesMapped.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesMapped.java deleted file mode 100644 index ef97f507746..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesMapped.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.datatypes; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Objects; - -/** - * Inserts and retrieves values in columns of user-defined types. - * - *

    By default, the Java Driver maps user-defined types to {@link UdtValue}. This example goes - * beyond that and shows how to map user-defined types to arbitrary Java types, leveraging the - * special {@link MappingCodec}. - * - *

    A simpler example of usage of user-defined types can be found in {@link - * UserDefinedTypesSimple}. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.udts". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see UserDefinedTypesSimple - * @see MappingCodec - * @see driver - * documentation on custom codecs - */ -public class UserDefinedTypesMapped { - - /** The Java Pojo that will be mapped to the user-defined type "coordinates". */ - public static class Coordinates { - - private final int x; - private final int y; - - public Coordinates(int x, int y) { - this.x = x; - this.y = y; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } else if (!(o instanceof Coordinates)) { - return false; - } else { - Coordinates that = (Coordinates) o; - return x == that.x && y == that.y; - } - } - - @Override - public int hashCode() { - return Objects.hash(x, y); - } - - @Override - public String toString() { - return "(" + x + ',' + y + ')'; - } - } - - /** The custom codec that will convert to and from {@link Coordinates}. */ - public static class CoordinatesCodec extends MappingCodec { - - public CoordinatesCodec(@NonNull TypeCodec innerCodec) { - super(innerCodec, GenericType.of(Coordinates.class)); - } - - @NonNull - @Override - public UserDefinedType getCqlType() { - return (UserDefinedType) super.getCqlType(); - } - - @Nullable - @Override - protected Coordinates innerToOuter(@Nullable UdtValue value) { - return value == null ? null : new Coordinates(value.getInt("x"), value.getInt("y")); - } - - @Nullable - @Override - protected UdtValue outerToInner(@Nullable Coordinates value) { - return value == null - ? 
null - : getCqlType().newValue().setInt("x", value.x).setInt("y", value.y); - } - } - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().build()) { - createSchema(session); - registerCoordinatesCodec(session); - insertData(session); - retrieveData(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TYPE IF NOT EXISTS examples.coordinates(x int, y int)"); - session.execute("CREATE TABLE IF NOT EXISTS examples.udts(k int PRIMARY KEY, c coordinates)"); - } - - private static void registerCoordinatesCodec(CqlSession session) { - // retrieve the codec registry - MutableCodecRegistry codecRegistry = - (MutableCodecRegistry) session.getContext().getCodecRegistry(); - // retrieve the user-defined type metadata - UserDefinedType coordinatesType = retrieveCoordinatesType(session); - // retrieve the driver built-in codec for the user-defined type "coordinates" - TypeCodec innerCodec = codecRegistry.codecFor(coordinatesType); - // create a custom codec to map the "coordinates" user-defined type to the Coordinates class - CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec); - // register the new codec - codecRegistry.register(coordinatesCodec); - } - - private static void insertData(CqlSession session) { - // prepare the INSERT statement - PreparedStatement prepared = session.prepare("INSERT INTO examples.udts (k, c) VALUES (?, ?)"); - - // bind the parameters in one pass - Coordinates coordinates1 = new Coordinates(12, 34); - BoundStatement boundStatement1 = prepared.bind(1, coordinates1); - // execute the insertion - session.execute(boundStatement1); - - // alternate method: bind the parameters one by one - Coordinates coordinates2 = new Coordinates(56, 78); - BoundStatement boundStatement2 = - prepared.bind().setInt("k", 
2).set("c", coordinates2, Coordinates.class); - // execute the insertion - session.execute(boundStatement2); - } - - private static void retrieveData(CqlSession session) { - for (int k = 1; k <= 2; k++) { - // Execute the SELECT query and retrieve the single row in the result set - SimpleStatement statement = - SimpleStatement.newInstance("SELECT c FROM examples.udts WHERE k = ?", k); - Row row = session.execute(statement).one(); - assert row != null; - - // Retrieve the value for column c - Coordinates coordinatesValue = row.get("c", Coordinates.class); - assert coordinatesValue != null; - - // Display the contents of the Coordinates instance - System.out.println("found coordinate: " + coordinatesValue); - } - } - - private static UserDefinedType retrieveCoordinatesType(CqlSession session) { - return session - .getMetadata() - .getKeyspace("examples") - .flatMap(ks -> ks.getUserDefinedType("coordinates")) - .orElseThrow(IllegalStateException::new); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesSimple.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesSimple.java deleted file mode 100644 index 4387cde5b0f..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesSimple.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.datatypes; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; - -/** - * Inserts and retrieves values in columns of user-defined types. - * - *

    By default, the Java Driver maps user-defined types to {@link UdtValue}. This example shows - * how to create instances of {@link UdtValue}, how to insert them in the database, and how to - * retrieve such instances from the database. - * - *

    For a more complex example showing how to map user-defined types to arbitrary Java types, see - * {@link UserDefinedTypesMapped}. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.udts". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see driver - * documentation on custom codecs - * @see UserDefinedTypesMapped - */ -public class UserDefinedTypesSimple { - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().build()) { - createSchema(session); - insertData(session); - retrieveData(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE TYPE IF NOT EXISTS examples.coordinates(x int, y int)"); - session.execute("CREATE TABLE IF NOT EXISTS examples.udts(k int PRIMARY KEY, c coordinates)"); - } - - private static void insertData(CqlSession session) { - // prepare the INSERT statement - PreparedStatement prepared = session.prepare("INSERT INTO examples.udts (k, c) VALUES (?, ?)"); - - // retrieve the user-defined type metadata - UserDefinedType coordinatesType = retrieveCoordinatesType(session); - - // bind the parameters in one pass - UdtValue coordinates1 = coordinatesType.newValue(12, 34); - BoundStatement boundStatement1 = prepared.bind(1, coordinates1); - // execute the insertion - session.execute(boundStatement1); - - // alternate method: bind the parameters one by one - UdtValue coordinates2 = coordinatesType.newValue(56, 78); - BoundStatement boundStatement2 = prepared.bind().setInt("k", 2).setUdtValue("c", coordinates2); - // execute the insertion - session.execute(boundStatement2); - } - - private static void retrieveData(CqlSession session) { - for (int k = 1; k <= 2; k++) { - // Execute the SELECT query and retrieve the single row in the result set - SimpleStatement statement = - SimpleStatement.newInstance("SELECT c FROM examples.udts WHERE k = ?", k); - Row row = session.execute(statement).one(); - assert row != null; - - // Retrieve the value for column c - UdtValue coordinatesValue = row.getUdtValue("c"); - assert coordinatesValue != null; - - // 
Display the contents of the UdtValue instance - System.out.printf( - "found coordinate: (%d,%d)%n", - coordinatesValue.getInt("x"), coordinatesValue.getInt("y")); - } - } - - private static UserDefinedType retrieveCoordinatesType(CqlSession session) { - return session - .getMetadata() - .getKeyspace("examples") - .flatMap(ks -> ks.getUserDefinedType("coordinates")) - .orElseThrow(IllegalStateException::new); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java b/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java deleted file mode 100644 index 07907af6886..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java +++ /dev/null @@ -1,458 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.failover; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.QueryConsistencyException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.Collections; -import java.util.List; -import java.util.Map.Entry; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import reactor.core.publisher.Flux; - -/** - * This example illustrates how to implement a cross-datacenter failover strategy from application - * code. - * - *

    Starting with driver 4.10, cross-datacenter failover is also provided as a configuration - * option for built-in load balancing policies. See Load - * balancing in the manual. - * - *

    This example demonstrates how to achieve the same effect in application code, which confers - * more fained-grained control over which statements should be retried and where. - * - *

    The logic that decides whether or not a cross-DC failover should be attempted is presented in - * the {@link #shouldFailover(DriverException)} method below; study it carefully and adapt it to - * your needs if necessary. - * - *

    The actual request execution and failover code is presented in 3 different programming styles: - * - *

      - *
    1. Synchronous: see the {@link #writeSync()} method below; - *
    2. Asynchronous: see the {@link #writeAsync()} method below; - *
    3. Reactive (using Reactor): see the {@link - * #writeReactive()} method below. - *
    - * - * The 3 styles are identical in terms of failover effect; they are all included merely to help - * programmers pick the variant that is closest to the style they use. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster with two datacenters, dc1 and dc2, containing at least 3 - * nodes in each datacenter, is running and accessible through the contact point: - * 127.0.0.1:9042. - *
    - * - *

    Side effects: - * - *

      - *
    1. Creates a new keyspace {@code failover} in the cluster, with replication factor 3 in both - * datacenters. If a keyspace with this name already exists, it will be reused; - *
    2. Creates a new table {@code failover.orders}. If a table with that name exists already, it - * will be reused; - *
    3. Tries to write a row in the table using the local datacenter dc1; - *
    4. If the local datacenter dc1 is down, retries the write in the remote datacenter dc2. - *
    - * - * @see Java Driver online - * manual - */ -public class CrossDatacenterFailover { - - public static void main(String[] args) throws Exception { - - CrossDatacenterFailover client = new CrossDatacenterFailover(); - - try { - - // Note: when this example is executed, at least the local DC must be available - // since the driver will try to reach contact points in that DC. - - client.connect(); - client.createSchema(); - - // To fully exercise this example, try to stop the entire dc1 here; then observe how - // the writes executed below will first fail in dc1, then be diverted to dc2, where they will - // succeed. - - client.writeSync(); - client.writeAsync(); - client.writeReactive(); - - } finally { - client.close(); - } - } - - private CqlSession session; - - private CrossDatacenterFailover() {} - - /** Initiates a connection to the cluster. */ - private void connect() { - - // For simplicity, this example uses a 100% in-memory configuration loader, but the same - // configuration can be achieved with the more traditional file-based approach. - // Simply put the below snippet in your application.conf file to get the same config: - - /* - datastax-java-driver { - basic.contact-points = [ "127.0.0.1:9042" ] - basic.load-balancing-policy.local-datacenter = "dc1" - basic.request.consistency = LOCAL_QUORUM - profiles { - remote { - basic.load-balancing-policy.local-datacenter = "dc2" - basic.request.consistency = LOCAL_ONE - } - } - } - */ - - OptionsMap options = OptionsMap.driverDefaults(); - // set the datacenter to dc1 in the default profile; this makes dc1 the local datacenter - options.put(TypedDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc1"); - // set the datacenter to dc2 in the "remote" profile - options.put("remote", TypedDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc2"); - // make sure to provide a contact point belonging to dc1, not dc2! 
- options.put(TypedDriverOption.CONTACT_POINTS, Collections.singletonList("127.0.0.1:9042")); - // in this example, the default consistency level is LOCAL_QUORUM - options.put(TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM"); - // but when failing over, the consistency level will be automatically downgraded to LOCAL_ONE - options.put("remote", TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_ONE"); - - session = CqlSession.builder().withConfigLoader(DriverConfigLoader.fromMap(options)).build(); - - System.out.println("Connected to cluster with session: " + session.getName()); - } - - /** Creates the schema (keyspace) and table for this example. */ - private void createSchema() { - - session.execute( - "CREATE KEYSPACE IF NOT EXISTS failover WITH replication " - + "= {'class':'NetworkTopologyStrategy', 'dc1':3, 'dc2':3}"); - - session.execute( - "CREATE TABLE IF NOT EXISTS failover.orders (" - + "product_id uuid," - + "timestamp timestamp," - + "price double," - + "PRIMARY KEY (product_id,timestamp)" - + ")"); - } - - /** Inserts data synchronously using the local DC, retrying if necessary in a remote DC. */ - private void writeSync() { - - System.out.println("------- DC failover (sync) ------- "); - - Statement statement = - SimpleStatement.newInstance( - "INSERT INTO failover.orders " - + "(product_id, timestamp, price) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'2018-02-26T13:53:46.345+01:00'," - + "2.34)"); - - try { - - // try the statement using the default profile, which targets the local datacenter dc1. - session.execute(statement); - - System.out.println("Write succeeded"); - - } catch (DriverException e) { - - if (shouldFailover(e)) { - - System.out.println("Write failed in local DC, retrying in remote DC"); - - try { - - // try the statement using the remote profile, which targets the remote datacenter dc2. 
- session.execute(statement.setExecutionProfileName("remote")); - - System.out.println("Write succeeded"); - - } catch (DriverException e2) { - - System.out.println("Write failed in remote DC"); - - e2.printStackTrace(); - } - } - } - // let other errors propagate - } - - /** Inserts data asynchronously using the local DC, retrying if necessary in a remote DC. */ - private void writeAsync() throws ExecutionException, InterruptedException { - - System.out.println("------- DC failover (async) ------- "); - - Statement statement = - SimpleStatement.newInstance( - "INSERT INTO failover.orders " - + "(product_id, timestamp, price) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'2018-02-26T13:53:46.345+01:00'," - + "2.34)"); - - CompletionStage result = - // try the statement using the default profile, which targets the local datacenter dc1. - session - .executeAsync(statement) - .handle( - (rs, error) -> { - if (error == null) { - return CompletableFuture.completedFuture(rs); - } else { - if (error instanceof DriverException - && shouldFailover((DriverException) error)) { - System.out.println("Write failed in local DC, retrying in remote DC"); - // try the statement using the remote profile, which targets the remote - // datacenter dc2. - return session.executeAsync(statement.setExecutionProfileName("remote")); - } - // let other errors propagate - return CompletableFutures.failedFuture(error); - } - }) - // unwrap (flatmap) the nested future - .thenCompose(future -> future) - .whenComplete( - (rs, error) -> { - if (error == null) { - System.out.println("Write succeeded"); - } else { - System.out.println("Write failed in remote DC"); - error.printStackTrace(); - } - }); - - // for the sake of this example, wait for the operation to finish - result.toCompletableFuture().get(); - } - - /** Inserts data reactively using the local DC, retrying if necessary in a remote DC. 
*/ - private void writeReactive() { - - System.out.println("------- DC failover (reactive) ------- "); - - Statement statement = - SimpleStatement.newInstance( - "INSERT INTO failover.orders " - + "(product_id, timestamp, price) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'2018-02-26T13:53:46.345+01:00'," - + "2.34)"); - - Flux result = - // try the statement using the default profile, which targets the local datacenter dc1. - Flux.from(session.executeReactive(statement)) - .onErrorResume( - DriverException.class, - error -> { - if (shouldFailover(error)) { - System.out.println("Write failed in local DC, retrying in remote DC"); - // try the statement using the remote profile, which targets the remote - // datacenter dc2. - return session.executeReactive(statement.setExecutionProfileName("remote")); - } else { - return Flux.error(error); - } - }) - .doOnComplete(() -> System.out.println("Write succeeded")) - .doOnError( - error -> { - System.out.println("Write failed"); - error.printStackTrace(); - }); - - // for the sake of this example, wait for the operation to finish - result.blockLast(); - } - - /** - * Analyzes the error and decides whether to failover to a remote DC. - * - *

    The logic below categorizes driver exceptions in four main groups: - * - *

      - *
    1. Total DC outage: all nodes in DC were known to be down when the request was executed; - *
    2. Partial DC outage: one or many nodes responded, but reported a replica availability - * problem; - *
    3. DC unreachable: one or many nodes were queried, but none responded (timeout); - *
    4. Other errors. - *
    - * - * A DC failover is authorized for the first three groups above: total DC outage, partial DC - * outage, and DC unreachable. - * - *

    This logic is provided as a good starting point for users to create their own DC failover - * strategy; please adjust it to your exact needs. - */ - private boolean shouldFailover(DriverException mainException) { - - if (mainException instanceof NoNodeAvailableException) { - - // No node could be tried, because all nodes in the query plan were down. This could be a - // total DC outage, so trying another DC makes sense. - System.out.println("All nodes were down in this datacenter, failing over"); - return true; - - } else if (mainException instanceof AllNodesFailedException) { - - // Many nodes were tried (as decided by the retry policy), but all failed. This could be a - // partial DC outage: some nodes were up, but the replicas were down. - - boolean failover = false; - - // Inspect the error to find out how many coordinators were tried, and which errors they - // returned. - for (Entry> entry : - ((AllNodesFailedException) mainException).getAllErrors().entrySet()) { - - Node coordinator = entry.getKey(); - List errors = entry.getValue(); - - System.out.printf( - "Node %s in DC %s was tried %d times but failed with:%n", - coordinator.getEndPoint(), coordinator.getDatacenter(), errors.size()); - - for (Throwable nodeException : errors) { - - System.out.printf("\t- %s%n", nodeException); - - // If the error was a replica availability error, then we know that some replicas were - // down in this DC. Retrying in another DC could solve the problem. Other errors don't - // necessarily mean that the DC is unavailable, so we ignore them. - if (isReplicaAvailabilityError(nodeException)) { - failover = true; - } - } - } - - // Authorize the failover if at least one of the coordinators reported a replica availability - // error that could be solved by trying another DC. 
- if (failover) { - System.out.println( - "Some nodes tried in this DC reported a replica availability error, failing over"); - } else { - System.out.println("All nodes tried in this DC failed unexpectedly, not failing over"); - } - return failover; - - } else if (mainException instanceof DriverTimeoutException) { - - // One or many nodes were tried, but none replied in a timely manner, and the timeout defined - // by the option `datastax-java-driver.basic.request.timeout` was triggered. - // This could be a DC outage as well, or a network partition issue, so trying another DC may - // make sense. - // Note about SLAs: if your application needs to comply with SLAs, and the maximum acceptable - // latency for a request is equal or very close to the request timeout, beware that failing - // over to a different datacenter here could potentially break your SLA. - - System.out.println( - "No node in this DC replied before the timeout was triggered, failing over"); - return true; - - } else if (mainException instanceof CoordinatorException) { - - // Only one node was tried, and it failed (and the retry policy did not tell the driver to - // retry this request, but rather to surface the error immediately). This is rather unusual - // as the driver's default retry policy retries most of these errors, but some custom retry - // policies could decide otherwise. So we apply the same logic as above: if the error is a - // replica availability error, we authorize the failover. 
- - Node coordinator = ((CoordinatorException) mainException).getCoordinator(); - System.out.printf( - "Node %s in DC %s was tried once but failed with: %s%n", - coordinator.getEndPoint(), coordinator.getDatacenter(), mainException); - - boolean failover = isReplicaAvailabilityError(mainException); - if (failover) { - System.out.println( - "The only node tried in this DC reported a replica availability error, failing over"); - } else { - System.out.println("The only node tried in this DC failed unexpectedly, not failing over"); - } - return failover; - - } else { - - // The request failed with a rather unusual error. This generally indicates a more serious - // issue, since the retry policy decided to surface the error immediately. Trying another DC - // is probably a bad idea. - System.out.println("The request failed unexpectedly, not failing over: " + mainException); - return false; - } - } - - /** - * Whether the given error is a replica availability error. - * - *

    A replica availability error means that the initial consistency level could not be met - * because not enough replicas were alive. - * - *

    When this error happens, it can be worth failing over to a remote DC, as long as at - * least one of the following conditions apply: - * - *

      - *
    1. if the initial consistency level was DC-local, trying another DC may succeed; - *
    2. if the initial consistency level can be downgraded, then retrying again may succeed (in - * the same DC, or in another one). - *
    - * - * In this example both conditions above apply, so we authorize the failover whenever we detect a - * replica availability error. - */ - private boolean isReplicaAvailabilityError(Throwable t) { - return t instanceof UnavailableException || t instanceof QueryConsistencyException; - } - - private void close() { - if (session != null) { - session.close(); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java deleted file mode 100644 index 6bb2f8a9fdd..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.json; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.querybuilder.select.Selector; - -/** - * Illustrates basic JSON support with plain JSON strings. For more advanced examples using complex - * objects and custom codecs, refer to the other examples in this package. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.querybuilder_json". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see What’s - * New in Cassandra 2.2: JSON Support - */ -public class PlainTextJson { - - public static void main(String[] args) { - - try (CqlSession session = CqlSession.builder().build()) { - createSchema(session); - - insertWithCoreApi(session); - selectWithCoreApi(session); - - insertWithQueryBuilder(session); - selectWithQueryBuilder(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.querybuilder_json(" - + "id int PRIMARY KEY, name text, specs map)"); - } - - /** Demonstrates data insertion with the "core" API, i.e. providing the full query strings. */ - private static void insertWithCoreApi(CqlSession session) { - // Bind in a simple statement: - session.execute( - SimpleStatement.newInstance( - "INSERT INTO examples.querybuilder_json JSON ?", - "{ \"id\": 1, \"name\": \"Mouse\", \"specs\": { \"color\": \"silver\" } }")); - - // Bind in a prepared statement: - // (subsequent calls to the prepare() method will return cached statement) - PreparedStatement pst = session.prepare("INSERT INTO examples.querybuilder_json JSON :payload"); - session.execute( - pst.bind() - .setString( - "payload", - "{ \"id\": 2, \"name\": \"Keyboard\", \"specs\": { \"layout\": \"qwerty\" } }")); - - // fromJson lets you provide individual columns as JSON: - session.execute( - SimpleStatement.newInstance( - "INSERT INTO examples.querybuilder_json " - + "(id, name, specs) VALUES (?, ?, fromJson(?))", - 3, - "Screen", - "{ \"size\": \"24-inch\" }")); - } - - /** Demonstrates data retrieval with the "core" API, i.e. providing the full query strings. 
*/ - private static void selectWithCoreApi(CqlSession session) { - // Reading the whole row as a JSON object: - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT JSON * FROM examples.querybuilder_json WHERE id = ?", 1)) - .one(); - assert row != null; - System.out.printf("Entry #1 as JSON: %s%n", row.getString("[json]")); - - // Extracting a particular column as JSON: - row = - session - .execute( - SimpleStatement.newInstance( - "SELECT id, toJson(specs) AS json_specs FROM examples.querybuilder_json WHERE id = ?", - 2)) - .one(); - assert row != null; - System.out.printf( - "Entry #%d's specs as JSON: %s%n", row.getInt("id"), row.getString("json_specs")); - } - - /** - * Same as {@link #insertWithCoreApi(CqlSession)}, but using {@link - * com.datastax.oss.driver.api.querybuilder.QueryBuilder} to construct the queries. - */ - private static void insertWithQueryBuilder(CqlSession session) { - // Simple statement: - Statement stmt = - insertInto("examples", "querybuilder_json") - .json("{ \"id\": 1, \"name\": \"Mouse\", \"specs\": { \"color\": \"silver\" } }") - .build(); - session.execute(stmt); - - // Prepare and bind: - PreparedStatement pst = - session.prepare( - insertInto("examples", "querybuilder_json").json(bindMarker("payload")).build()); - session.execute( - pst.bind() - .setString( - "payload", - "{ \"id\": 2, \"name\": \"Keyboard\", \"specs\": { \"layout\": \"qwerty\" } }")); - - // fromJson on a single column: - stmt = - insertInto("examples", "querybuilder_json") - .value("id", literal(3)) - .value("name", literal("Screen")) - .value("specs", function("fromJson", literal("{ \"size\": \"24-inch\" }"))) - .build(); - session.execute(stmt); - } - - /** - * Same as {@link #selectWithCoreApi(CqlSession)}, but using {@link - * com.datastax.oss.driver.api.querybuilder.QueryBuilder} to construct the queries. 
- */ - private static void selectWithQueryBuilder(CqlSession session) { - // Reading the whole row as a JSON object: - Statement stmt = - selectFrom("examples", "querybuilder_json") - .json() - .all() - .whereColumn("id") - .isEqualTo(literal(1)) - .build(); - Row row = session.execute(stmt).one(); - assert row != null; - System.out.printf("Entry #1 as JSON: %s%n", row.getString("[json]")); - - // Extracting a particular column as JSON: - stmt = - selectFrom("examples", "querybuilder_json") - .column("id") - .function("toJson", Selector.column("specs")) - .as("json_specs") - .whereColumn("id") - .isEqualTo(literal(2)) - .build(); - - row = session.execute(stmt).one(); - assert row != null; - - System.out.printf( - "Entry #%d's specs as JSON: %s%n", row.getInt("id"), row.getString("json_specs")); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java deleted file mode 100644 index 0d1ed61d2f4..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.json.jackson; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.examples.json.PlainTextJson; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Illustrates how to map a single table column of type {@code VARCHAR}, containing JSON payloads, - * into a Java object using the Jackson library. - * - *

    This example makes usage of a {@linkplain ExtraTypeCodecs#json(Class) custom codec for JSON}. - * If you plan to follow this example, make sure to include the following Maven dependencies in your - * project: - * - *

    {@code
    - * 
    - *   com.fasterxml.jackson.core
    - *   jackson-databind
    - *   2.9.8
    - * 
    - * }
    - * - *

    This example also uses the {@link com.datastax.oss.driver.api.querybuilder.QueryBuilder - * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily - * translatable to the queries in this class). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.json_jackson_column". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - */ -public class JacksonJsonColumn { - - // A codec to convert JSON payloads into User instances; - private static final TypeCodec USER_CODEC = ExtraTypeCodecs.json(User.class); - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) { - createSchema(session); - insertJsonColumn(session); - selectJsonColumn(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.json_jackson_column(" - + "id int PRIMARY KEY, json text)"); - } - - // Mapping a User instance to a table column - private static void insertJsonColumn(CqlSession session) { - - User alice = new User("alice", 30); - User bob = new User("bob", 35); - - // Build and execute a simple statement - - Statement stmt = - insertInto("examples", "json_jackson_column") - .value("id", literal(1)) - // the User object will be converted into a String and persisted into the VARCHAR column - // "json" - .value("json", literal(alice, session.getContext().getCodecRegistry())) - .build(); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (subsequent calls to the prepare() method will return cached statement) - PreparedStatement pst = - session.prepare( - insertInto("examples", "json_jackson_column") - .value("id", bindMarker("id")) - .value("json", bindMarker("json")) - .build()); - session.execute(pst.bind().setInt("id", 2).set("json", bob, User.class)); - } - - // Retrieving User instances from a table column - private static void selectJsonColumn(CqlSession session) { - - Statement stmt = - selectFrom("examples", "json_jackson_column") - .all() - .whereColumn("id") - .in(literal(1), literal(2)) - .build(); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) 
{ - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a User instance - User user = row.get("json", User.class); - // it is also possible to retrieve the raw JSON payload - String json = row.getString("json"); - System.out.printf( - "Retrieved row:%n id %d%n user %s%n user (raw) %s%n%n", - id, user, json); - } - } - - @SuppressWarnings("unused") - public static class User { - - private final String name; - - private final int age; - - @JsonCreator - public User(@JsonProperty("name") String name, @JsonProperty("age") int age) { - this.name = name; - this.age = age; - } - - public String getName() { - return name; - } - - public int getAge() { - return age; - } - - @Override - public String toString() { - return String.format("%s (%s)", name, age); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java deleted file mode 100644 index b3c2c6aaa95..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.json.jackson; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.examples.json.PlainTextJson; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.JsonNodeFactory; -import com.fasterxml.jackson.databind.node.ObjectNode; - -/** - * Illustrates how to map a single table column of an arbitrary type to a Java object using the Jackson library, and leveraging the {@code - * toJson()} and {@code fromJson()} functions introduced in Cassandra 2.2. - * - *

    This example makes usage of a {@linkplain ExtraTypeCodecs#json(Class) custom codec for JSON}. - * If you plan to follow this example, make sure to include the following Maven dependencies in your - * project: - * - *

    {@code
    - * 
    - *   com.fasterxml.jackson.core
    - *   jackson-databind
    - *   2.9.8
    - * 
    - * }
    - * - * This example also uses the {@link com.datastax.oss.driver.api.querybuilder.QueryBuilder - * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily - * translatable to the queries in this class). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a user-defined type (UDT) "examples.json_jackson_function_user". If it already - * exists, it will be reused; - *
    • creates a table "examples.json_jackson_function". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see What’s - * New in Cassandra 2.2: JSON Support - */ -public class JacksonJsonFunction { - - // A codec to convert JSON payloads into User instances; - private static final TypeCodec USER_CODEC = ExtraTypeCodecs.json(User.class); - - // A codec to convert generic JSON payloads into JsonNode instances - private static final TypeCodec JSON_NODE_CODEC = ExtraTypeCodecs.json(JsonNode.class); - - public static void main(String[] args) { - try (CqlSession session = - CqlSession.builder().addTypeCodecs(USER_CODEC, JSON_NODE_CODEC).build()) { - createSchema(session); - insertFromJson(session); - selectToJson(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TYPE IF NOT EXISTS examples.json_jackson_function_user(" + "name text, age int)"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.json_jackson_function(" - + "id int PRIMARY KEY, user frozen, scores map)"); - } - - // Mapping JSON payloads to table columns of arbitrary types, - // using fromJson() function - private static void insertFromJson(CqlSession session) { - - User alice = new User("alice", 30); - User bob = new User("bob", 35); - - ObjectNode aliceScores = - JsonNodeFactory.instance.objectNode().put("call_of_duty", 4.8).put("pokemon_go", 9.7); - ObjectNode bobScores = - JsonNodeFactory.instance.objectNode().put("zelda", 8.3).put("pokemon_go", 12.4); - - // Build and execute a simple statement - Statement stmt = - insertInto("examples", "json_jackson_function") - .value("id", literal(1)) - // client-side, the User object will be converted into a JSON String; - // then, server-side, the fromJson() function will convert that JSON string - // into an instance of the json_jackson_function_user user-defined type (UDT), - // which will be persisted into the column "user" - .value( - "user", - 
function("fromJson", literal(alice, session.getContext().getCodecRegistry()))) - // same thing, but this time converting from - // a generic JsonNode to a JSON string, then from this string to a map - .value( - "scores", - function("fromJson", literal(aliceScores, session.getContext().getCodecRegistry()))) - .build(); - System.out.println(((SimpleStatement) stmt).getQuery()); - session.execute(stmt); - System.out.println("after"); - - // The JSON object can be a bound value if the statement is prepared - // (subsequent calls to the prepare() method will return cached statement) - PreparedStatement pst = - session.prepare( - insertInto("examples", "json_jackson_function") - .value("id", bindMarker("id")) - .value("user", function("fromJson", bindMarker("user"))) - .value("scores", function("fromJson", bindMarker("scores"))) - .build()); - System.out.println(pst.getQuery()); - session.execute( - pst.bind() - .setInt("id", 2) - .set("user", bob, User.class) - // note that the codec requires that the type passed to the set() method - // be always JsonNode, and not a subclass of it, such as ObjectNode - .set("scores", bobScores, JsonNode.class)); - } - - // Retrieving JSON payloads from table columns of arbitrary types, - // using toJson() function - private static void selectToJson(CqlSession session) { - - Statement stmt = - selectFrom("examples", "json_jackson_function") - .column("id") - .function("toJson", Selector.column("user")) - .as("user") - .function("toJson", Selector.column("scores")) - .as("scores") - .whereColumn("id") - .in(literal(1), literal(2)) - .build(); - System.out.println(((SimpleStatement) stmt).getQuery()); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a User instance - User user = row.get("user", User.class); - // it is also possible to retrieve the raw JSON payload - String userJson = row.getString("user"); - // retrieve the JSON payload and 
convert it to a JsonNode instance - // note that the codec requires that the type passed to the get() method - // be always JsonNode, and not a subclass of it, such as ObjectNode - JsonNode scores = row.get("scores", JsonNode.class); - // it is also possible to retrieve the raw JSON payload - String scoresJson = row.getString("scores"); - System.out.printf( - "Retrieved row:%n" - + "id %d%n" - + "user %s%n" - + "user (raw) %s%n" - + "scores %s%n" - + "scores (raw) %s%n%n", - id, user, userJson, scores, scoresJson); - } - } - - @SuppressWarnings("unused") - public static class User { - - private final String name; - - private final int age; - - @JsonCreator - public User(@JsonProperty("name") String name, @JsonProperty("age") int age) { - this.name = name; - this.age = age; - } - - public String getName() { - return name; - } - - public int getAge() { - return age; - } - - @Override - public String toString() { - return String.format("%s (%s)", name, age); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java deleted file mode 100644 index 1a5fed0bbf3..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.json.jackson; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.examples.json.PlainTextJson; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Illustrates how to map an entire table row to a Java object using the Jackson library, and leveraging the {@code - * SELECT JSON} and {@code INSERT JSON} syntaxes introduced in Cassandra 2.2. - * - *

    This example makes usage of a {@linkplain ExtraTypeCodecs#json(Class) custom codec for JSON}. - * If you plan to follow this example, make sure to include the following Maven dependencies in your - * project: - * - *

    {@code
    - * 
    - *   com.fasterxml.jackson.core
    - *   jackson-databind
    - *   2.9.8
    - * 
    - * }
    - * - * This example also uses the {@link com.datastax.oss.driver.api.querybuilder.QueryBuilder - * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily - * translatable to the queries in this class). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.json_jackson_row". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see What’s - * New in Cassandra 2.2: JSON Support - */ -public class JacksonJsonRow { - // A codec to convert JSON payloads into User instances; - private static final TypeCodec USER_CODEC = ExtraTypeCodecs.json(User.class); - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) { - createSchema(session); - insertJsonRow(session); - selectJsonRow(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.json_jackson_row(" - + "id int PRIMARY KEY, name text, age int)"); - } - - // Mapping a User instance to a table row using INSERT JSON - private static void insertJsonRow(CqlSession session) { - // Build and execute a simple statement - Statement stmt = - insertInto("examples", "json_jackson_row") - .json(new User(1, "alice", 30), session.getContext().getCodecRegistry()) - .build(); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (subsequent calls to the prepare() method will return cached statement) - PreparedStatement pst = - session.prepare( - insertInto("examples", "json_jackson_row").json(bindMarker("user")).build()); - session.execute(pst.bind().set("user", new User(2, "bob", 35), User.class)); - } - - // Retrieving User instances from table rows using SELECT JSON - private static void selectJsonRow(CqlSession session) { - - // Reading the whole row as a JSON object - Statement stmt = - selectFrom("examples", "json_jackson_row") - .json() - .all() - .whereColumn("id") - .in(literal(1), literal(2)) - .build(); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - // SELECT JSON returns only one column for each row, of type VARCHAR, - // containing the row as a JSON payload - User user = 
row.get(0, User.class); - System.out.printf("Retrieved user: %s%n", user); - } - } - - @SuppressWarnings("unused") - public static class User { - - private final int id; - - private final String name; - - private final int age; - - @JsonCreator - public User( - @JsonProperty("id") int id, - @JsonProperty("name") String name, - @JsonProperty("age") int age) { - this.id = id; - this.name = name; - this.age = age; - } - - public int getId() { - return id; - } - - public String getName() { - return name; - } - - public int getAge() { - return age; - } - - @Override - public String toString() { - return String.format("%s (id %d, age %d)", name, id, age); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonCodec.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonCodec.java deleted file mode 100644 index 9b30d5d6c6b..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonCodec.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.json.jsr; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.StringReader; -import java.io.StringWriter; -import java.nio.ByteBuffer; -import java.util.Map; -import javax.json.Json; -import javax.json.JsonArray; -import javax.json.JsonException; -import javax.json.JsonObject; -import javax.json.JsonReader; -import javax.json.JsonReaderFactory; -import javax.json.JsonStructure; -import javax.json.JsonWriter; -import javax.json.JsonWriterFactory; - -/** - * A JSON codec that uses the Java API for JSON - * processing to perform serialization and deserialization of JSON structures. - * - *

    More specifically, this codec maps an arbitrary {@link JsonStructure} to a CQL {@code varchar} - * column. - * - *

    This codec handles the Java type {@link JsonStructure}. It is therefore required that values - * are set and retrieved using that exact Java type; users should manually downcast to either {@link - * JsonObject} or {@link JsonArray}, as in the example below: - * - *

    {@code
    - * // setting values
    - * JsonObject myObject = ...
    - * PreparedStatement ps = ...
    - * // set values using JsonStructure as target Java type
    - * BoundStatement bs = ps.bind().set(1, myObject, JsonStructure.class);
    - *
    - * // retrieving values
    - * Row row = session.execute(bs).one();
    - * // use JsonStructure as target Java type to retrieve values
    - * JsonStructure json = row.get(0, JsonStructure.class);
    - * if (json instanceof JsonObject) {
    - *     myObject = (JsonObject) json;
    - *     ...
    - * }
    - * }
    - * - *

    Note that at runtime, this codec requires the presence of both JSR-353 API and a - * JSR-353-compatible runtime library, such as JSR-353's reference implementation. If you use - * Maven, this can be done by declaring the following dependencies in your project: - * - *

    {@code
    - * 
    - *   javax.json
    - *   javax.json-api
    - *   1.0
    - * 
    - *
    - * 
    - *   org.glassfish
    - *   javax.json
    - *   1.1.4
    - * 
    - * }
    - */ -public class Jsr353JsonCodec implements TypeCodec { - - private final JsonReaderFactory readerFactory; - - private final JsonWriterFactory writerFactory; - - /** Creates a new instance using a default configuration. */ - public Jsr353JsonCodec() { - this(null); - } - - /** - * Creates a new instance using the provided configuration. - * - * @param config A map of provider-specific configuration properties. May be empty or {@code - * null}. - */ - public Jsr353JsonCodec(@Nullable Map config) { - readerFactory = Json.createReaderFactory(config); - writerFactory = Json.createWriterFactory(config); - } - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(JsonStructure.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable JsonStructure value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { - JsonWriter writer = writerFactory.createWriter(baos); - writer.write(value); - return ByteBuffer.wrap(baos.toByteArray()); - } catch (JsonException | IOException e) { - throw new IllegalArgumentException("Failed to encode value as JSON", e); - } - } - - @Nullable - @Override - public JsonStructure decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - try (ByteArrayInputStream bais = new ByteArrayInputStream(Bytes.getArray(bytes))) { - JsonReader reader = readerFactory.createReader(bais); - return reader.read(); - } catch (JsonException | IOException e) { - throw new IllegalArgumentException("Failed to decode JSON value", e); - } - } - - @NonNull - @Override - public String format(@Nullable JsonStructure value) { - if (value == null) { - return "NULL"; - } - String json; - try (StringWriter sw = new StringWriter()) { - JsonWriter writer = 
writerFactory.createWriter(sw); - writer.write(value); - json = sw.toString(); - } catch (JsonException | IOException e) { - throw new IllegalArgumentException("Failed to format value as JSON", e); - } - return Strings.quote(json); - } - - @Nullable - @Override - public JsonStructure parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException("JSON strings must be enclosed by single quotes"); - } - String json = Strings.unquote(value); - try (StringReader sr = new StringReader(json)) { - JsonReader reader = readerFactory.createReader(sr); - return reader.read(); - } catch (JsonException e) { - throw new IllegalArgumentException("Failed to parse value as JSON", e); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java deleted file mode 100644 index 9ded61f82e3..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.json.jsr; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.examples.json.PlainTextJson; -import javax.json.Json; -import javax.json.JsonObject; -import javax.json.JsonStructure; - -/** - * Illustrates how to map a single table column of type {@code VARCHAR}, containing JSON payloads, - * into a Java object using the Java API for JSON - * processing. - * - *

    This example makes usage of a custom {@link TypeCodec codec}, {@link Jsr353JsonCodec}, which - * is declared in the java-driver-examples module. If you plan to follow this example, make sure to - * include the following Maven dependencies in your project: - * - *

    {@code
    - * 
    - *     javax.json
    - *     javax.json-api
    - *     1.0
    - * 
    - *
    - * 
    - *     org.glassfish
    - *     javax.json
    - *     1.1.4
    - *     runtime
    - * 
    - * }
    - * - * This example also uses the {@link com.datastax.oss.driver.api.querybuilder.QueryBuilder - * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily - * translatable to the queries in this class). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.json_jsr353_column". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - */ -public class Jsr353JsonColumn { - - // A codec to convert JSON payloads into JsonObject instances; - private static final Jsr353JsonCodec USER_CODEC = new Jsr353JsonCodec(); - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) { - createSchema(session); - insertJsonColumn(session); - selectJsonColumn(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.json_jsr353_column(" - + "id int PRIMARY KEY, json text)"); - } - - // Mapping a JSON object to a table column - private static void insertJsonColumn(CqlSession session) { - - JsonObject alice = Json.createObjectBuilder().add("name", "alice").add("age", 30).build(); - - JsonObject bob = Json.createObjectBuilder().add("name", "bob").add("age", 35).build(); - - // Build and execute a simple statement - Statement stmt = - insertInto("examples", "json_jsr353_column") - .value("id", literal(1)) - // the JSON object will be converted into a String and persisted into the VARCHAR column - // "json" - .value("json", literal(alice, session.getContext().getCodecRegistry())) - .build(); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (subsequent calls to the prepare() method will return cached statement) - PreparedStatement pst = - session.prepare( - insertInto("examples", "json_jsr353_column") - .value("id", bindMarker("id")) - .value("json", bindMarker("json")) - .build()); - session.execute( - pst.bind() - .setInt("id", 2) - // note that the codec requires that the type passed to the set() method - // be always JsonStructure, and not a subclass of it, such as JsonObject - .set("json", bob, JsonStructure.class)); - } - - // Retrieving JSON objects from a table column 
- private static void selectJsonColumn(CqlSession session) { - - Statement stmt = - selectFrom("examples", "json_jsr353_column") - .all() - .whereColumn("id") - .in(literal(1), literal(2)) - .build(); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a JsonObject instance - // note that the codec requires that the type passed to the get() method - // be always JsonStructure, and not a subclass of it, such as JsonObject, - // hence the need to downcast to JsonObject manually - JsonObject user = (JsonObject) row.get("json", JsonStructure.class); - // it is also possible to retrieve the raw JSON payload - String json = row.getString("json"); - System.out.printf( - "Retrieved row:%n id %d%n user %s%n user (raw) %s%n%n", - id, user, json); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java deleted file mode 100644 index 25b243eeb5d..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.json.jsr; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.function; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.examples.json.PlainTextJson; -import javax.json.Json; -import javax.json.JsonObject; -import javax.json.JsonStructure; - -/** - * Illustrates how to map a single table column of an arbitrary type to a Java object using the Java API for JSON processing, and leveraging the - * {@code toJson()} and {@code fromJson()} functions introduced in Cassandra 2.2. - * - *

    This example makes usage of a custom {@link TypeCodec codec}, {@link Jsr353JsonCodec}, which - * is declared in the java-driver-examples module. If you plan to follow this example, make sure to - * include the following Maven dependencies in your project: - * - *

    {
    - * 
    - *     javax.json
    - *     javax.json-api
    - *     1.0
    - * 
    - *
    - * 
    - *     org.glassfish
    - *     javax.json
    - *     1.1.4
    - *     runtime
    - * 
    - * }
    - * - * This example also uses the {@link com.datastax.oss.driver.api.querybuilder.QueryBuilder - * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily - * translatable to the queries in this class). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a user-defined type (UDT) "examples.json_jsr353_function_user". If it already - * exists, it will be reused; - *
    • creates a table "examples.json_jsr353_function". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see What’s - * New in Cassandra 2.2: JSON Support - */ -public class Jsr353JsonFunction { - - // A codec to convert JSON payloads into JsonObject instances; - private static final Jsr353JsonCodec USER_CODEC = new Jsr353JsonCodec(); - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) { - - createSchema(session); - insertFromJson(session); - selectToJson(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TYPE IF NOT EXISTS examples.json_jsr353_function_user(name text, age int)"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.json_jsr353_function(" - + "id int PRIMARY KEY, user frozen, scores map)"); - } - - // Mapping JSON payloads to table columns of arbitrary types, - // using fromJson() function - private static void insertFromJson(CqlSession session) { - - JsonObject alice = Json.createObjectBuilder().add("name", "alice").add("age", 30).build(); - - JsonObject bob = Json.createObjectBuilder().add("name", "bob").add("age", 35).build(); - - JsonObject aliceScores = - Json.createObjectBuilder().add("call_of_duty", 4.8).add("pokemon_go", 9.7).build(); - - JsonObject bobScores = - Json.createObjectBuilder().add("zelda", 8.3).add("pokemon_go", 12.4).build(); - - // Build and execute a simple statement - Statement stmt = - insertInto("examples", "json_jsr353_function") - .value("id", literal(1)) - // client-side, the JsonObject will be converted into a JSON String; - // then, server-side, the fromJson() function will convert that JSON string - // into an instance of the json_jsr353_function_user user-defined type (UDT), - // which will be persisted into the column "user" - .value( - "user", - function("fromJson", literal(alice, session.getContext().getCodecRegistry()))) - // 
same thing, but this time converting from - // a JsonObject to a JSON string, then from this string to a map - .value( - "scores", - function("fromJson", literal(aliceScores, session.getContext().getCodecRegistry()))) - .build(); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (subsequent calls to the prepare() method will return cached statement) - PreparedStatement pst = - session.prepare( - insertInto("examples", "json_jsr353_function") - .value("id", bindMarker("id")) - .value("user", function("fromJson", bindMarker("user"))) - .value("scores", function("fromJson", bindMarker("scores"))) - .build()); - session.execute( - pst.bind() - .setInt("id", 2) - // note that the codec requires that the type passed to the set() method - // be always JsonStructure, and not a subclass of it, such as JsonObject - .set("user", bob, JsonStructure.class) - .set("scores", bobScores, JsonStructure.class)); - } - - // Retrieving JSON payloads from table columns of arbitrary types, - // using toJson() function - private static void selectToJson(CqlSession session) { - - Statement stmt = - selectFrom("examples", "json_jsr353_function") - .column("id") - .function("toJson", Selector.column("user")) - .as("user") - .function("toJson", Selector.column("scores")) - .as("scores") - .whereColumn("id") - .in(literal(1), literal(2)) - .build(); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - int id = row.getInt("id"); - // retrieve the JSON payload and convert it to a JsonObject instance - // note that the codec requires that the type passed to the get() method - // be always JsonStructure, and not a subclass of it, such as JsonObject, - // hence the need to downcast to JsonObject manually - JsonObject user = (JsonObject) row.get("user", JsonStructure.class); - // it is also possible to retrieve the raw JSON payload - String userJson = row.getString("user"); - // retrieve the JSON payload and convert it to a 
JsonObject instance - JsonObject scores = (JsonObject) row.get("scores", JsonStructure.class); - // it is also possible to retrieve the raw JSON payload - String scoresJson = row.getString("scores"); - System.out.printf( - "Retrieved row:%n" - + "id %d%n" - + "user %s%n" - + "user (raw) %s%n" - + "scores %s%n" - + "scores (raw) %s%n%n", - id, user, userJson, scores, scoresJson); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java deleted file mode 100644 index 595522fa964..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.json.jsr; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.examples.json.PlainTextJson; -import javax.json.Json; -import javax.json.JsonObject; -import javax.json.JsonStructure; - -/** - * Illustrates how to map an entire table row to a Java object using the Java API for JSON processing, and leveraging the - * {@code SELECT JSON} and {@code INSERT JSON} syntaxes introduced in Cassandra 2.2. - * - *

    This example makes usage of a custom {@link TypeCodec codec}, {@link Jsr353JsonCodec}, which - * is declared in the java-driver-examples module. If you plan to follow this example, make sure to - * include the following Maven dependencies in your project: - * - *

    {@code
    - * 
    - *     javax.json
    - *     javax.json-api
    - *     1.0
    - * 
    - *
    - * 
    - *     org.glassfish
    - *     javax.json
    - *     1.1.4
    - *     runtime
    - * 
    - * }
    - * - * This example also uses the {@link com.datastax.oss.driver.api.querybuilder.QueryBuilder - * QueryBuilder}; for examples using the "core" API, see {@link PlainTextJson} (they are easily - * translatable to the queries in this class). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.json_jsr353_row". If it already exists, it will be reused; - *
    • inserts data in the table. - *
    - * - * @see What’s - * New in Cassandra 2.2: JSON Support - */ -public class Jsr353JsonRow { - - // A codec to convert JSON payloads into JsonObject instances; - private static final Jsr353JsonCodec USER_CODEC = new Jsr353JsonCodec(); - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) { - createSchema(session); - insertJsonRow(session); - selectJsonRow(session); - } - } - - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.json_jsr353_row(" - + "id int PRIMARY KEY, name text, age int)"); - } - - // Mapping a User instance to a table row using INSERT JSON - private static void insertJsonRow(CqlSession session) { - - JsonObject alice = - Json.createObjectBuilder().add("id", 1).add("name", "alice").add("age", 30).build(); - - JsonObject bob = - Json.createObjectBuilder().add("id", 2).add("name", "bob").add("age", 35).build(); - - // Build and execute a simple statement - Statement stmt = - insertInto("examples", "json_jsr353_row") - .json(alice, session.getContext().getCodecRegistry()) - .build(); - session.execute(stmt); - - // The JSON object can be a bound value if the statement is prepared - // (we use a local variable here for the sake of example, but in a real application you would - // cache and reuse the prepared statement) - PreparedStatement pst = - session.prepare(insertInto("examples", "json_jsr353_row").json(bindMarker("user")).build()); - session.execute( - pst.bind() - // note that the codec requires that the type passed to the set() method - // be always JsonStructure, and not a subclass of it, such as JsonObject - .set("user", bob, JsonStructure.class)); - } - - // Retrieving User instances from table rows using SELECT JSON - private static void 
selectJsonRow(CqlSession session) { - - // Reading the whole row as a JSON object - Statement stmt = - selectFrom("examples", "json_jsr353_row") - .json() - .all() - .whereColumn("id") - .in(literal(1), literal(2)) - .build(); - - ResultSet rows = session.execute(stmt); - - for (Row row : rows) { - // SELECT JSON returns only one column for each row, of type VARCHAR, - // containing the row as a JSON payload. - // Note that the codec requires that the type passed to the get() method - // be always JsonStructure, and not a subclass of it, such as JsonObject, - // hence the need to downcast to JsonObject manually - JsonObject user = (JsonObject) row.get(0, JsonStructure.class); - System.out.printf("Retrieved user: %s%n", user); - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/KillrVideoMapperExample.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/KillrVideoMapperExample.java deleted file mode 100644 index 6284b16eac1..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/KillrVideoMapperExample.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.examples.mapper.killrvideo.KillrVideoMapper; -import com.datastax.oss.driver.examples.mapper.killrvideo.user.User; -import com.datastax.oss.driver.examples.mapper.killrvideo.user.UserDao; -import com.datastax.oss.driver.examples.mapper.killrvideo.video.LatestVideo; -import com.datastax.oss.driver.examples.mapper.killrvideo.video.UserVideo; -import com.datastax.oss.driver.examples.mapper.killrvideo.video.Video; -import com.datastax.oss.driver.examples.mapper.killrvideo.video.VideoByTag; -import com.datastax.oss.driver.examples.mapper.killrvideo.video.VideoDao; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Uses the driver's object mapper to interact with a schema. - * - *

    We use the data model of the KillrVideo sample - * application. The mapped entities and DAOs are in the {@link - * com.datastax.oss.driver.examples.mapper.killrvideo} package. We only cover a subset of the data - * model (ratings, stats, recommendations and comments are not covered). - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "killrvideo" in the session. If a keyspace with this name already - * exists, it will be reused; - *
    • creates the tables of the KillrVideo data model, if they don't already exist; - *
    • inserts a new user, or reuse the existing one if the email address is already taken; - *
    • inserts a video for that user. - *
    - * - * @see Java - * Driver Mapper manual - */ -@SuppressWarnings("CatchAndPrintStackTrace") -public class KillrVideoMapperExample { - - private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromCql("killrvideo"); - - public static void main(String[] args) { - - try (CqlSession session = CqlSession.builder().build()) { - - maybeCreateSchema(session); - - KillrVideoMapper mapper = - KillrVideoMapper.builder(session).withDefaultKeyspace(KEYSPACE_ID).build(); - - // Create a new user - UserDao userDao = mapper.userDao(); - - User user = new User(Uuids.random(), "test", "user", "testuser@example.com", Instant.now()); - - if (userDao.create(user, "fakePasswordForTests".toCharArray())) { - System.out.println("Created " + user); - } else { - user = userDao.getByEmail("testuser@example.com"); - System.out.println("Reusing existing " + user); - } - - // Creating another user with the same email should fail - assert !userDao.create( - new User(Uuids.random(), "test2", "user", "testuser@example.com", Instant.now()), - "fakePasswordForTests2".toCharArray()); - - // Simulate login attempts - tryLogin(userDao, "testuser@example.com", "fakePasswordForTests"); - tryLogin(userDao, "testuser@example.com", "fakePasswordForTests2"); - - // Insert a video - VideoDao videoDao = mapper.videoDao(); - - Video video = new Video(); - video.setUserid(user.getUserid()); - video.setName( - "Getting Started with DataStax Apache Cassandra as a Service on DataStax Astra"); - video.setLocation("https://www.youtube.com/watch?v=68xzKpcZURA"); - Set tags = new HashSet<>(); - tags.add("apachecassandra"); - tags.add("nosql"); - tags.add("hybridcloud"); - video.setTags(tags); - - videoDao.create(video); - System.out.printf("Created video [%s] %s%n", video.getVideoid(), video.getName()); - - // Check that associated denormalized tables have also been updated: - PagingIterable userVideos = videoDao.getByUser(user.getUserid()); - System.out.printf("Videos for %s %s:%n", user.getFirstname(), 
user.getLastname()); - for (UserVideo userVideo : userVideos) { - System.out.printf(" [%s] %s%n", userVideo.getVideoid(), userVideo.getName()); - } - - PagingIterable latestVideos = videoDao.getLatest(todaysTimestamp()); - System.out.println("Latest videos:"); - for (LatestVideo latestVideo : latestVideos) { - System.out.printf(" [%s] %s%n", latestVideo.getVideoid(), latestVideo.getName()); - } - - PagingIterable videosByTag = videoDao.getByTag("apachecassandra"); - System.out.println("Videos tagged with apachecassandra:"); - for (VideoByTag videoByTag : videosByTag) { - System.out.printf(" [%s] %s%n", videoByTag.getVideoid(), videoByTag.getName()); - } - - // Update the existing video: - Video template = new Video(); - template.setVideoid(video.getVideoid()); - template.setName( - "Getting Started with DataStax Apache Cassandra® as a Service on DataStax Astra"); - videoDao.update(template); - // Reload the whole entity and check the fields - video = videoDao.get(video.getVideoid()); - System.out.printf("Updated name for video %s: %s%n", video.getVideoid(), video.getName()); - } catch (Exception e) { - e.printStackTrace(); - } - } - - private static void tryLogin(UserDao userDao, String email, String password) { - Optional maybeUser = userDao.login(email, password.toCharArray()); - System.out.printf( - "Logging in with %s/%s: %s%n", - email, password, maybeUser.isPresent() ? 
"Success" : "Failure"); - } - - private static void maybeCreateSchema(CqlSession session) throws Exception { - session.execute( - SimpleStatement.newInstance( - "CREATE KEYSPACE IF NOT EXISTS killrvideo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") - .setExecutionProfileName("slow")); - session.execute("USE killrvideo"); - for (String statement : getStatements("killrvideo_schema.cql")) { - session.execute(SimpleStatement.newInstance(statement).setExecutionProfileName("slow")); - } - } - - private static List getStatements(String fileName) throws Exception { - Path path = Paths.get(ClassLoader.getSystemResource(fileName).toURI()); - String contents = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); - return Arrays.stream(contents.split(";")) - .map(String::trim) - .filter(s -> !s.isEmpty()) - .collect(Collectors.toList()); - } - - /** - * KillrVideo uses a textual timestamp to partition recent video. Build the timestamp for today to - * fetch our latest insertions. - */ - private static String todaysTimestamp() { - return DateTimeFormatter.ofPattern("yyyyMMdd").withZone(ZoneOffset.UTC).format(Instant.now()); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/KillrVideoMapper.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/KillrVideoMapper.java deleted file mode 100644 index c28130481b4..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/KillrVideoMapper.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.mapper.killrvideo; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.examples.mapper.killrvideo.user.UserDao; -import com.datastax.oss.driver.examples.mapper.killrvideo.video.VideoDao; - -@Mapper -public interface KillrVideoMapper { - - @DaoFactory - UserDao userDao(); - - @DaoFactory - VideoDao videoDao(); - - static MapperBuilder builder(CqlSession session) { - return new KillrVideoMapperBuilder(session); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/CreateUserQueryProvider.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/CreateUserQueryProvider.java deleted file mode 100644 index baaeb13b67b..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/CreateUserQueryProvider.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.mapper.killrvideo.user; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import java.time.Instant; -import java.util.Objects; -import java.util.UUID; - -/** - * Provides the implementation of {@link UserDao#create}. - * - *

    Package-private visibility is sufficient, this will be called only from the generated DAO - * implementation. - */ -class CreateUserQueryProvider { - - private final CqlSession session; - private final EntityHelper userHelper; - private final EntityHelper credentialsHelper; - private final PreparedStatement preparedInsertCredentials; - private final PreparedStatement preparedInsertUser; - private final PreparedStatement preparedDeleteCredentials; - private final PreparedStatement preparedDeleteUser; - - CreateUserQueryProvider( - MapperContext context, - EntityHelper userHelper, - EntityHelper credentialsHelper) { - - this.session = context.getSession(); - - this.userHelper = userHelper; - this.credentialsHelper = credentialsHelper; - - this.preparedInsertCredentials = - session.prepare(credentialsHelper.insert().ifNotExists().asCql()); - this.preparedInsertUser = session.prepare(userHelper.insert().asCql()); - this.preparedDeleteCredentials = - session.prepare( - credentialsHelper - .deleteByPrimaryKey() - .ifColumn("userid") - .isEqualTo(bindMarker("userid")) - .builder() - .setConsistencyLevel(DefaultConsistencyLevel.ANY) - .build()); - this.preparedDeleteUser = - session.prepare( - userHelper - .deleteByPrimaryKey() - .ifExists() - .builder() - .setConsistencyLevel(DefaultConsistencyLevel.ANY) - .build()); - } - - boolean create(User user, char[] password) { - Objects.requireNonNull(user.getUserid()); - Objects.requireNonNull(user.getEmail()); - if (user.getCreatedDate() == null) { - user.setCreatedDate(Instant.now()); - } - - try { - // Insert the user first: otherwise there would be a short window where we have credentials - // without a corresponding user in the database, and this is considered an error state in - // LoginQueryProvider - insertUser(user); - if (!insertCredentialsIfNotExists(user.getEmail(), password, user.getUserid())) { - // email already exists - session.execute(preparedDeleteUser.bind(user.getUserid())); - return false; - } - return 
true; - } catch (Exception insertException) { - // Clean up and rethrow - try { - session.execute(preparedDeleteUser.bind(user.getUserid())); - } catch (Exception e) { - insertException.addSuppressed(e); - } - try { - session.execute(preparedDeleteCredentials.bind(user.getEmail(), user.getUserid())); - } catch (Exception e) { - insertException.addSuppressed(e); - } - throw insertException; - } - } - - private boolean insertCredentialsIfNotExists(String email, char[] password, UUID userId) { - String passwordHash = PasswordHashing.hash(Objects.requireNonNull(password)); - UserCredentials credentials = - new UserCredentials(Objects.requireNonNull(email), passwordHash, userId); - BoundStatementBuilder insertCredentials = preparedInsertCredentials.boundStatementBuilder(); - credentialsHelper.set(credentials, insertCredentials, NullSavingStrategy.DO_NOT_SET, false); - ResultSet resultSet = session.execute(insertCredentials.build()); - return resultSet.wasApplied(); - } - - private void insertUser(User user) { - BoundStatementBuilder insertUser = preparedInsertUser.boundStatementBuilder(); - userHelper.set(user, insertUser, NullSavingStrategy.DO_NOT_SET, false); - session.execute(insertUser.build()); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/LoginQueryProvider.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/LoginQueryProvider.java deleted file mode 100644 index 6d88423f046..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/LoginQueryProvider.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.mapper.killrvideo.user; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import java.util.Optional; -import java.util.UUID; - -/** - * Provides the implementation of {@link UserDao#login}. - * - *

    Package-private visibility is sufficient, this will be called only from the generated DAO - * implementation. - */ -class LoginQueryProvider { - - private final CqlSession session; - private final EntityHelper userHelper; - private final PreparedStatement preparedSelectCredentials; - private final PreparedStatement preparedSelectUser; - - LoginQueryProvider( - MapperContext context, - EntityHelper userHelper, - EntityHelper credentialsHelper) { - - this.session = context.getSession(); - - this.userHelper = userHelper; - - this.preparedSelectCredentials = - session.prepare(credentialsHelper.selectByPrimaryKey().asCql()); - this.preparedSelectUser = session.prepare(userHelper.selectByPrimaryKey().asCql()); - } - - Optional login(String email, char[] password) { - return Optional.ofNullable(session.execute(preparedSelectCredentials.bind(email)).one()) - .flatMap( - credentialsRow -> { - String hashedPassword = credentialsRow.getString("password"); - if (PasswordHashing.matches(password, hashedPassword)) { - UUID userid = credentialsRow.getUuid("userid"); - Row userRow = session.execute(preparedSelectUser.bind(userid)).one(); - if (userRow == null) { - throw new IllegalStateException( - "Should have found matching row for userid " + userid); - } else { - return Optional.of(userHelper.get(userRow, false)); - } - } else { - return Optional.empty(); - } - }); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/PasswordHashing.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/PasswordHashing.java deleted file mode 100644 index def919bc1f8..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/PasswordHashing.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.mapper.killrvideo.user; - -import at.favre.lib.crypto.bcrypt.BCrypt; - -/** - * Utility methods to safely store passwords in the database. - * - *

    We rely on a third-party implementation of the bcrypt password hash function. - * - * @see patrickfav/bcrypt - */ -public class PasswordHashing { - - public static String hash(char[] password) { - return BCrypt.withDefaults().hashToString(12, password); - } - - public static boolean matches(char[] password, String hash) { - BCrypt.Result result = BCrypt.verifyer().verify(password, hash); - return result.verified; - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/User.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/User.java deleted file mode 100644 index f00b142dc7a..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/User.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.mapper.killrvideo.user; - -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import java.time.Instant; -import java.util.StringJoiner; -import java.util.UUID; - -@Entity -@CqlName("users") -public class User { - - @PartitionKey private UUID userid; - private String firstname; - private String lastname; - private String email; - private Instant createdDate; - - public User(UUID userid, String firstname, String lastname, String email, Instant createdDate) { - this.userid = userid; - this.firstname = firstname; - this.lastname = lastname; - this.email = email; - this.createdDate = createdDate; - } - - public User() {} - - public UUID getUserid() { - return userid; - } - - public void setUserid(UUID userid) { - this.userid = userid; - } - - public String getFirstname() { - return firstname; - } - - public void setFirstname(String firstname) { - this.firstname = firstname; - } - - public String getLastname() { - return lastname; - } - - public void setLastname(String lastname) { - this.lastname = lastname; - } - - public String getEmail() { - return email; - } - - public void setEmail(String email) { - this.email = email; - } - - public Instant getCreatedDate() { - return createdDate; - } - - public void setCreatedDate(Instant createdDate) { - this.createdDate = createdDate; - } - - @Override - public String toString() { - return new StringJoiner(", ", User.class.getSimpleName() + "[", "]") - .add("userid=" + userid) - .add("firstname='" + firstname + "'") - .add("lastname='" + lastname + "'") - .add("email='" + email + "'") - .add("createdDate=" + createdDate) - .toString(); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserCredentials.java 
b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserCredentials.java deleted file mode 100644 index 0b6b32219ce..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserCredentials.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.mapper.killrvideo.user; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import java.util.UUID; - -@Entity -public class UserCredentials { - @PartitionKey private String email; - - private String password; - - private UUID userid; - - public UserCredentials(String email, String password, UUID userid) { - this.email = email; - this.password = password; - this.userid = userid; - } - - public UserCredentials() {} - - public String getEmail() { - return email; - } - - public void setEmail(String email) { - this.email = email; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public UUID getUserid() { - return userid; - } - - public void setUserid(UUID userid) { - this.userid = userid; - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserDao.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserDao.java deleted file mode 100644 index 19f7f7cad0e..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserDao.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.mapper.killrvideo.user; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import java.util.Optional; -import java.util.UUID; - -@Dao -public interface UserDao { - - /** Simple selection by full primary key. */ - @Select - User get(UUID userid); - - @Select - UserCredentials getCredentials(String email); - - /** - * An alternative to query providers is default methods that call other methods on the DAO. - * - *

    The only drawback is that those other methods have to be part of the DAO's public API. - */ - default User getByEmail(String email) { - UserCredentials credentials = getCredentials(email); - return (credentials == null) ? null : get(credentials.getUserid()); - } - - /** - * Creating a user is more than a single insert: we have to update two different tables, check - * that the email is not used already, and handle password encryption. - * - *

    We use a query provider to wrap everything into a single method. - * - *

    Note that you could opt for a more layered approach: only expose basic operations on the DAO - * (insertCredentialsIfNotExists, insertUser...) and add a service layer on top for more complex - * logic. Both designs are valid, this is a matter of personal choice. - * - * @return {@code true} if the new user was created, or {@code false} if this email address was - * already taken. - */ - @QueryProvider( - providerClass = CreateUserQueryProvider.class, - entityHelpers = {User.class, UserCredentials.class}) - boolean create(User user, char[] password); - - /** - * Similar to {@link #create}, this encapsulates encryption so we use a query provider. - * - * @return the authenticated user, or {@link Optional#empty()} if the credentials are invalid. - */ - @QueryProvider( - providerClass = LoginQueryProvider.class, - entityHelpers = {User.class, UserCredentials.class}) - Optional login(String email, char[] password); -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/video/CreateVideoQueryProvider.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/video/CreateVideoQueryProvider.java deleted file mode 100644 index 132baa474d4..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/video/CreateVideoQueryProvider.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.mapper.killrvideo.video; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; - -/** - * Provides the implementation of {@link VideoDao#create}. - * - *

    Package-private visibility is sufficient, this will be called only from the generated DAO - * implementation. - */ -class CreateVideoQueryProvider { - - private final CqlSession session; - private final EntityHelper

    Also, note that this queries a different table: DAOs are not limited to a single entity, the - * return type of the method dictates what rows will be mapped to. - */ - @Select - PagingIterable getByUser(UUID userid); - - /** Other selection by partial primary key, for another table. */ - @Select - PagingIterable getLatest(String yyyymmdd); - - /** Other selection by partial primary key, for yet another table. */ - @Select - PagingIterable getByTag(String tag); - - /** - * Creating a video is a bit more complex: because of denormalization, it involves multiple - * tables. - * - *

    A query provider is a nice way to wrap all the queries in a single operation, and hide the - * details from the DAO interface. - */ - @QueryProvider( - providerClass = CreateVideoQueryProvider.class, - entityHelpers = {Video.class, UserVideo.class, LatestVideo.class, VideoByTag.class}) - void create(Video video); - - /** - * Update using a template: the template must have its full primary key set; beyond that, any - * non-null field will be considered as a value to SET on the target row. - * - *

    Note that we specify the null saving strategy for emphasis, but this is the default. - */ - @Update(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void update(Video template); -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java b/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java deleted file mode 100644 index a512457d618..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.paging; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.core.type.codec.DateCodec; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.sun.net.httpserver.HttpServer; -import java.io.IOException; -import java.net.URI; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import javax.annotation.PostConstruct; -import javax.inject.Inject; -import javax.inject.Singleton; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.core.UriInfo; -import org.glassfish.hk2.utilities.binding.AbstractBinder; -import org.glassfish.jersey.jdkhttp.JdkHttpServerFactory; -import org.glassfish.jersey.server.ResourceConfig; - -/** - * A stateless REST service (backed by Jersey, HK2 and the JDK HttpServer) that displays paginated results for - * a CQL query. - * - *

    Conversion to and from JSON is made through Jersey Jackson - * providers. - * - *

    Navigation is forward-only. The implementation relies on the paging state returned by - * Cassandra, and encodes it in HTTP URLs. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.forward_paging_rest_ui". If it already exists, it will be reused; - *
    • inserts data in the table; - *
    • launches a REST server listening on HTTP_PORT. - *
    - */ -public class ForwardPagingRestUi { - - private static final int HTTP_PORT = 8080; - - private static final int ITEMS_PER_PAGE = 10; - - private static final URI BASE_URI = - UriBuilder.fromUri("http://localhost/").path("").port(HTTP_PORT).build(); - - public static void main(String[] args) throws Exception { - - try (CqlSession session = CqlSession.builder().addTypeCodecs(new DateCodec()).build()) { - createSchema(session); - populateSchema(session); - startRestService(session); - } - } - - // Creates a table storing videos by users, in a typically denormalized way - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.forward_paging_rest_ui(" - + "userid int, username text, " - + "added timestamp, " - + "videoid int, title text, " - + "PRIMARY KEY (userid, added, videoid)" - + ") WITH CLUSTERING ORDER BY (added DESC, videoid ASC)"); - } - - private static void populateSchema(CqlSession session) { - PreparedStatement prepare = - session.prepare( - "INSERT INTO examples.forward_paging_rest_ui (userid, username, added, videoid, title) VALUES (?, ?, ?, ?, ?)"); - // 3 users - for (int i = 0; i < 3; i++) { - // 49 videos each - for (int j = 0; j < 49; j++) { - int videoid = i * 100 + j; - session.execute( - prepare.bind( - i, "user " + i, Instant.ofEpochMilli(j * 100000), videoid, "video " + videoid)); - } - } - } - - // starts the REST server using JDK HttpServer (com.sun.net.httpserver.HttpServer) - private static void startRestService(CqlSession session) - throws IOException, InterruptedException { - - final HttpServer server = - JdkHttpServerFactory.createHttpServer(BASE_URI, new VideoApplication(session), false); - final ExecutorService executor = Executors.newSingleThreadExecutor(); - server.setExecutor(executor); - Runtime.getRuntime() - 
.addShutdownHook( - new Thread( - () -> { - System.out.println(); - System.out.println("Stopping REST Service"); - server.stop(0); - executor.shutdownNow(); - System.out.println("REST Service stopped"); - })); - server.start(); - - System.out.println(); - System.out.printf( - "REST Service started on http://localhost:%d/users, press CTRL+C to stop%n", HTTP_PORT); - System.out.println( - "To explore this example, start with the following request and walk from there:"); - System.out.printf("curl -i http://localhost:%d/users/1/videos%n", HTTP_PORT); - System.out.println(); - - Thread.currentThread().join(); - } - - /** - * Configures the REST application and handles injection of custom objects, such as the driver - * session. - * - *

    This is also the place where you would normally configure JSON serialization, etc. - * - *

    Note that in this example, we rely on the automatic discovery and configuration of Jackson - * through {@code org.glassfish.jersey.jackson.JacksonFeature}. - */ - public static class VideoApplication extends ResourceConfig { - - public VideoApplication(final CqlSession session) { - super(UserService.class); - // AbstractBinder is provided by HK2 - register( - new AbstractBinder() { - - @Override - protected void configure() { - bind(session).to(CqlSession.class); - } - }); - } - } - - /** - * A typical REST service, handling requests involving users. - * - *

    Typically, this service would contain methods for listing and searching for users, and - * methods to retrieve user details. Here, for brevity, only one method, listing videos by user, - * is implemented. - */ - @Singleton - @Path("/users") - @Produces("application/json") - public static class UserService { - - @Inject private CqlSession session; - - @Context private UriInfo uri; - - private PreparedStatement videosByUser; - - @PostConstruct - @SuppressWarnings("unused") - public void init() { - this.videosByUser = - session.prepare( - "SELECT videoid, title, added FROM examples.forward_paging_rest_ui WHERE userid = ?"); - } - - /** - * Returns a paginated list of all the videos created by the given user. - * - * @param userid the user ID. - * @param page the page to request, or {@code null} to get the first page. - */ - @GET - @Path("/{userid}/videos") - public UserVideosResponse getUserVideos( - @PathParam("userid") int userid, @QueryParam("page") String page) { - - BoundStatementBuilder statementBuilder = - videosByUser.boundStatementBuilder(userid).setPageSize(ITEMS_PER_PAGE); - if (page != null) { - statementBuilder.setPagingState(Bytes.fromHexString(page)); - } - - ResultSet rs = session.execute(statementBuilder.build()); - String nextPage = Bytes.toHexString(rs.getExecutionInfo().getPagingState()); - - int remaining = rs.getAvailableWithoutFetching(); - List videos = new ArrayList<>(remaining); - - if (remaining > 0) { - for (Row row : rs) { - - UserVideo video = - new UserVideo(row.getInt("videoid"), row.getString("title"), row.getInstant("added")); - videos.add(video); - - // Make sure we don't go past the current page (we don't want the driver to fetch the next - // one) - if (--remaining == 0) { - break; - } - } - } - - URI next = null; - if (nextPage != null) - next = uri.getAbsolutePathBuilder().queryParam("page", nextPage).build(); - - return new UserVideosResponse(videos, next); - } - } - - @SuppressWarnings("unused") - public static class 
UserVideosResponse { - - private final List videos; - - private final URI nextPage; - - public UserVideosResponse(List videos, URI nextPage) { - this.videos = videos; - this.nextPage = nextPage; - } - - public List getVideos() { - return videos; - } - - public URI getNextPage() { - return nextPage; - } - } - - @SuppressWarnings("unused") - public static class UserVideo { - - private final int videoid; - - private final String title; - - private final Instant added; - - public UserVideo(int videoid, String title, Instant added) { - this.videoid = videoid; - this.title = title; - this.added = added; - } - - public int getVideoid() { - return videoid; - } - - public String getTitle() { - return title; - } - - public Instant getAdded() { - return added; - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java b/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java deleted file mode 100644 index 7eb3249b0ac..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.paging; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.paging.OffsetPager; -import com.datastax.oss.driver.api.core.paging.OffsetPager.Page; -import com.datastax.oss.driver.internal.core.type.codec.DateCodec; -import com.sun.net.httpserver.HttpServer; -import java.io.IOException; -import java.net.URI; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import javax.annotation.PostConstruct; -import javax.inject.Inject; -import javax.inject.Singleton; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.core.UriInfo; -import org.glassfish.hk2.utilities.binding.AbstractBinder; -import org.glassfish.jersey.jdkhttp.JdkHttpServerFactory; -import org.glassfish.jersey.server.ResourceConfig; - -/** - * A stateless REST service (backed by Jersey, HK2 and the JDK HttpServer) that displays paginated results for - * a CQL query. - * - *

    Conversion to and from JSON is made through Jersey Jackson - * providers. - * - *

    Navigation is bidirectional, and you can jump to a random page (by modifying the URL). - * Cassandra does not support offset queries (see - * https://issues.apache.org/jira/browse/CASSANDRA-6511), so we emulate it by restarting from the - * beginning each time, and iterating through the results until we reach the requested page. This is - * fundamentally inefficient (O(n) in the number of rows skipped), but the tradeoff might be - * acceptable for some use cases; for example, if you show 10 results per page and you think users - * are unlikely to browse past page 10, you only need to retrieve at most 100 rows. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    • creates a new keyspace "examples" in the cluster. If a keyspace with this name already - * exists, it will be reused; - *
    • creates a table "examples.random_paging_rest_ui". If it already exists, it will be reused; - *
    • inserts data in the table; - *
    • launches a REST server listening on HTTP_PORT. - *
    - */ -public class RandomPagingRestUi { - private static final int HTTP_PORT = 8080; - - private static final int ITEMS_PER_PAGE = 10; - // How many rows the driver will retrieve at a time. - // This is set artificially low for the sake of this example. - // Unless your rows are very large, you can probably use a much higher value (the driver's default - // is 5000). - private static final int FETCH_SIZE = 60; - - private static final URI BASE_URI = - UriBuilder.fromUri("http://localhost/").path("").port(HTTP_PORT).build(); - - public static void main(String[] args) throws Exception { - - try (CqlSession session = CqlSession.builder().addTypeCodecs(new DateCodec()).build()) { - createSchema(session); - populateSchema(session); - startRestService(session); - } - } - - // Creates a table storing videos by users, in a typically denormalized way - private static void createSchema(CqlSession session) { - session.execute( - "CREATE KEYSPACE IF NOT EXISTS examples " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - session.execute( - "CREATE TABLE IF NOT EXISTS examples.random_paging_rest_ui(" - + "userid int, username text, " - + "added timestamp, " - + "videoid int, title text, " - + "PRIMARY KEY (userid, added, videoid)" - + ") WITH CLUSTERING ORDER BY (added DESC, videoid ASC)"); - } - - private static void populateSchema(CqlSession session) { - PreparedStatement prepare = - session.prepare( - "INSERT INTO examples.random_paging_rest_ui (userid, username, added, videoid, title) VALUES (?, ?, ?, ?, ?)"); - - // 3 users - for (int i = 0; i < 3; i++) { - // 49 videos each - for (int j = 0; j < 49; j++) { - int videoid = i * 100 + j; - session.execute( - prepare.bind( - i, "user " + i, Instant.ofEpochMilli(j * 100000), videoid, "video " + videoid)); - } - } - } - - // starts the REST server using JDK HttpServer (com.sun.net.httpserver.HttpServer) - private static void startRestService(CqlSession session) - throws IOException, 
InterruptedException { - - final HttpServer server = - JdkHttpServerFactory.createHttpServer(BASE_URI, new VideoApplication(session), false); - final ExecutorService executor = Executors.newSingleThreadExecutor(); - server.setExecutor(executor); - Runtime.getRuntime() - .addShutdownHook( - new Thread( - () -> { - System.out.println(); - System.out.println("Stopping REST Service"); - server.stop(0); - executor.shutdownNow(); - System.out.println("REST Service stopped"); - })); - server.start(); - - System.out.println(); - System.out.printf( - "REST Service started on http://localhost:%d/users, press CTRL+C to stop%n", HTTP_PORT); - System.out.println( - "To explore this example, start with the following request and walk from there:"); - System.out.printf("curl -i http://localhost:%d/users/1/videos%n", HTTP_PORT); - System.out.println(); - - Thread.currentThread().join(); - } - - /** - * Configures the REST application and handles injection of custom objects, such as the driver - * session. - * - *

    This is also the place where you would normally configure JSON serialization, etc. - * - *

    Note that in this example, we rely on the automatic discovery and configuration of Jackson - * through {@code org.glassfish.jersey.jackson.JacksonFeature}. - */ - public static class VideoApplication extends ResourceConfig { - - public VideoApplication(final CqlSession session) { - super(UserService.class); - // AbstractBinder is provided by HK2 - register( - new AbstractBinder() { - - @Override - protected void configure() { - bind(session).to(CqlSession.class); - } - }); - } - } - - /** - * A typical REST service, handling requests involving users. - * - *

    Typically, this service would contain methods for listing and searching for users, and - * methods to retrieve user details. Here, for brevity, only one method, listing videos by user, - * is implemented. - */ - @Singleton - @Path("/users") - @Produces("application/json") - public static class UserService { - - @Inject private CqlSession session; - - @Context private UriInfo uri; - - private PreparedStatement videosByUser; - private OffsetPager pager; - - @PostConstruct - @SuppressWarnings("unused") - public void init() { - this.pager = new OffsetPager(ITEMS_PER_PAGE); - this.videosByUser = - session.prepare( - "SELECT videoid, title, added FROM examples.random_paging_rest_ui WHERE userid = ?"); - } - - /** - * Returns a paginated list of all the videos created by the given user. - * - * @param userid the user ID. - * @param requestedPageNumber the page to request, or {@code null} to get the first page. - */ - @GET - @Path("/{userid}/videos") - public UserVideosResponse getUserVideos( - @PathParam("userid") int userid, @QueryParam("page") Integer requestedPageNumber) { - - BoundStatement statement = videosByUser.bind(userid).setPageSize(FETCH_SIZE); - - if (requestedPageNumber == null) { - requestedPageNumber = 1; - } - Page page = pager.getPage(session.execute(statement), requestedPageNumber); - - List videos = new ArrayList<>(page.getElements().size()); - for (Row row : page.getElements()) { - UserVideo video = - new UserVideo(row.getInt("videoid"), row.getString("title"), row.getInstant("added")); - videos.add(video); - } - - // The actual number could be different if the requested one was past the end - int actualPageNumber = page.getPageNumber(); - URI previous = - (actualPageNumber == 1) - ? null - : uri.getAbsolutePathBuilder().queryParam("page", actualPageNumber - 1).build(); - URI next = - page.isLast() - ? 
null - : uri.getAbsolutePathBuilder().queryParam("page", actualPageNumber + 1).build(); - return new UserVideosResponse(videos, previous, next); - } - } - - @SuppressWarnings("unused") - public static class UserVideosResponse { - - private final List videos; - - private final URI previousPage; - - private final URI nextPage; - - public UserVideosResponse(List videos, URI previousPage, URI nextPage) { - this.videos = videos; - this.previousPage = previousPage; - this.nextPage = nextPage; - } - - public List getVideos() { - return videos; - } - - public URI getPreviousPage() { - return previousPage; - } - - public URI getNextPage() { - return nextPage; - } - } - - @SuppressWarnings("unused") - public static class UserVideo { - - private final int videoid; - - private final String title; - - private final Instant added; - - public UserVideo(int videoid, String title, Instant added) { - this.videoid = videoid; - this.title = title; - this.added = added; - } - - public int getVideoid() { - return videoid; - } - - public String getTitle() { - return title; - } - - public Instant getAdded() { - return added; - } - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java b/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java deleted file mode 100644 index 0577432600b..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java +++ /dev/null @@ -1,458 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.examples.retry; - -import static com.datastax.oss.driver.api.core.DefaultConsistencyLevel.QUORUM; -import static com.datastax.oss.driver.api.core.cql.DefaultBatchType.UNLOGGED; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.QueryConsistencyException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import java.util.List; - -/** - * This example illustrates how to implement a downgrading retry strategy from application code. - * - *

    This strategy is equivalent to the logic implemented by the consistency downgrading retry - * policy, but we think that such a logic should be implemented at application level whenever - * possible. - * - *

    See the FAQ - * and the manual - * section on retries. - * - *

    Preconditions: - * - *

      - *
    • An Apache Cassandra(R) cluster is running and accessible through the contacts points - * identified by basic.contact-points (see application.conf). - *
    - * - *

    Side effects: - * - *

      - *
    1. Creates a new keyspace {@code downgrading} in the cluster, with replication factor 3. If a - * keyspace with this name already exists, it will be reused; - *
    2. Creates a new table {@code downgrading.sensor_data}. If a table with that name exists - * already, it will be reused; - *
    3. Inserts a few rows, downgrading the consistency level if the operation fails; - *
    4. Queries the table, downgrading the consistency level if the operation fails; - *
    5. Displays the results on the console. - *
    - * - * Notes: - * - *
      - *
    • The downgrading logic here is similar to what {@code DowngradingConsistencyRetryPolicy} in - * 3.x driver does; feel free to adapt it to your application needs; - *
    • You should never attempt to retry a non-idempotent write. See the driver's manual page on - * idempotence for more information. - *
    - * - * @see Java Driver online - * manual - */ -public class DowngradingRetry { - /** The maximum number of retries to attempt. */ - private static final int MAX_RETRIES = 1; - - /** The initial consistency level to use. */ - private static final ConsistencyLevel INITIAL_CL = QUORUM; - - public static void main(String[] args) { - - DowngradingRetry client = new DowngradingRetry(MAX_RETRIES); - - try { - - client.connect(); - client.createSchema(); - client.write(INITIAL_CL, 0); - ResultSet rows = client.read(INITIAL_CL, 0); - client.display(rows); - - } finally { - client.close(); - } - } - - private final int maxRetries; - - private CqlSession session; - - private DowngradingRetry(int maxRetries) { - this.maxRetries = maxRetries; - } - - /** Initiates a connection to the session specified by the application.conf. */ - private void connect() { - session = CqlSession.builder().build(); - - System.out.printf("Connected to session: %s%n", session.getName()); - } - - /** Creates the schema (keyspace) and table for this example. */ - private void createSchema() { - - session.execute( - "CREATE KEYSPACE IF NOT EXISTS downgrading WITH replication " - + "= {'class':'SimpleStrategy', 'replication_factor':3}"); - - session.execute( - "CREATE TABLE IF NOT EXISTS downgrading.sensor_data (" - + "sensor_id uuid," - + "date date," - + // emulates bucketing by day - "timestamp timestamp," - + "value double," - + "PRIMARY KEY ((sensor_id,date),timestamp)" - + ")"); - } - - /** - * Inserts data, retrying if necessary with a downgraded CL. - * - * @param cl the consistency level to apply. - * @param retryCount the current retry count. - * @throws DriverException if the current consistency level cannot be downgraded. 
- */ - private void write(ConsistencyLevel cl, int retryCount) { - - System.out.printf("Writing at %s (retry count: %d)%n", cl, retryCount); - - BatchStatement batch = - BatchStatement.newInstance(UNLOGGED) - .add( - SimpleStatement.newInstance( - "INSERT INTO downgrading.sensor_data " - + "(sensor_id, date, timestamp, value) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'2018-02-26'," - + "'2018-02-26T13:53:46.345+01:00'," - + "2.34)")) - .add( - SimpleStatement.newInstance( - "INSERT INTO downgrading.sensor_data " - + "(sensor_id, date, timestamp, value) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'2018-02-26'," - + "'2018-02-26T13:54:27.488+01:00'," - + "2.47)")) - .add( - SimpleStatement.newInstance( - "INSERT INTO downgrading.sensor_data " - + "(sensor_id, date, timestamp, value) " - + "VALUES (" - + "756716f7-2e54-4715-9f00-91dcbea6cf50," - + "'2018-02-26'," - + "'2018-02-26T13:56:33.739+01:00'," - + "2.52)")) - .setConsistencyLevel(cl); - - try { - - session.execute(batch); - System.out.println("Write succeeded at " + cl); - - } catch (DriverException e) { - - if (retryCount == maxRetries) { - throw e; - } - - e = unwrapAllNodesFailedException(e); - - System.out.println("Write failed: " + e); - - // General intent: - // 1) If we know the write has been fully persisted on at least one replica, - // ignore the exception since the write will be eventually propagated to other replicas. - // 2) If the write couldn't be persisted at all, abort as it is unlikely that a retry would - // succeed. - // 3) If the write was only partially persisted, retry at the highest consistency - // level that is likely to succeed. - - if (e instanceof UnavailableException) { - - // With an UnavailableException, we know that the write wasn't even attempted. - // Downgrade to the number of replicas reported alive and retry. 
- int aliveReplicas = ((UnavailableException) e).getAlive(); - - ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e); - write(downgraded, retryCount + 1); - - } else if (e instanceof WriteTimeoutException) { - - DefaultWriteType writeType = (DefaultWriteType) ((WriteTimeoutException) e).getWriteType(); - int acknowledgements = ((WriteTimeoutException) e).getReceived(); - - switch (writeType) { - case SIMPLE: - case BATCH: - // For simple and batch writes, as long as one replica acknowledged the write, - // ignore the exception; if none responded however, abort as it is unlikely that - // a retry would ever succeed. - if (acknowledgements == 0) { - throw e; - } - break; - - case UNLOGGED_BATCH: - // For unlogged batches, the write might have been persisted only partially, - // so we can't simply ignore the exception: instead, we need to retry with - // consistency level equal to the number of acknowledged writes. - ConsistencyLevel downgraded = downgrade(cl, acknowledgements, e); - write(downgraded, retryCount + 1); - break; - - case BATCH_LOG: - // Rare edge case: the peers that were chosen by the coordinator - // to receive the distributed batch log failed to respond. - // Simply retry with same consistency level. - write(cl, retryCount + 1); - break; - - default: - // Other write types are uncommon and should not be retried. - throw e; - } - - } else { - - // Unexpected error: just retry with same consistency level - // and hope to talk to a healthier coordinator. - write(cl, retryCount + 1); - } - } - } - - /** - * Queries data, retrying if necessary with a downgraded CL. - * - * @param cl the consistency level to apply. - * @param retryCount the current retry count. - * @throws DriverException if the current consistency level cannot be downgraded. 
- */ - private ResultSet read(ConsistencyLevel cl, int retryCount) { - - System.out.printf("Reading at %s (retry count: %d)%n", cl, retryCount); - - Statement stmt = - SimpleStatement.newInstance( - "SELECT sensor_id, date, timestamp, value " - + "FROM downgrading.sensor_data " - + "WHERE " - + "sensor_id = 756716f7-2e54-4715-9f00-91dcbea6cf50 AND " - + "date = '2018-02-26' AND " - + "timestamp > '2018-02-26+01:00'") - .setConsistencyLevel(cl); - - try { - - ResultSet rows = session.execute(stmt); - System.out.println("Read succeeded at " + cl); - return rows; - - } catch (DriverException e) { - - if (retryCount == maxRetries) { - throw e; - } - - e = unwrapAllNodesFailedException(e); - - System.out.println("Read failed: " + e); - - // General intent: downgrade and retry at the highest consistency level - // that is likely to succeed. - - if (e instanceof UnavailableException) { - - // Downgrade to the number of replicas reported alive and retry. - int aliveReplicas = ((UnavailableException) e).getAlive(); - - ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e); - return read(downgraded, retryCount + 1); - - } else if (e instanceof ReadTimeoutException) { - - ReadTimeoutException readTimeout = (ReadTimeoutException) e; - int received = readTimeout.getReceived(); - int required = readTimeout.getBlockFor(); - - // If fewer replicas responded than required by the consistency level - // (but at least one replica did respond), retry with a consistency level - // equal to the number of received acknowledgements. - if (received < required) { - - ConsistencyLevel downgraded = downgrade(cl, received, e); - return read(downgraded, retryCount + 1); - } - - // If we received enough replies to meet the consistency level, - // but the actual data was not present among the received responses, - // then retry with the initial consistency level, we might be luckier next time - // and get the data back. 
- if (!readTimeout.wasDataPresent()) { - - return read(cl, retryCount + 1); - } - - // Otherwise, abort since the read timeout is unlikely to be solved by a retry. - throw e; - - } else { - - // Unexpected error: just retry with same consistency level - // and hope to talk to a healthier coordinator. - return read(cl, retryCount + 1); - } - } - } - - /** - * Displays the results on the console. - * - * @param rows the results to display. - */ - private void display(ResultSet rows) { - - final int width1 = 38; - final int width2 = 12; - final int width3 = 30; - final int width4 = 21; - - String format = "%-" + width1 + "s%-" + width2 + "s%-" + width3 + "s%-" + width4 + "s%n"; - - // headings - System.out.printf(format, "sensor_id", "date", "timestamp", "value"); - - // separators - drawLine(width1, width2, width3, width4); - - // data - for (Row row : rows) { - - System.out.printf( - format, - row.getUuid("sensor_id"), - row.getLocalDate("date"), - row.getInstant("timestamp"), - row.getDouble("value")); - } - } - - /** Closes the session and the cluster. */ - private void close() { - if (session != null) { - session.close(); - } - } - - /** - * Downgrades the current consistency level to the highest level that is likely to succeed, given - * the number of acknowledgements received. Rethrows the original exception if the current - * consistency level cannot be downgraded any further. - * - * @param current the current CL. - * @param acknowledgements the acknowledgements received. - * @param original the original exception. - * @return the downgraded CL. - * @throws DriverException if the current consistency level cannot be downgraded. 
- */ - private static ConsistencyLevel downgrade( - ConsistencyLevel current, int acknowledgements, DriverException original) { - if (acknowledgements >= 3) { - return DefaultConsistencyLevel.THREE; - } - if (acknowledgements == 2) { - return DefaultConsistencyLevel.TWO; - } - if (acknowledgements == 1) { - return DefaultConsistencyLevel.ONE; - } - // Edge case: EACH_QUORUM does not report a global number of alive replicas - // so even if we get 0 alive replicas, there might be - // a node up in some other datacenter, so retry at ONE. - if (current == DefaultConsistencyLevel.EACH_QUORUM) { - return DefaultConsistencyLevel.ONE; - } - throw original; - } - - /** - * If the driver was unable to contact any node, it throws an umbrella {@link - * NoNodeAvailableException} containing a map of the actual errors, keyed by host. - * - *

    This method unwraps this exception, inspects the map of errors, and returns the first - * exploitable {@link DriverException}. - * - * @param e the exception to unwrap. - * @return the unwrapped exception, or the original exception, if it is not an instance of {@link - * NoNodeAvailableException}. - * @throws NoNodeAvailableException the original exception, if it cannot be unwrapped. - */ - private static DriverException unwrapAllNodesFailedException(DriverException e) { - if (e instanceof AllNodesFailedException) { - AllNodesFailedException noHostAvailable = (AllNodesFailedException) e; - for (List errors : noHostAvailable.getAllErrors().values()) { - for (Throwable error : errors) { - if (error instanceof QueryConsistencyException || error instanceof UnavailableException) { - return (DriverException) error; - } - } - } - // Couldn't find an exploitable error to unwrap: abort. - throw e; - } - // the original exceptional wasn't a NoHostAvailableException: proceed. - return e; - } - - /** - * Draws a line to isolate headings from rows. - * - * @param widths the column widths. - */ - private static void drawLine(int... widths) { - for (int width : widths) { - for (int i = 1; i < width; i++) { - System.out.print('-'); - } - System.out.print('+'); - } - System.out.println(); - } -} diff --git a/examples/src/main/resources/application.conf b/examples/src/main/resources/application.conf deleted file mode 100644 index 170c08d973a..00000000000 --- a/examples/src/main/resources/application.conf +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -datastax-java-driver { - basic.contact-points = ["127.0.0.1:9042"] - basic { - load-balancing-policy { - local-datacenter = datacenter1 - } - } - # need in LimitConcurrencyRequestThrottler example - advanced.throttler { - class = ConcurrencyLimitingRequestThrottler - max-concurrent-requests = 32 - max-queue-size = 10000 - } - - advanced.request.warn-if-set-keyspace = false - - profiles { - slow { - basic.request.timeout = 10 seconds - } - } -} diff --git a/examples/src/main/resources/cassandra_logo.png b/examples/src/main/resources/cassandra_logo.png deleted file mode 100644 index 1637e61c14e..00000000000 Binary files a/examples/src/main/resources/cassandra_logo.png and /dev/null differ diff --git a/examples/src/main/resources/killrvideo_schema.cql b/examples/src/main/resources/killrvideo_schema.cql deleted file mode 100644 index 0c604ba5922..00000000000 --- a/examples/src/main/resources/killrvideo_schema.cql +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -// User credentials, keyed by email address so we can authenticate -CREATE TABLE IF NOT EXISTS user_credentials ( - email text, - password text, - userid uuid, - PRIMARY KEY (email) -); - -// Users keyed by id -CREATE TABLE IF NOT EXISTS users ( - userid uuid, - firstname text, - lastname text, - email text, - created_date timestamp, - PRIMARY KEY (userid) -); - -// Videos by id -CREATE TABLE IF NOT EXISTS videos ( - videoid uuid, - userid uuid, - name text, - description text, - location text, - location_type int, - preview_image_location text, - tags set, - added_date timestamp, - PRIMARY KEY (videoid) -); - -// One-to-many from user point of view (lookup table) -CREATE TABLE IF NOT EXISTS user_videos ( - userid uuid, - added_date timestamp, - videoid uuid, - name text, - preview_image_location text, - PRIMARY KEY (userid, added_date, videoid) -) WITH CLUSTERING ORDER BY (added_date DESC, videoid ASC); - -// Track latest videos, grouped by day (if we ever develop a bad hotspot from the daily grouping here, we could mitigate by -// splitting the row using an arbitrary group number, making the partition key (yyyymmdd, group_number)) -CREATE TABLE IF NOT EXISTS latest_videos ( - yyyymmdd text, - added_date timestamp, - videoid uuid, - userid uuid, - name text, - preview_image_location text, - PRIMARY KEY (yyyymmdd, added_date, videoid) -) WITH CLUSTERING ORDER BY (added_date DESC, videoid ASC); - -// Video ratings (counter table) -CREATE TABLE IF NOT EXISTS video_ratings ( - videoid uuid, - rating_counter counter, - rating_total counter, - 
PRIMARY KEY (videoid) -); - -// Video ratings by user (to try and mitigate voting multiple times) -CREATE TABLE IF NOT EXISTS video_ratings_by_user ( - videoid uuid, - userid uuid, - rating int, - PRIMARY KEY (videoid, userid) -); - -// Records the number of views/playbacks of a video -CREATE TABLE IF NOT EXISTS video_playback_stats ( - videoid uuid, - views counter, - PRIMARY KEY (videoid) -); - -// Recommendations by user (powered by Spark), with the newest videos added to the site always first -CREATE TABLE IF NOT EXISTS video_recommendations ( - userid uuid, - added_date timestamp, - videoid uuid, - rating float, - authorid uuid, - name text, - preview_image_location text, - PRIMARY KEY(userid, added_date, videoid) -) WITH CLUSTERING ORDER BY (added_date DESC, videoid ASC); - -// Recommendations by video (powered by Spark) -CREATE TABLE IF NOT EXISTS video_recommendations_by_video ( - videoid uuid, - userid uuid, - rating float, - added_date timestamp STATIC, - authorid uuid STATIC, - name text STATIC, - preview_image_location text STATIC, - PRIMARY KEY(videoid, userid) -); - -// Index for tag keywords -CREATE TABLE IF NOT EXISTS videos_by_tag ( - tag text, - videoid uuid, - added_date timestamp, - userid uuid, - name text, - preview_image_location text, - tagged_date timestamp, - PRIMARY KEY (tag, videoid) -); - -// Index for tags by first letter in the tag -CREATE TABLE IF NOT EXISTS tags_by_letter ( - first_letter text, - tag text, - PRIMARY KEY (first_letter, tag) -); - -// Comments for a given video -CREATE TABLE IF NOT EXISTS comments_by_video ( - videoid uuid, - commentid timeuuid, - userid uuid, - comment text, - PRIMARY KEY (videoid, commentid) -) WITH CLUSTERING ORDER BY (commentid DESC); - -// Comments for a given user -CREATE TABLE IF NOT EXISTS comments_by_user ( - userid uuid, - commentid timeuuid, - videoid uuid, - comment text, - PRIMARY KEY (userid, commentid) -) WITH CLUSTERING ORDER BY (commentid DESC); diff --git 
a/examples/src/main/resources/logback.xml b/examples/src/main/resources/logback.xml deleted file mode 100644 index 061ccccad37..00000000000 --- a/examples/src/main/resources/logback.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - diff --git a/faq/README.md b/faq/README.md deleted file mode 100644 index 97cb4decd00..00000000000 --- a/faq/README.md +++ /dev/null @@ -1,124 +0,0 @@ - - -## Frequently asked questions - -### I'm modifying a statement and the changes get ignored, why? - -In driver 4, statement classes are **immutable**. All mutating methods return a new instance, so -make sure you don't accidentally ignore their result: - -```java -BoundStatement boundSelect = preparedSelect.bind(); - -// This doesn't work: setInt and setPageSize don't modify boundSelect in place: -boundSelect.setInt("k", key); -boundSelect.setPageSize(1000); -session.execute(boundSelect); - -// Instead, reassign the statement every time: -boundSelect = boundSelect.setInt("k", key).setPageSize(1000); -``` - -All of these mutating methods are annotated with `@CheckReturnValue`. Some code analysis tools -- -such as [ErrorProne](https://errorprone.info/) -- can check correct usage at build time, and report -mistakes as compiler errors. - -The driver also provides builders: - -```java -BoundStatement boundSelect = - preparedSelect.boundStatementBuilder() - .setInt("k", key) - .setPageSize(1000) - .build(); -``` - -### Why do asynchronous methods return `CompletionStage` instead of `CompletableFuture`? - -Because it's the right abstraction to use. A completable future, as its name indicates, is a future -that can be completed manually; that is not what we want to return from our API: the driver -completes the futures, not the user. 
- -Also, `CompletionStage` does not expose a `get()` method; one can view that as an encouragement to -use a fully asynchronous programming model (chaining callbacks instead of blocking for a result). - -At any rate, `CompletionStage` has a `toCompletableFuture()` method. In current JDK versions, every -`CompletionStage` is a `CompletableFuture`, so the conversion has no performance overhead. - -### Where is `DowngradingConsistencyRetryPolicy` from driver 3? - -**As of driver 4.10, this retry policy was made available again as a built-in alternative to the -default retry policy**: see the [manual](../manual/core/retries) to understand how to use it. -For versions between 4.0 and 4.9 inclusive, there is no built-in equivalent of driver 3 -`DowngradingConsistencyRetryPolicy`. - -That retry policy was indeed removed in driver 4.0.0. The main motivation is that this behavior -should be the application's concern, not the driver's. APIs provided by the driver should instead -encourage idiomatic use of a distributed system like Apache Cassandra, and a downgrading policy -works against this. It suggests that an anti-pattern such as "try to read at QUORUM, but fall back -to ONE if that fails" is a good idea in general use cases, when in reality it provides no better -consistency guarantees than working directly at ONE, but with higher latencies. - -However, we recognize that there are use cases where downgrading is good -- for instance, a -dashboard application would present the latest information by reading at QUORUM, but it's acceptable -for it to display stale information by reading at ONE sometimes. - -Thanks to [JAVA-2900], an equivalent retry policy with downgrading behavior was re-introduced in -driver 4.10. Nonetheless, we urge users to avoid using it unless strictly required, and instead, -carefully choose upfront the consistency level that works best for their use cases. 
Even if there -is a legitimate reason to downgrade and retry, that should be preferably handled by the application -code. An example of downgrading retries implemented at application level can be found in the driver -[examples repository]. - -[JAVA-2900]: https://datastax-oss.atlassian.net/browse/JAVA-2900 -[examples repository]: https://github.com/datastax/java-driver/blob/4.x/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java - -### Where is the cross-datacenter failover feature that existed in driver 3? - -In driver 3, it was possible to configure the load balancing policy to automatically failover to -a remote datacenter, when the local datacenter is down. - -This ability is considered a misfeature and has been removed from driver 4.0 onwards. - -However, due to popular demand, cross-datacenter failover has been brought back to driver 4 in -version 4.10.0. - -If you are using a driver version >= 4.10.0, read the [manual](../manual/core/loadbalancing/) to -understand how to enable this feature; for driver versions < 4.10.0, this feature is simply not -available. - -### I want to set a date on a bound statement, where did `setTimestamp()` go? - -The driver now uses Java 8's improved date and time API. CQL type `timestamp` is mapped to -`java.time.Instant`, and the corresponding getter and setter are `getInstant` and `setInstant`. - -See [Temporal types](../manual/core/temporal_types/) for more details. - -### Why do DDL queries have a higher latency than driver 3? - -If you benchmark DDL queries such as `session.execute("CREATE TABLE ...")`, you will observe a -noticeably higher latency than driver 3 (about 1 second). - -This is because those queries are now *debounced*: the driver adds a short wait in an attempt to -group multiple schema changes into a single metadata refresh. 
If you want to mitigate this, you can -either adjust the debouncing settings, or group your schema updates while temporarily disabling the -metadata; see the [performance](../manual/core/performance/#debouncing) page. - -This only applies to DDL queries; DML statements (`SELECT`, `INSERT`...) are not debounced. diff --git a/guava-shaded/pom.xml b/guava-shaded/pom.xml deleted file mode 100644 index da2e82e0ab0..00000000000 --- a/guava-shaded/pom.xml +++ /dev/null @@ -1,242 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-guava-shaded - Apache Cassandra Java Driver - guava shaded dep - Shaded Guava artifact for use in the Java driver for Apache Cassandra® - - - com.google.guava - guava - - - com.google.code.findbugs - jsr305 - - - org.checkerframework - checker-qual - - - com.google.errorprone - error_prone_annotations - - - true - - - org.graalvm.nativeimage - svm - 20.0.0 - provided - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.12 - - - regex-property - - regex-property - - - maven.main.skip - ${java.version} - ^(?!1.8).+ - true - false - - - - - - maven-shade-plugin - - - shade-guava-dependency - package - - shade - - - - - org.apache.cassandra:java-driver-guava-shaded - com.google.guava:guava - com.google.guava:failureaccess - com.google.j2objc:j2objc-annotations - - - - - com.google - com.datastax.oss.driver.shaded.guava - - - - - com.google.guava:* - - META-INF/** - - - - true - true - - - - - - - maven-clean-plugin - - - clean-classes - package - - clean - - - ${project.build.outputDirectory} - - - - - - maven-dependency-plugin - - - unpack-shaded-classes - package - - unpack - - - ${project.build.outputDirectory} - - - org.apache.cassandra - java-driver-guava-shaded - ${project.version} - jar - - - - - - - - org.apache.felix - maven-bundle-plugin - - 3.5.0 - true - - - generate-shaded-manifest - package - - manifest - - - - com.datastax.oss.driver.shaded.guava - 
!com.datastax.oss.driver.shaded.guava.errorprone.*, !org.checkerframework.*, * - javax.annotation.*;resolution:=optional;version="[3.0,4)", javax.crypto.*;resolution:=optional, sun.misc.*;resolution:=optional, !com.oracle.svm.*, !com.datastax.oss.driver.shaded.guava.errorprone.*, !org.checkerframework.*, * - - - - - - - maven-assembly-plugin - - - generate-final-shaded-jar - package - - single - - - - - ${project.build.outputDirectory}/META-INF/MANIFEST.MF - - - src/assembly/shaded-jar.xml - - - false - - - - - - maven-jar-plugin - - - empty-javadoc-jar - - jar - - - javadoc - ${basedir}/src/main/javadoc - - - - - - org.revapi - revapi-maven-plugin - - true - - - - - diff --git a/guava-shaded/src/assembly/shaded-jar.xml b/guava-shaded/src/assembly/shaded-jar.xml deleted file mode 100644 index d762a27b20f..00000000000 --- a/guava-shaded/src/assembly/shaded-jar.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - shaded-jar - - jar - - false - - - - ${project.build.outputDirectory} - - META-INF/maven/org.apache.cassandra/java-driver-guava-shaded/pom.xml - - - - - - - - ${project.basedir}/dependency-reduced-pom.xml - META-INF/maven/org.apache.cassandra/java-driver-guava-shaded - pom.xml - - - diff --git a/guava-shaded/src/main/java/com/google/common/primitives/LexicographicalComparatorHolderSubstitution.java b/guava-shaded/src/main/java/com/google/common/primitives/LexicographicalComparatorHolderSubstitution.java deleted file mode 100644 index 95e9c70cdbc..00000000000 --- a/guava-shaded/src/main/java/com/google/common/primitives/LexicographicalComparatorHolderSubstitution.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.common.primitives; - -import com.oracle.svm.core.annotate.Alias; -import com.oracle.svm.core.annotate.RecomputeFieldValue; -import com.oracle.svm.core.annotate.Substitute; -import com.oracle.svm.core.annotate.TargetClass; -import java.util.Comparator; - -@TargetClass(UnsignedBytes.LexicographicalComparatorHolder.class) -final class LexicographicalComparatorHolderSubstitution { - - @Alias - @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.FromAlias) - static Comparator BEST_COMPARATOR = UnsignedBytes.lexicographicalComparatorJavaImpl(); - - /* All known cases should be covered by the field substitution above... keeping this only - * for sake of completeness */ - @Substitute - static Comparator getBestComparator() { - return UnsignedBytes.lexicographicalComparatorJavaImpl(); - } -} diff --git a/guava-shaded/src/main/java/com/google/common/primitives/UnsafeComparatorSubstitution.java b/guava-shaded/src/main/java/com/google/common/primitives/UnsafeComparatorSubstitution.java deleted file mode 100644 index 549de0b5c02..00000000000 --- a/guava-shaded/src/main/java/com/google/common/primitives/UnsafeComparatorSubstitution.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.common.primitives; - -import com.oracle.svm.core.annotate.Delete; -import com.oracle.svm.core.annotate.TargetClass; - -@TargetClass(UnsignedBytes.LexicographicalComparatorHolder.UnsafeComparator.class) -@Delete -final class UnsafeComparatorSubstitution {} diff --git a/guava-shaded/src/main/javadoc/README.txt b/guava-shaded/src/main/javadoc/README.txt deleted file mode 100644 index 57f82b2a265..00000000000 --- a/guava-shaded/src/main/javadoc/README.txt +++ /dev/null @@ -1,2 +0,0 @@ -This empty JAR is generated for compliance with Maven Central rules. Please refer to the original -Guava API docs. \ No newline at end of file diff --git a/install-snapshots.sh b/install-snapshots.sh deleted file mode 100755 index 795b4098f52..00000000000 --- a/install-snapshots.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Install dependencies in the Travis build environment if they are snapshots. -# See .travis.yml - -set -u - -install_snapshot() -{ - URL=$1 - DIRECTORY_NAME=$2 - # Assume the snapshot we want is on the head of the default branch - git clone --depth 1 ${URL} /tmp/${DIRECTORY_NAME} - { - cd /tmp/${DIRECTORY_NAME} - mvn install -DskipTests - } -} - -mvn --projects core dependency:list -DincludeArtifactIds=native-protocol | \ - tee /dev/tty | \ - grep -q native-protocol.*SNAPSHOT -if [ $? -eq 0 ] ; then - install_snapshot https://github.com/datastax/native-protocol.git native-protocol -fi diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml deleted file mode 100644 index e302e12077f..00000000000 --- a/integration-tests/pom.xml +++ /dev/null @@ -1,338 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-integration-tests - jar - Apache Cassandra Java Driver - integration tests - - false - ${skipITs} - ${skipITs} - ${skipITs} - - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-test-infra - test - - - org.apache.cassandra - java-driver-query-builder - test - - - org.apache.cassandra - java-driver-mapper-processor - test - true - - - org.apache.cassandra - java-driver-mapper-runtime - test - - - org.apache.cassandra - java-driver-core - test-jar - test - - - org.apache.cassandra - java-driver-metrics-micrometer - test - - - org.apache.cassandra - java-driver-metrics-microprofile - test - - - com.github.stephenc.jcip - 
jcip-annotations - test - - - com.github.spotbugs - spotbugs-annotations - test - - - com.fasterxml.jackson.core - jackson-core - test - - - com.fasterxml.jackson.core - jackson-databind - test - - - com.tngtech.java - junit-dataprovider - test - - - ch.qos.logback - logback-classic - test - - - org.mockito - mockito-core - test - - - com.google.guava - guava - test - - - org.xerial.snappy - snappy-java - test - - - at.yawk.lz4 - lz4-java - test - - - io.reactivex.rxjava2 - rxjava - test - - - org.apache.tinkerpop - gremlin-core - test - - - org.apache.tinkerpop - tinkergraph-gremlin - test - - - org.apache.directory.server - apacheds-core - test - - - org.apache.directory.server - apacheds-protocol-kerberos - test - - - org.apache.directory.server - apacheds-interceptor-kerberos - test - - - org.apache.directory.server - apacheds-protocol-ldap - test - - - org.apache.directory.server - apacheds-ldif-partition - test - - - org.apache.directory.server - apacheds-jdbm-partition - test - - - org.apache.directory.api - api-ldap-codec-standalone - test - - - com.github.tomakehurst - wiremock - test - - - com.datastax.oss.simulacron - simulacron-native-server - test - - - org.apache.commons - commons-exec - test - - - io.smallrye - smallrye-metrics - test - - - io.projectreactor - reactor-core - test - - - io.projectreactor - reactor-test - test - - - io.projectreactor.tools - blockhound-junit-platform - test - - - com.esri.geometry - esri-geometry-api - test - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - - - parallelizable-tests - - integration-test - - - ${testing.jvm}/bin/java - com.datastax.oss.driver.categories.ParallelizableTests - classes - 8 - ${project.build.directory}/failsafe-reports/failsafe-summary-parallelized.xml - ${skipParallelizableITs} - ${blockhound.argline} - ${testing.jvm}/bin/java - - - - serial-tests - - integration-test - - - com.datastax.oss.driver.categories.ParallelizableTests, com.datastax.oss.driver.categories.IsolatedTests - 
${project.build.directory}/failsafe-reports/failsafe-summary-serial.xml - ${skipSerialITs} - ${blockhound.argline} - ${testing.jvm}/bin/java - - - - isolated-tests - - integration-test - - - com.datastax.oss.driver.categories.IsolatedTests - - 1 - false - ${project.build.directory}/failsafe-reports/failsafe-summary-isolated.xml - ${skipIsolatedITs} - ${blockhound.argline} - ${testing.jvm}/bin/java - - - - verify-parallelized - - verify - - - ${skipParallelizableITs} - ${project.build.directory}/failsafe-reports/failsafe-summary-parallelized.xml - - - - verify-serial - - verify - - - ${skipSerialITs} - ${project.build.directory}/failsafe-reports/failsafe-summary-serial.xml - - - - verify-isolated - - verify - - - ${skipIsolatedITs} - ${project.build.directory}/failsafe-reports/failsafe-summary-isolated.xml - - - - - - org.revapi - revapi-maven-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderAlternateIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderAlternateIT.java deleted file mode 100644 index 55c420e276b..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderAlternateIT.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - description = "Required for DseAuthenticator") -@RunWith(DataProviderRunner.class) -public class DseGssApiAuthProviderAlternateIT { - @ClassRule public static EmbeddedAdsRule ads = new EmbeddedAdsRule(true); - - @DataProvider - public static Object[][] saslSystemProperties() { - return new Object[][] {{"dse.sasl.service"}, {"dse.sasl.protocol"}}; - } - - @Test - @UseDataProvider("saslSystemProperties") - public void - 
should_authenticate_using_kerberos_with_keytab_and_alternate_service_principal_using_system_property( - String saslSystemProperty) { - System.setProperty(saslSystemProperty, "alternate"); - try (CqlSession session = - SessionUtils.newSession( - ads.getCcm(), - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) - .withStringMap( - DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, - ImmutableMap.of("javax.security.sasl.qop", "auth-conf")) - .withStringMap( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, - ImmutableMap.of( - "principal", - ads.getUserPrincipal(), - "useKeyTab", - "true", - "refreshKrb5Config", - "true", - "keyTab", - ads.getUserKeytab().getAbsolutePath())) - .build())) { - Row row = session.execute("select * from system.local").one(); - assertThat(row).isNotNull(); - } finally { - System.clearProperty(saslSystemProperty); - } - } - - @Test - public void should_authenticate_using_kerberos_with_keytab_and_alternate_service_principal() { - try (CqlSession session = - SessionUtils.newSession( - ads.getCcm(), - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) - .withString(DseDriverOption.AUTH_PROVIDER_SERVICE, "alternate") - .withStringMap( - DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, - ImmutableMap.of("javax.security.sasl.qop", "auth-conf")) - .withStringMap( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, - ImmutableMap.of( - "principal", - ads.getUserPrincipal(), - "useKeyTab", - "true", - "refreshKrb5Config", - "true", - "keyTab", - ads.getUserKeytab().getAbsolutePath())) - .build())) { - Row row = session.execute("select * from system.local").one(); - assertThat(row).isNotNull(); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderIT.java 
deleted file mode 100644 index 4ee28d62367..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderIT.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.auth; - -import static com.datastax.dse.driver.api.core.auth.KerberosUtils.acquireTicket; -import static com.datastax.dse.driver.api.core.auth.KerberosUtils.destroyTicket; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.util.List; -import java.util.Map; -import org.junit.Assume; -import org.junit.ClassRule; -import org.junit.Test; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - description = "Required for DseAuthenticator") -public class DseGssApiAuthProviderIT { - - @ClassRule public static EmbeddedAdsRule ads = new EmbeddedAdsRule(); - - /** - * Ensures that a Session can be established to a DSE server secured with Kerberos and that simple - * queries can be made using a client configuration that provides a keytab file. - */ - @Test - public void should_authenticate_using_kerberos_with_keytab() { - try (CqlSession session = ads.newKeyTabSession()) { - ResultSet set = session.execute("select * from system.local"); - assertThat(set).isNotNull(); - } - } - - /** - * Ensures that a Session can be established to a DSE server secured with Kerberos and that simple - * queries can be made using a client configuration that uses the ticket cache. This test will - * only run on unix platforms since it uses kinit to acquire tickets and kdestroy to destroy them. 
- */ - @Test - public void should_authenticate_using_kerberos_with_ticket() throws Exception { - String osName = System.getProperty("os.name", "").toLowerCase(); - boolean isUnix = osName.contains("mac") || osName.contains("darwin") || osName.contains("nux"); - Assume.assumeTrue(isUnix); - acquireTicket(ads.getUserPrincipal(), ads.getUserKeytab(), ads.getAdsServer()); - try (CqlSession session = ads.newTicketSession()) { - ResultSet set = session.execute("select * from system.local"); - assertThat(set).isNotNull(); - } finally { - destroyTicket(ads); - } - } - - /** - * Validates that an AllNodesFailedException is thrown when using a ticket-based configuration and - * no such ticket exists in the user's cache. This is expected because we shouldn't be able to - * establish connection to a cassandra node if we cannot authenticate. - * - * @test_category dse:authentication - */ - @SuppressWarnings("unused") - @Test - public void should_not_authenticate_if_no_ticket_in_cache() { - try (CqlSession session = ads.newTicketSession()) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException e) { - verifyException(e); - } - } - - /** - * Validates that an AllNodesFailedException is thrown when using a keytab-based configuration and - * no such user exists for the given principal. This is expected because we shouldn't be able to - * establish connection to a cassandra node if we cannot authenticate. 
- * - * @test_category dse:authentication - */ - @SuppressWarnings("unused") - @Test - public void should_not_authenticate_if_keytab_does_not_map_to_valid_principal() { - try (CqlSession session = - ads.newKeyTabSession(ads.getUnknownPrincipal(), ads.getUnknownKeytab().getAbsolutePath())) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException e) { - verifyException(e); - } - } - /** - * Ensures that a Session can be established to a DSE server secured with Kerberos and that simple - * queries can be made using a client configuration that is provided via programatic interface - */ - @Test - public void should_authenticate_using_kerberos_with_keytab_programmatically() { - DseGssApiAuthProviderBase.GssApiOptions.Builder builder = - DseGssApiAuthProviderBase.GssApiOptions.builder(); - Map loginConfig = - ImmutableMap.of( - "principal", - ads.getUserPrincipal(), - "useKeyTab", - "true", - "refreshKrb5Config", - "true", - "keyTab", - ads.getUserKeytab().getAbsolutePath()); - - builder.withLoginConfiguration(loginConfig); - try (CqlSession session = - CqlSession.builder() - .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build())) - .build()) { - - ResultSet set = session.execute("select * from system.local"); - assertThat(set).isNotNull(); - } - } - - private void verifyException(AllNodesFailedException anfe) { - assertThat(anfe.getAllErrors()).hasSize(1); - List errors = anfe.getAllErrors().values().iterator().next(); - assertThat(errors).hasSize(1); - Throwable firstError = errors.get(0); - assertThat(firstError) - .isInstanceOf(AuthenticationException.class) - .hasMessageContaining("Authentication error on node /127.0.0.1:9042"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderIT.java deleted file mode 100644 index 256c18f841d..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderIT.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import java.util.List; -import java.util.concurrent.TimeUnit; -import 
org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - description = "Required for DseAuthenticator") -public class DsePlainTextAuthProviderIT { - - @ClassRule - public static CustomCcmRule ccm = - CustomCcmRule.builder() - .withCassandraConfiguration( - "authenticator", "com.datastax.bdp.cassandra.auth.DseAuthenticator") - .withDseConfiguration("authentication_options.enabled", true) - .withDseConfiguration("authentication_options.default_scheme", "internal") - .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") - .build(); - - @BeforeClass - public static void sleepForAuth() { - if (ccm.getCassandraVersion().compareTo(Version.V2_2_0) < 0) { - // Sleep for 1 second to allow C* auth to do its work. This is only needed for 2.1 - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - } - - @Test - public void should_connect_dse_plaintext_auth() { - try (CqlSession session = - SessionUtils.newSession( - ccm, - SessionUtils.configLoaderBuilder() - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - session.execute("select * from system.local"); - } - } - - @Test - public void should_connect_dse_plaintext_auth_programmatically() { - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(ccm.getContactPoints()) - .withAuthCredentials("cassandra", "cassandra") - .build()) { - session.execute("select * from system.local"); - } - } - - @SuppressWarnings("unused") - @Test - public void should_not_connect_with_invalid_credentials() { - try (CqlSession session = - SessionUtils.newSession( - ccm, - SessionUtils.configLoaderBuilder() - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") - 
.withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "NotARealPassword") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException e) { - verifyException(e); - } - } - - @SuppressWarnings("unused") - @Test - public void should_not_connect_without_credentials() { - try (CqlSession session = - SessionUtils.newSession( - ccm, - SessionUtils.configLoaderBuilder() - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - fail("Expected AllNodesFailedException"); - } catch (AllNodesFailedException e) { - verifyException(e); - } - } - - private void verifyException(AllNodesFailedException anfe) { - assertThat(anfe.getAllErrors()).hasSize(1); - List errors = anfe.getAllErrors().values().iterator().next(); - assertThat(errors).hasSize(1); - Throwable firstError = errors.get(0); - assertThat(firstError) - .isInstanceOf(AuthenticationException.class) - .hasMessageContaining("Authentication error on node /127.0.0.1:9042"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseProxyAuthenticationIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseProxyAuthenticationIT.java deleted file mode 100644 index a3f1c04afc0..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseProxyAuthenticationIT.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.servererrors.UnauthorizedException; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import java.util.List; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1", - description = "Required for DseAuthenticator with proxy") -public class 
DseProxyAuthenticationIT { - private static String bobPrincipal; - private static String charliePrincipal; - @ClassRule public static EmbeddedAdsRule ads = new EmbeddedAdsRule(); - - @BeforeClass - public static void addUsers() { - bobPrincipal = ads.addUserAndCreateKeyTab("bob", "fakePasswordForBob"); - charliePrincipal = ads.addUserAndCreateKeyTab("charlie", "fakePasswordForCharlie"); - } - - @Before - public void setupRoles() { - - SchemaChangeSynchronizer.withLock( - () -> { - try (CqlSession session = ads.newKeyTabSession()) { - session.execute( - "CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'fakePasswordForAlice' AND LOGIN = FALSE"); - session.execute( - "CREATE ROLE IF NOT EXISTS ben WITH PASSWORD = 'fakePasswordForBen' AND LOGIN = TRUE"); - session.execute("CREATE ROLE IF NOT EXISTS 'bob@DATASTAX.COM' WITH LOGIN = TRUE"); - session.execute( - "CREATE ROLE IF NOT EXISTS 'charlie@DATASTAX.COM' WITH PASSWORD = 'fakePasswordForCharlie' AND LOGIN = TRUE"); - session.execute( - "CREATE ROLE IF NOT EXISTS steve WITH PASSWORD = 'fakePasswordForSteve' AND LOGIN = TRUE"); - session.execute( - "CREATE KEYSPACE IF NOT EXISTS aliceks WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'}"); - session.execute( - "CREATE TABLE IF NOT EXISTS aliceks.alicetable (key text PRIMARY KEY, value text)"); - session.execute( - "INSERT INTO aliceks.alicetable (key, value) VALUES ('hello', 'world')"); - session.execute("GRANT ALL ON KEYSPACE aliceks TO alice"); - session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'ben'"); - session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'bob@DATASTAX.COM'"); - session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'steve'"); - session.execute( - "GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'charlie@DATASTAX.COM'"); - session.execute("GRANT PROXY.LOGIN ON ROLE 'alice' TO 'ben'"); - session.execute("GRANT PROXY.LOGIN ON ROLE 'alice' TO 'bob@DATASTAX.COM'"); - session.execute("GRANT 
PROXY.EXECUTE ON ROLE 'alice' TO 'steve'"); - session.execute("GRANT PROXY.EXECUTE ON ROLE 'alice' TO 'charlie@DATASTAX.COM'"); - // ben and bob are allowed to login as alice, but not execute as alice. - // charlie and steve are allowed to execute as alice, but not login as alice. - } - }); - } - /** - * Validates that a connection may be successfully made as user 'alice' using the credentials of a - * user 'ben' using {@link PlainTextAuthProvider} assuming ben has PROXY.LOGIN authorization on - * alice. - */ - @Test - public void should_allow_plain_text_authorized_user_to_login_as() { - try (CqlSession session = - SessionUtils.newSession( - ads.ccm, - SessionUtils.configLoaderBuilder() - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "alice") - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "ben") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForBen") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - ResultSet set = session.execute(select); - assertThat(set).isNotNull(); - } - } - - @Test - public void should_allow_plain_text_authorized_user_to_login_as_programmatically() { - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(ads.ccm.getContactPoints()) - .withAuthCredentials("ben", "fakePasswordForBen", "alice") - .build()) { - session.execute("select * from system.local"); - } - } - - /** - * Validates that a connection may successfully made as user 'alice' using the credentials of a - * principal 'bob@DATASTAX.COM' using {@link DseGssApiAuthProvider} assuming 'bob@DATASTAX.COM' - * has PROXY.LOGIN authorization on alice. 
- */ - @Test - public void should_allow_kerberos_authorized_user_to_login_as() { - try (CqlSession session = - ads.newKeyTabSession( - bobPrincipal, ads.getKeytabForPrincipal(bobPrincipal).getAbsolutePath(), "alice")) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - ResultSet set = session.execute(select); - assertThat(set).isNotNull(); - } - } - - /** - * Validates that a connection does not succeed as user 'alice' using the credentials of a user - * 'steve' assuming 'steve' does not have PROXY.LOGIN authorization on alice. - */ - @Test - public void should_not_allow_plain_text_unauthorized_user_to_login_as() { - try (CqlSession session = - SessionUtils.newSession( - ads.ccm, - SessionUtils.configLoaderBuilder() - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "alice") - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "steve") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForSteve") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - session.execute(select); - fail("Should have thrown AllNodesFailedException on login."); - } catch (AllNodesFailedException anfe) { - verifyException(anfe); - } - } - /** - * Validates that a connection does not succeed as user 'alice' using the credentials of a - * principal 'charlie@DATASTAX.COM' assuming 'charlie@DATASTAX.COM' does not have PROXY.LOGIN - * authorization on alice. 
- */ - @Test - public void should_not_allow_kerberos_unauthorized_user_to_login_as() throws Exception { - try (CqlSession session = - ads.newKeyTabSession( - charliePrincipal, - ads.getKeytabForPrincipal(charliePrincipal).getAbsolutePath(), - "alice")) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - session.execute(select); - fail("Should have thrown AllNodesFailedException on login."); - } catch (AllNodesFailedException anfe) { - verifyException(anfe); - } - } - /** - * Validates that a query may be successfully made as user 'alice' using a {@link CqlSession} that - * is authenticated to user 'steve' using {@link PlainTextAuthProvider} assuming steve has - * PROXY.EXECUTE authorization on alice. - */ - @Test - public void should_allow_plain_text_authorized_user_to_execute_as() { - try (CqlSession session = - SessionUtils.newSession( - ads.ccm, - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "steve") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForSteve") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); - ResultSet set = session.execute(statementAsAlice); - assertThat(set).isNotNull(); - } - } - /** - * Validates that a query may be successfully made as user 'alice' using a {@link CqlSession} that - * is authenticated to principal 'charlie@DATASTAX.COM' using {@link DseGssApiAuthProvider} - * assuming charlie@DATASTAX.COM has PROXY.EXECUTE authorization on alice. 
- */ - @Test - public void should_allow_kerberos_authorized_user_to_execute_as() { - try (CqlSession session = - ads.newKeyTabSession( - charliePrincipal, ads.getKeytabForPrincipal(charliePrincipal).getAbsolutePath())) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); - ResultSet set = session.execute(statementAsAlice); - assertThat(set).isNotNull(); - } - } - /** - * Validates that a query may not be made as user 'alice' using a {@link CqlSession} that is - * authenticated to user 'ben' if ben does not have PROXY.EXECUTE authorization on alice. - */ - @Test - public void should_not_allow_plain_text_unauthorized_user_to_execute_as() { - try (CqlSession session = - SessionUtils.newSession( - ads.ccm, - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "ben") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForBen") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build())) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); - session.execute(statementAsAlice); - fail("Should have thrown UnauthorizedException on executeAs."); - } catch (UnauthorizedException ue) { - verifyException(ue, "ben"); - } - } - /** - * Validates that a query may not be made as user 'alice' using a {@link CqlSession} that is - * authenticated to principal 'bob@DATASTAX.COM' using {@link DseGssApiAuthProvider} if - * bob@DATASTAX.COM does not have PROXY.EXECUTE authorization on alice. 
- */ - @Test - public void should_not_allow_kerberos_unauthorized_user_to_execute_as() { - try (CqlSession session = - ads.newKeyTabSession( - bobPrincipal, ads.getKeytabForPrincipal(bobPrincipal).getAbsolutePath())) { - SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); - SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); - session.execute(statementAsAlice); - fail("Should have thrown UnauthorizedException on executeAs."); - } catch (UnauthorizedException ue) { - verifyException(ue, "bob@DATASTAX.COM"); - } - } - - private void verifyException(AllNodesFailedException anfe) { - assertThat(anfe.getAllErrors()).hasSize(1); - List errors = anfe.getAllErrors().values().iterator().next(); - assertThat(errors).hasSize(1); - Throwable firstError = errors.get(0); - assertThat(firstError) - .isInstanceOf(AuthenticationException.class) - .hasMessageContaining( - "Authentication error on node /127.0.0.1:9042: " - + "server replied with 'Failed to login. Please re-try.' to AuthResponse request"); - } - - private void verifyException(UnauthorizedException ue, String user) { - assertThat(ue.getMessage()) - .contains( - String.format( - "Either '%s' does not have permission to execute queries as 'alice' " - + "or that role does not exist.", - user)); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAds.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAds.java deleted file mode 100644 index 5ca751e9151..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAds.java +++ /dev/null @@ -1,607 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.datastax.oss.driver.shaded.guava.common.io.Files; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.PrintWriter; -import java.net.InetAddress; -import java.net.ServerSocket; -import java.net.UnknownHostException; -import java.nio.charset.Charset; -import java.util.Collections; -import java.util.Map; -import java.util.UUID; -import org.apache.directory.api.ldap.model.constants.SchemaConstants; -import org.apache.directory.api.ldap.model.constants.SupportedSaslMechanisms; -import org.apache.directory.api.ldap.model.csn.CsnFactory; -import org.apache.directory.api.ldap.model.entry.Entry; -import org.apache.directory.api.ldap.model.exception.LdapException; -import org.apache.directory.api.ldap.model.exception.LdapInvalidDnException; -import org.apache.directory.api.ldap.model.name.Dn; -import org.apache.directory.api.ldap.model.schema.SchemaManager; -import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; -import org.apache.directory.server.constants.ServerDNConstants; -import 
org.apache.directory.server.core.DefaultDirectoryService; -import org.apache.directory.server.core.api.CacheService; -import org.apache.directory.server.core.api.DirectoryService; -import org.apache.directory.server.core.api.DnFactory; -import org.apache.directory.server.core.api.InstanceLayout; -import org.apache.directory.server.core.api.schema.SchemaPartition; -import org.apache.directory.server.core.kerberos.KeyDerivationInterceptor; -import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition; -import org.apache.directory.server.core.partition.ldif.LdifPartition; -import org.apache.directory.server.core.shared.DefaultDnFactory; -import org.apache.directory.server.kerberos.KerberosConfig; -import org.apache.directory.server.kerberos.kdc.KdcServer; -import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory; -import org.apache.directory.server.kerberos.shared.keytab.Keytab; -import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry; -import org.apache.directory.server.ldap.LdapServer; -import org.apache.directory.server.ldap.handlers.sasl.MechanismHandler; -import org.apache.directory.server.ldap.handlers.sasl.cramMD5.CramMd5MechanismHandler; -import org.apache.directory.server.ldap.handlers.sasl.digestMD5.DigestMd5MechanismHandler; -import org.apache.directory.server.ldap.handlers.sasl.gssapi.GssapiMechanismHandler; -import org.apache.directory.server.ldap.handlers.sasl.plain.PlainMechanismHandler; -import org.apache.directory.server.protocol.shared.transport.TcpTransport; -import org.apache.directory.server.protocol.shared.transport.UdpTransport; -import org.apache.directory.shared.kerberos.KerberosTime; -import org.apache.directory.shared.kerberos.codec.types.EncryptionType; -import org.apache.directory.shared.kerberos.components.EncryptionKey; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A convenience utility for running an Embedded Apache Directory Service with LDAP and 
optionally a - * Kerberos Key Distribution Server. By default listens for LDAP on 10389 and Kerberos on 60088. You - * can use something like Apache Directory Studio - * to verify the server is configured and running correctly by connecting to localhost:10389 with - * username 'uid=admin,ou=system' and password 'secret'. - * - *

    Note: This should only be used for development and testing purposes. - */ -public class EmbeddedAds { - - private static final Logger LOG = LoggerFactory.getLogger(EmbeddedAds.class); - - private final String dn; - - private final String realm; - - private int kdcPort; - - private int ldapPort; - - private final boolean kerberos; - - private InetAddress address; - - private String hostname; - - private File confDir; - - private volatile boolean isInit = false; - - private DirectoryService service; - - private LdapServer ldapServer; - - private KdcServer kdcServer; - - private Dn usersDN; - - private File krb5Conf; - - private EmbeddedAds( - String dn, - String realm, - String address, - int ldapPort, - boolean kerberos, - int kdcPort, - File confDir) { - this.dn = dn; - this.realm = realm; - try { - this.address = InetAddress.getByName(address); - } catch (UnknownHostException e) { - LOG.error("Failure resolving address '{}', falling back to loopback.", address, e); - this.address = InetAddress.getLoopbackAddress(); - } - this.hostname = this.address.getHostName().toLowerCase(); - this.ldapPort = ldapPort; - this.kerberos = kerberos; - this.kdcPort = kdcPort; - this.confDir = confDir; - } - - public void start() throws Exception { - if (isInit) { - return; - } - isInit = true; - File workDir = Files.createTempDir(); - // Set confDir = workDir if not defined. - if (confDir == null) { - confDir = workDir; - } - - if (kerberos) { - kdcPort = kdcPort != -1 ? kdcPort : findAvailablePort(60088); - - // Set system properties required for kerberos auth to work. Unfortunately admin_server - // cannot be expressed via System properties (like realm and kdc can), thus we must create a - // config file. - krb5Conf = createKrb5Conf(); - - System.setProperty("java.security.krb5.conf", krb5Conf.getAbsolutePath()); - // Useful options for debugging. 
- // System.setProperty("sun.security.krb5.debug", "true"); - // System.setProperty("java.security.debug", "configfile,configparser,gssloginconfig"); - } - - // Initialize service and set its filesystem layout. - service = new DefaultDirectoryService(); - InstanceLayout layout = new InstanceLayout(workDir); - service.setInstanceLayout(layout); - - // Disable ChangeLog as we don't need change tracking. - service.getChangeLog().setEnabled(false); - // Denormalizes attribute DNs to be human readable, i.e uid=admin,ou=system instead of - // 0.9.2.3=admin,2.5=system) - service.setDenormalizeOpAttrsEnabled(true); - - // Create and init cache service which will be used for caching DNs, among other things. - CacheService cacheService = new CacheService(); - cacheService.initialize(layout); - - // Create and load SchemaManager which will create the default schema partition. - SchemaManager schemaManager = new DefaultSchemaManager(); - service.setSchemaManager(schemaManager); - schemaManager.loadAllEnabled(); - - // Create SchemaPartition from schema manager and load ldif from schema directory. - SchemaPartition schemaPartition = new SchemaPartition(schemaManager); - LdifPartition ldifPartition = new LdifPartition(schemaManager, service.getDnFactory()); - ldifPartition.setPartitionPath(new File(layout.getPartitionsDirectory(), "schema").toURI()); - schemaPartition.setWrappedPartition(ldifPartition); - service.setSchemaPartition(schemaPartition); - - // Create a DN factory which can be used to create and cache DNs. - DnFactory dnFactory = new DefaultDnFactory(schemaManager, cacheService.getCache("dnCache")); - service.setDnFactory(dnFactory); - - // Create mandatory system partition. This is used for storing server configuration. - JdbmPartition systemPartition = - createPartition("system", dnFactory.create(ServerDNConstants.SYSTEM_DN)); - service.setSystemPartition(systemPartition); - - // Now that we have a schema and system partition, start up the directory service. 
- service.startup(); - - // Create partition where user, tgt and ldap principals will live. - Dn partitionDn = dnFactory.create(dn); - String dnName = partitionDn.getRdn().getValue().getString(); - JdbmPartition partition = createPartition(dnName, partitionDn); - - // Add a context entry so the partition can be referenced by entries. - Entry context = service.newEntry(partitionDn); - context.add("objectClass", "top", "domain", "extensibleObject"); - context.add(partitionDn.getRdn().getType(), dnName); - partition.setContextEntry(context); - service.addPartition(partition); - - // Create users domain. - usersDN = partitionDn.add(dnFactory.create("ou=users")); - Entry usersEntry = service.newEntry(usersDN); - usersEntry.add("objectClass", "organizationalUnit", "top"); - usersEntry.add("ou", "users"); - if (kerberos) { - usersEntry = kerberize(usersEntry); - } - service.getAdminSession().add(usersEntry); - - // Uncomment to allow to connect to ldap server without credentials for convenience. - // service.setAllowAnonymousAccess(true); - - startLdap(); - - // Create sasl and krbtgt principals and start KDC if kerberos is enabled. - if (kerberos) { - // Ticket Granting Ticket entry. - Dn tgtDN = usersDN.add(dnFactory.create("uid=krbtgt")); - String servicePrincipal = "krbtgt/" + realm + "@" + realm; - Entry tgtEntry = service.newEntry(tgtDN); - tgtEntry.add( - "objectClass", - "person", - "inetOrgPerson", - "top", - "krb5KDCEntry", - "uidObject", - "krb5Principal"); - tgtEntry.add("krb5KeyVersionNumber", "0"); - tgtEntry.add("krb5PrincipalName", servicePrincipal); - tgtEntry.add("uid", "krbtgt"); - tgtEntry.add("userPassword", "secret"); - tgtEntry.add("sn", "Service"); - tgtEntry.add("cn", "KDC Service"); - service.getAdminSession().add(kerberize(tgtEntry)); - - // LDAP SASL principal. 
- String saslPrincipal = "ldap/" + hostname + "@" + realm; - ldapServer.setSaslPrincipal(saslPrincipal); - Dn ldapDN = usersDN.add(dnFactory.create("uid=ldap")); - Entry ldapEntry = service.newEntry(ldapDN); - ldapEntry.add( - "objectClass", - "top", - "person", - "inetOrgPerson", - "krb5KDCEntry", - "uidObject", - "krb5Principal"); - ldapEntry.add("krb5KeyVersionNumber", "0"); - ldapEntry.add("krb5PrincipalName", saslPrincipal); - ldapEntry.add("uid", "ldap"); - ldapEntry.add("userPassword", "secret"); - ldapEntry.add("sn", "Service"); - ldapEntry.add("cn", "LDAP Service"); - service.getAdminSession().add(kerberize(ldapEntry)); - - startKDC(servicePrincipal); - } - } - - public boolean isStarted() { - return this.isInit; - } - - private File createKrb5Conf() throws IOException { - File krb5Conf = new File(confDir, "krb5.conf"); - String config = - String.format( - "[libdefaults]%n" - + "default_realm = %s%n" - + "default_tgs_enctypes = aes128-cts-hmac-sha1-96 aes256-cts-hmac-sha1-96%n%n" - + "[realms]%n" - + "%s = {%n" - + " kdc = %s:%d%n" - + " admin_server = %s:%d%n" - + "}%n", - realm, realm, hostname, kdcPort, hostname, kdcPort); - - try (FileOutputStream fios = new FileOutputStream(krb5Conf)) { - PrintWriter pw = - new PrintWriter( - new BufferedWriter(new OutputStreamWriter(fios, Charset.defaultCharset()))); - pw.write(config); - pw.close(); - } - return krb5Conf; - } - - /** - * @return A specialized krb5.conf file that defines and defaults to the domain expressed by this - * server. - */ - public File getKrb5Conf() { - return krb5Conf; - } - - /** - * Adds a user with the given password and principal name and creates a keytab file for - * authenticating with that user's principal. - * - * @param user Username to login with (i.e. cassandra). - * @param password Password to authenticate with. - * @param principal Principal representing the server (i.e. cassandra@DATASTAX.COM). - * @return Generated keytab file for this user. 
- */ - public File addUserAndCreateKeytab(String user, String password, String principal) - throws IOException, LdapException { - addUser(user, password, principal); - return createKeytab(user, password, principal); - } - - /** - * Creates a keytab file for authenticating with a given principal. - * - * @param user Username to login with (i.e. cassandra). - * @param password Password to authenticate with. - * @param principal Principal representing the server (i.e. cassandra@DATASTAX.COM). - * @return Generated keytab file for this user. - */ - public File createKeytab(String user, String password, String principal) throws IOException { - File keytabFile = new File(confDir, user + ".keytab"); - Keytab keytab = Keytab.getInstance(); - - KerberosTime timeStamp = new KerberosTime(System.currentTimeMillis()); - - Map keys = - KerberosKeyFactory.getKerberosKeys(principal, password); - - KeytabEntry keytabEntry = - new KeytabEntry( - principal, 0, timeStamp, (byte) 0, keys.get(EncryptionType.AES128_CTS_HMAC_SHA1_96)); - - keytab.setEntries(Collections.singletonList(keytabEntry)); - keytab.write(keytabFile); - return keytabFile; - } - - /** - * Adds a user with the given password, does not create necessary kerberos attributes. - * - * @param user Username to login with (i.e. cassandra). - * @param password Password to authenticate with. - */ - public void addUser(String user, String password) throws LdapException { - addUser(user, password, null); - } - - /** - * Adds a user with the given password and principal. If principal is specified and kerberos is - * enabled, user is created with the necessary attributes to authenticate with kerberos (entryCsn, - * entryUuid, etc.). - * - * @param user Username to login with (i.e. cassandra). - * @param password Password to authenticate with. - * @param principal Principal representing the server (i.e. cassandra@DATASTAX.COM). 
- */ - public void addUser(String user, String password, String principal) throws LdapException { - Preconditions.checkState(isInit); - Dn userDN = usersDN.add("uid=" + user); - Entry userEntry = service.newEntry(userDN); - if (kerberos && principal != null) { - userEntry.add( - "objectClass", - "organizationalPerson", - "person", - "extensibleObject", - "inetOrgPerson", - "top", - "krb5KDCEntry", - "uidObject", - "krb5Principal"); - userEntry.add("krb5KeyVersionNumber", "0"); - userEntry.add("krb5PrincipalName", principal); - userEntry = kerberize(userEntry); - } else { - userEntry.add( - "objectClass", - "organizationalPerson", - "person", - "extensibleObject", - "inetOrgPerson", - "top", - "uidObject"); - } - userEntry.add("uid", user); - userEntry.add("sn", user); - userEntry.add("cn", user); - userEntry.add("userPassword", password); - service.getAdminSession().add(userEntry); - } - - /** Stops the server(s) if running. */ - public void stop() { - if (ldapServer != null) { - ldapServer.stop(); - } - if (kdcServer != null) { - kdcServer.stop(); - } - } - - /** @return The evaluated hostname that the server is listening with. */ - public String getHostname() { - return this.hostname; - } - - /** - * Adds attributes to the given Entry which will enable krb5key attributes to be added to them. - * - * @param entry Entry to add attributes to. - * @return The provided entry. - */ - private Entry kerberize(Entry entry) throws LdapException { - // Add csn and uuids for kerberos, this is needed to generate krb5keys. - entry.add(SchemaConstants.ENTRY_CSN_AT, new CsnFactory(0).newInstance().toString()); - entry.add(SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString()); - return entry; - } - - /** - * Creates a {@link JdbmPartition} with the given id and DN. - * - * @param id Id to create partition with. - * @param dn Distinguished Name to use to create partition. - * @return Created partition. 
- */ - private JdbmPartition createPartition(String id, Dn dn) throws LdapInvalidDnException { - JdbmPartition partition = new JdbmPartition(service.getSchemaManager(), service.getDnFactory()); - partition.setId(id); - partition.setPartitionPath( - new File(service.getInstanceLayout().getPartitionsDirectory(), id).toURI()); - partition.setSuffixDn(dn); - partition.setSchemaManager(service.getSchemaManager()); - return partition; - } - - /** Starts the LDAP Server with SASL enabled. */ - private void startLdap() throws Exception { - // Create and start LDAP server. - ldapServer = new LdapServer(); - - // Enable SASL layer, this is useful with or without kerberos. - Map mechanismHandlerMap = Maps.newHashMap(); - mechanismHandlerMap.put(SupportedSaslMechanisms.PLAIN, new PlainMechanismHandler()); - mechanismHandlerMap.put(SupportedSaslMechanisms.CRAM_MD5, new CramMd5MechanismHandler()); - mechanismHandlerMap.put(SupportedSaslMechanisms.DIGEST_MD5, new DigestMd5MechanismHandler()); - // GSSAPI is required for kerberos. - mechanismHandlerMap.put(SupportedSaslMechanisms.GSSAPI, new GssapiMechanismHandler()); - ldapServer.setSaslMechanismHandlers(mechanismHandlerMap); - ldapServer.setSaslHost(hostname); - // Realms only used by DIGEST_MD5 and GSSAPI. - ldapServer.setSaslRealms(Collections.singletonList(realm)); - ldapServer.setSearchBaseDn(dn); - - ldapPort = ldapPort != -1 ? ldapPort : findAvailablePort(10389); - ldapServer.setTransports(new TcpTransport(address.getHostAddress(), ldapPort)); - ldapServer.setDirectoryService(service); - if (kerberos) { - // Add an interceptor to attach krb5keys to created principals. - KeyDerivationInterceptor interceptor = new KeyDerivationInterceptor(); - interceptor.init(service); - service.addLast(interceptor); - } - ldapServer.start(); - } - - /** - * Starts the Kerberos Key Distribution Server supporting AES128 using the given principal for the - * Ticket-granting ticket. - * - * @param servicePrincipal TGT principcal service. 
- */ - private void startKDC(String servicePrincipal) throws Exception { - KerberosConfig config = new KerberosConfig(); - // We choose AES128_CTS_HMAC_SHA1_96 for our generated keytabs so we don't need JCE. - config.setEncryptionTypes(Sets.newHashSet(EncryptionType.AES128_CTS_HMAC_SHA1_96)); - config.setSearchBaseDn(dn); - config.setServicePrincipal(servicePrincipal); - - kdcServer = new KdcServer(config); - kdcServer.setDirectoryService(service); - - kdcServer.setTransports( - new TcpTransport(address.getHostAddress(), kdcPort), - new UdpTransport(address.getHostAddress(), kdcPort)); - kdcServer.start(); - } - - public static Builder builder() { - return new Builder(); - } - - public static class Builder { - - private String dn = "dc=datastax,dc=com"; - - private String realm = "DATASTAX.COM"; - - private boolean kerberos = false; - - private int kdcPort = -1; - - private int ldapPort = -1; - - private String address = "127.0.0.1"; - - private File confDir = null; - - private Builder() {} - - public EmbeddedAds build() { - return new EmbeddedAds(dn, realm, address, ldapPort, kerberos, kdcPort, confDir); - } - - /** - * Configures the base DN to create users under. Defaults to dc=datastax,dc=com. - */ - public Builder withBaseDn(String dn) { - this.dn = dn; - return this; - } - - /** Configures the realm to use for SASL and Kerberos. Defaults to DATASTAX.COM. */ - public Builder withRealm(String realm) { - this.realm = realm; - return this; - } - - /** - * Sets the directory where krb5.conf and generated keytabs are created. Defaults to current - * directory. - */ - public Builder withConfDir(File confDir) { - this.confDir = confDir; - return this; - } - - /** - * Configures the port to use for LDAP. Defaults to the first available port from 10389+. Must - * be greater than 0. - */ - public Builder withLdapPort(int port) { - Preconditions.checkArgument(port > 0); - this.ldapPort = port; - return this; - } - - /** - * Configures the port to use for Kerberos KDC. 
Defaults to the first available port for 60088+. - * Must be greater than 0. - */ - public Builder withKerberos(int port) { - Preconditions.checkArgument(port > 0); - this.kdcPort = port; - return withKerberos(); - } - - /** - * Configures the server to run with a Kerberos KDC using the first available port for 60088+. - */ - public Builder withKerberos() { - this.kerberos = true; - return this; - } - - /** - * Configures the server to be configured to listen with the given address. Defaults to - * 127.0.0.1. You shouldn't need to change this. - */ - public Builder withAddress(String address) { - this.address = address; - return this; - } - } - - private static int findAvailablePort(int startingWith) { - IOException last = null; - for (int port = startingWith; port < startingWith + 100; port++) { - try { - ServerSocket s = new ServerSocket(port); - s.close(); - return port; - } catch (IOException e) { - last = e; - } - } - // If for whatever reason a port could not be acquired throw the last encountered exception. - throw new RuntimeException("Could not acquire an available port", last); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAdsRule.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAdsRule.java deleted file mode 100644 index a57e349a51b..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAdsRule.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.io.File; -import java.util.HashMap; -import java.util.Map; -import org.junit.AssumptionViolatedException; -import org.junit.rules.ExternalResource; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A testing rule that wraps the EmbeddedAds server, and ccmRule into one rule This is needed - * because ccm needs to be aware of the kerberos server settings prior to it's initialization. - */ -public class EmbeddedAdsRule extends ExternalResource { - - private static final Logger LOG = LoggerFactory.getLogger(EmbeddedAdsRule.class); - - public CustomCcmRule ccm; - // Realm for the KDC. 
- private final String realm = "DATASTAX.COM"; - private final String address = "127.0.0.1"; - - private final EmbeddedAds adsServer = - EmbeddedAds.builder().withKerberos().withRealm(realm).withAddress(address).build(); - - // Principal for DSE service ( = kerberos_options.service_principal) - private final String servicePrincipal = "dse/" + adsServer.getHostname() + "@" + realm; - - // A non-standard principal for DSE service, to test SASL protocol names - private final String alternateServicePrincipal = - "alternate/" + adsServer.getHostname() + "@" + realm; - - // Principal for the default cassandra user. - private final String userPrincipal = "cassandra@" + realm; - - // Principal for a user that doesn't exist. - private final String unknownPrincipal = "unknown@" + realm; - - // Keytabs to use for auth. - private static File userKeytab; - private static File unknownKeytab; - private static File dseKeytab; - private static File alternateKeytab; - private static Map customKeytabs = new HashMap<>(); - - private boolean alternate = false; - - public EmbeddedAdsRule(boolean alternate) { - this.alternate = alternate; - } - - public EmbeddedAdsRule() { - this(false); - } - - @Override - protected void before() { - try { - if (adsServer.isStarted()) { - return; - } - // Start ldap/kdc server. - adsServer.start(); - - // Create users and keytabs for the DSE principal and cassandra user. 
- dseKeytab = adsServer.addUserAndCreateKeytab("dse", "fakePasswordForTests", servicePrincipal); - alternateKeytab = - adsServer.addUserAndCreateKeytab( - "alternate", "fakePasswordForTests", alternateServicePrincipal); - userKeytab = - adsServer.addUserAndCreateKeytab("cassandra", "fakePasswordForTests", userPrincipal); - unknownKeytab = adsServer.createKeytab("unknown", "fakePasswordForTests", unknownPrincipal); - - String authenticationOptions = - "" - + "authentication_options:\n" - + " enabled: true\n" - + " default_scheme: kerberos\n" - + " other_schemes:\n" - + " - internal"; - - if (alternate) { - ccm = - CustomCcmRule.builder() - .withCassandraConfiguration( - "authorizer", "com.datastax.bdp.cassandra.auth.DseAuthorizer") - .withCassandraConfiguration( - "authenticator", "com.datastax.bdp.cassandra.auth.DseAuthenticator") - .withDseConfiguration("authorization_options.enabled", true) - .withDseConfiguration(authenticationOptions) - .withDseConfiguration("kerberos_options.qop", "auth-conf") - .withDseConfiguration( - "kerberos_options.keytab", getAlternateKeytab().getAbsolutePath()) - .withDseConfiguration( - "kerberos_options.service_principal", "alternate/_HOST@" + getRealm()) - .withJvmArgs( - "-Dcassandra.superuser_setup_delay_ms=0", - "-Djava.security.krb5.conf=" + getAdsServer().getKrb5Conf().getAbsolutePath()) - .build(); - } else { - ccm = - CustomCcmRule.builder() - .withCassandraConfiguration( - "authorizer", "com.datastax.bdp.cassandra.auth.DseAuthorizer") - .withCassandraConfiguration( - "authenticator", "com.datastax.bdp.cassandra.auth.DseAuthenticator") - .withDseConfiguration("authorization_options.enabled", true) - .withDseConfiguration(authenticationOptions) - .withDseConfiguration("kerberos_options.qop", "auth") - .withDseConfiguration("kerberos_options.keytab", getDseKeytab().getAbsolutePath()) - .withDseConfiguration( - "kerberos_options.service_principal", "dse/_HOST@" + getRealm()) - .withJvmArgs( - 
"-Dcassandra.superuser_setup_delay_ms=0", - "-Djava.security.krb5.conf=" + getAdsServer().getKrb5Conf().getAbsolutePath()) - .build(); - } - ccm.getCcmBridge().create(); - ccm.getCcmBridge().start(); - - } catch (Exception e) { - LOG.error("Unable to start ads server ", e); - } - } - - @Override - public Statement apply(Statement base, Description description) { - if (BackendRequirementRule.meetsDescriptionRequirements(description)) { - return super.apply(base, description); - } else { - // requirements not met, throw reasoning assumption to skip test - return new Statement() { - @Override - public void evaluate() { - throw new AssumptionViolatedException( - BackendRequirementRule.buildReasonString(description)); - } - }; - } - } - - @Override - protected void after() { - adsServer.stop(); - ccm.getCcmBridge().stop(); - } - - public CqlSession newKeyTabSession(String userPrincipal, String keytabPath) { - return SessionUtils.newSession( - getCcm(), - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) - .withStringMap( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, - ImmutableMap.of( - "principal", - userPrincipal, - "useKeyTab", - "true", - "refreshKrb5Config", - "true", - "keyTab", - keytabPath)) - .build()); - } - - public CqlSession newKeyTabSession(String userPrincipal, String keytabPath, String authId) { - return SessionUtils.newSession( - getCcm(), - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) - .withStringMap( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, - ImmutableMap.of( - "principal", - userPrincipal, - "useKeyTab", - "true", - "refreshKrb5Config", - "true", - "keyTab", - keytabPath)) - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, authId) - .build()); - } - - public CqlSession newKeyTabSession() { - return newKeyTabSession(getUserPrincipal(), getUserKeytab().getAbsolutePath()); - } - - 
public CqlSession newTicketSession() { - return SessionUtils.newSession( - getCcm(), - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) - .withStringMap( - DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, - ImmutableMap.of( - "principal", - userPrincipal, - "useTicketCache", - "true", - "refreshKrb5Config", - "true", - "renewTGT", - "true")) - .build()); - } - - public CustomCcmRule getCcm() { - return ccm; - } - - public String getRealm() { - return realm; - } - - public String getAddress() { - return address; - } - - public EmbeddedAds getAdsServer() { - return adsServer; - } - - public String getServicePrincipal() { - return servicePrincipal; - } - - public String getAlternateServicePrincipal() { - return alternateServicePrincipal; - } - - public String getUserPrincipal() { - return userPrincipal; - } - - public String getUnknownPrincipal() { - return unknownPrincipal; - } - - public File getUserKeytab() { - return userKeytab; - } - - public File getUnknownKeytab() { - return unknownKeytab; - } - - public File getDseKeytab() { - return dseKeytab; - } - - public File getAlternateKeytab() { - return alternateKeytab; - } - - public String addUserAndCreateKeyTab(String user, String password) { - String principal = user + "@" + realm; - try { - File keytabFile = adsServer.addUserAndCreateKeytab(user, password, principal); - customKeytabs.put(principal, keytabFile); - } catch (Exception e) { - LOG.error("Unable to add user and create keytab for " + user + " ", e); - } - return principal; - } - - public File getKeytabForPrincipal(String prinicipal) { - return customKeytabs.get(prinicipal); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/KerberosUtils.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/KerberosUtils.java deleted file mode 100644 index 5d385b51c92..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/KerberosUtils.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.auth; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import java.io.File; -import java.io.IOException; -import java.util.Map; -import org.apache.commons.exec.CommandLine; -import org.apache.commons.exec.DefaultExecutor; -import org.apache.commons.exec.Executor; - -public class KerberosUtils { - /** - * Executes the given command with KRB5_CONFIG environment variable pointing to the specialized - * config file for the embedded KDC server. 
- */ - public static void executeCommand(String command, EmbeddedAds adsServer) throws IOException { - Map environmentMap = - ImmutableMap.builder() - .put("KRB5_CONFIG", adsServer.getKrb5Conf().getAbsolutePath()) - .build(); - CommandLine cli = CommandLine.parse(command); - Executor executor = new DefaultExecutor(); - int retValue = executor.execute(cli, environmentMap); - assertThat(retValue).isZero(); - } - - /** - * Acquires a ticket into the cache with the tgt using kinit command with the given principal and - * keytab file. - */ - public static void acquireTicket(String principal, File keytab, EmbeddedAds adsServer) - throws IOException { - executeCommand( - String.format("kinit -t %s -k %s", keytab.getAbsolutePath(), principal), adsServer); - } - - /** Destroys all tickets in the cache with given principal. */ - public static void destroyTicket(EmbeddedAdsRule ads) throws IOException { - executeCommand("kdestroy", ads.getAdsServer()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingIT.java deleted file mode 100644 index 45cc84f0719..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingIT.java +++ /dev/null @@ -1,699 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import 
com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Collections; -import java.util.Iterator; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1.0", - description = "Continuous paging is only available from 5.1.0 onwards") -@Category(ParallelizableTests.class) -@RunWith(DataProviderRunner.class) -public class ContinuousPagingIT extends ContinuousPagingITBase { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - Collections.singletonList(DseSessionMetric.CONTINUOUS_CQL_REQUESTS.getPath())) - .withStringList( - DefaultDriverOption.METRICS_NODE_ENABLED, - Collections.singletonList(DefaultNodeMetric.CQL_MESSAGES.getPath())) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @BeforeClass - public static void setUp() { - initialize(sessionRule.session(), sessionRule.slowProfile()); - } - - /** - * Validates {@link ContinuousSession#executeContinuously(Statement)} with a variety of paging - * options and ensures in all cases the expected number of rows come back. 
- * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - @UseDataProvider("pagingOptions") - public void should_execute_synchronously(Options options) { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - DriverExecutionProfile profile = options.asProfile(session); - ContinuousResultSet result = - session.executeContinuously(statement.setExecutionProfile(profile)); - int i = 0; - for (Row row : result) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - assertThat(i).isEqualTo(options.expectedRows); - validateMetrics(session); - } - - /** - * Validates {@link ContinuousSession#executeContinuously(Statement)} with a variety of paging - * options using a prepared statement and ensures in all cases the expected number of rows come - * back. - * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - @UseDataProvider("pagingOptions") - public void should_execute_prepared_statement_synchronously(Options options) { - CqlSession session = sessionRule.session(); - DriverExecutionProfile profile = options.asProfile(session); - ContinuousResultSet result = - session.executeContinuously(prepared.bind(KEY).setExecutionProfile(profile)); - int i = 0; - for (Row row : result) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - assertThat(i).isEqualTo(options.expectedRows); - validateMetrics(session); - } - - /** - * Validates {@link ContinuousSession#executeContinuouslyAsync(Statement)} with a variety of - * paging options and ensures in all cases the expected number of rows come back and the expected - * number of pages are received. 
- * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - @UseDataProvider("pagingOptions") - public void should_execute_asynchronously(Options options) { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - DriverExecutionProfile profile = options.asProfile(session); - PageStatistics stats = - CompletableFutures.getUninterruptibly( - session - .executeContinuouslyAsync(statement.setExecutionProfile(profile)) - .thenCompose(new AsyncContinuousPagingFunction())); - assertThat(stats.rows).isEqualTo(options.expectedRows); - assertThat(stats.pages).isEqualTo(options.expectedPages); - validateMetrics(session); - } - - /** - * Validates that continuous paging is resilient to a schema change being made in the middle of - * producing pages for the driver if the query was a simple statement. - * - *

    Adds a column 'b' after paging the first row in. This column should not be present in the - * in-flight queries' rows, but should be present for subsequent queries. - * - * @test_category queries - * @jira_ticket JAVA-1653 - * @since 1.2.0 - */ - @Test - public void simple_statement_paging_should_be_resilient_to_schema_change() { - CqlSession session = sessionRule.session(); - SimpleStatement simple = SimpleStatement.newInstance("select * from test_prepare"); - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 1) - .withDuration( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(30)) - .withDuration( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(30)); - ContinuousResultSet result = session.executeContinuously(simple.setExecutionProfile(profile)); - Iterator it = result.iterator(); - // First row should have a non-null values. - Row row0 = it.next(); - assertThat(row0.getString("k")).isNotNull(); - assertThat(row0.isNull("v")).isFalse(); - // Make schema change to add b, its metadata should NOT be present in subsequent rows. - CqlSession schemaChangeSession = - SessionUtils.newSession( - ccmRule, session.getKeyspace().orElseThrow(IllegalStateException::new)); - SimpleStatement statement = - SimpleStatement.newInstance("ALTER TABLE test_prepare add b int") - .setExecutionProfile(sessionRule.slowProfile()); - schemaChangeSession.execute(statement); - schemaChangeSession.checkSchemaAgreement(); - while (it.hasNext()) { - // Each row should have a value for k and v, but b should not be present as it was not part - // of the original metadata. 
- Row row = it.next(); - assertThat(row.getString("k")).isNotNull(); - assertThat(row.isNull("v")).isFalse(); - assertThat(row.getColumnDefinitions().contains("b")).isFalse(); - } - // Subsequent queries should contain b in metadata since its a new query. - result = session.executeContinuously(simple); - it = result.iterator(); - while (it.hasNext()) { - Row row = it.next(); - assertThat(row.getString("k")).isNotNull(); - assertThat(row.isNull("v")).isFalse(); - // b should be null, but present in metadata. - assertThat(row.isNull("b")).isTrue(); - assertThat(row.getColumnDefinitions().contains("b")).isTrue(); - } - } - - /** - * Validates that continuous paging is resilient to a schema change being made in the middle of - * producing pages for the driver if the query was prepared. - * - *

    Drops column 'v' after paging the first row in. This column should still be present in the - * in-flight queries' rows, but it's value should be null. The column should not be present in - * subsequent queries. - * - * @test_category queries - * @jira_ticket JAVA-1653 - * @since 1.2.0 - */ - @Test - public void prepared_statement_paging_should_be_resilient_to_schema_change() { - CqlSession session = sessionRule.session(); - // Create table and prepare select * query against it. - session.execute( - SimpleStatement.newInstance("CREATE TABLE test_prep (k text PRIMARY KEY, v int)") - .setExecutionProfile(SessionUtils.slowProfile(session))); - for (int i = 0; i < 100; i++) { - session.execute(String.format("INSERT INTO test_prep (k, v) VALUES ('foo', %d)", i)); - } - PreparedStatement prepared = session.prepare("SELECT * FROM test_prep WHERE k = ?"); - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 1) - .withDuration( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(30)) - .withDuration( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(30)); - ContinuousResultSet result = - session.executeContinuously(prepared.bind("foo").setExecutionProfile(profile)); - Iterator it = result.iterator(); - // First row should have a non-null value for v. - Row row0 = it.next(); - assertThat(row0.getString("k")).isNotNull(); - assertThat(row0.isNull("v")).isFalse(); - // Make schema change to drop v, its metadata should be present, values will be null. 
- CqlSession schemaChangeSession = - SessionUtils.newSession( - ccmRule, session.getKeyspace().orElseThrow(IllegalStateException::new)); - schemaChangeSession.execute( - SimpleStatement.newInstance("ALTER TABLE test_prep DROP v;") - .setExecutionProfile(SessionUtils.slowProfile(schemaChangeSession))); - while (it.hasNext()) { - // Each row should have a value for k, v should still be present, but null since column was - // dropped. - Row row = it.next(); - assertThat(row.getString("k")).isNotNull(); - if (ccmRule.isDistributionOf( - BackendType.DSE, (dist, cass) -> dist.compareTo(Version.parse("6.0.0")) >= 0)) { - // DSE 6 only, v should be null here since dropped. - // Not reliable for 5.1 since we may have gotten page queued before schema changed. - assertThat(row.isNull("v")).isTrue(); - } - assertThat(row.getColumnDefinitions().contains("v")).isTrue(); - } - // Subsequent queries should lack v from metadata as it was dropped. - prepared = session.prepare("SELECT * FROM test_prep WHERE k = ?"); - result = session.executeContinuously(prepared.bind("foo").setExecutionProfile(profile)); - it = result.iterator(); - while (it.hasNext()) { - Row row = it.next(); - assertThat(row.getString("k")).isNotNull(); - assertThat(row.getColumnDefinitions().contains("v")).isFalse(); - } - } - - /** - * Validates that {@link ContinuousResultSet#cancel()} will cancel a continuous paging session by - * setting maxPagesPerSecond to 1 and sending a cancel immediately and ensuring the total number - * of rows iterated over is equal to the size of pageSize. - * - *

    Also validates that it is possible to resume the operation using the paging state, as - * described in the javadocs of {@link ContinuousResultSet#cancel()}. - * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - public void should_cancel_with_synchronous_paging() { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - // create options and throttle at a page per second so - // cancel can go out before the next page is sent. - // Note that this might not be perfect if there are pauses - // in the JVM and cancel isn't sent soon enough. - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); - ContinuousResultSet pagingResult = - session.executeContinuously(statement.setExecutionProfile(profile)); - pagingResult.cancel(); - int i = 0; - for (Row row : pagingResult) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - // Expect only 10 rows as paging was cancelled immediately. 
- assertThat(i).isEqualTo(10); - // attempt to resume the operation from where we left - ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); - ContinuousResultSet pagingResultResumed = - session.executeContinuously( - statement - .setExecutionProfile( - profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) - .setPagingState(pagingState)); - for (Row row : pagingResultResumed) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - assertThat(i).isEqualTo(100); - } - - /** - * Validates that {@link ContinuousAsyncResultSet#cancel()} will cancel a continuous paging - * session by setting maxPagesPerSecond to 1 and sending a cancel after the first page is received - * and then ensuring that the future returned from {@link - * ContinuousAsyncResultSet#fetchNextPage()} fails. - * - *

    Also validates that it is possible to resume the operation using the paging state, as - * described in the javadocs of {@link ContinuousAsyncResultSet#cancel()}. - * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - public void should_cancel_with_asynchronous_paging() { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - // create options and throttle at a page per second so - // cancel can go out before the next page is sent. - // Note that this might not be perfect if there are pauses - // in the JVM and cancel isn't sent soon enough. - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); - CompletionStage future = - session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); - ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); - // Calling cancel on the previous result should cause the next future to timeout. - pagingResult.cancel(); - CompletionStage fetchNextPageFuture = pagingResult.fetchNextPage(); - try { - // Expect future to fail since it was cancelled. - CompletableFutures.getUninterruptibly(fetchNextPageFuture); - fail("Expected an execution exception since paging was cancelled."); - } catch (CancellationException e) { - assertThat(e) - .hasMessageContaining("Can't get more results") - .hasMessageContaining("query was cancelled"); - } - int i = 0; - for (Row row : pagingResult.currentPage()) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - // Expect only 10 rows as this is the defined page size. 
- assertThat(i).isEqualTo(10); - // attempt to resume the operation from where we left - ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); - future = - session.executeContinuouslyAsync( - statement - .setExecutionProfile( - profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) - .setPagingState(pagingState)); - ContinuousAsyncResultSet pagingResultResumed; - do { - pagingResultResumed = CompletableFutures.getUninterruptibly(future); - for (Row row : pagingResultResumed.currentPage()) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - if (pagingResultResumed.hasMorePages()) { - future = pagingResultResumed.fetchNextPage(); - } - } while (pagingResultResumed.hasMorePages()); - // expect 10 more rows - assertThat(i).isEqualTo(100); - } - - /** - * Validates that {@link ContinuousAsyncResultSet#cancel()} will cancel a continuous paging - * session and current tracked {@link CompletionStage} tied to the paging session. - * - *

    Also validates that it is possible to resume the operation using the paging state, as - * described in the javadocs of {@link ContinuousAsyncResultSet#cancel()}. - * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - public void should_cancel_future_when_cancelling_previous_result() { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - // create options and throttle at a page per second so - // cancel can go out before the next page is sent. - // Note that this might not be perfect if there are pauses - // in the JVM and cancel isn't sent soon enough. - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); - CompletionStage future = - session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); - ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); - CompletionStage fetchNextPageFuture = pagingResult.fetchNextPage(); - // Calling cancel on the previous result should cause the current future to be cancelled. - pagingResult.cancel(); - assertThat(fetchNextPageFuture.toCompletableFuture().isCancelled()).isTrue(); - try { - // Expect future to be cancelled since the previous result was cancelled. - CompletableFutures.getUninterruptibly(fetchNextPageFuture); - fail("Expected a cancellation exception since previous result was cancelled."); - } catch (CancellationException ce) { - // expected - } - int i = 0; - for (Row row : pagingResult.currentPage()) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - // Expect only 10 rows as this is the defined page size. 
- assertThat(i).isEqualTo(10); - // attempt to resume the operation from where we left - ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); - future = - session.executeContinuouslyAsync( - statement - .setExecutionProfile( - profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) - .setPagingState(pagingState)); - ContinuousAsyncResultSet pagingResultResumed; - do { - pagingResultResumed = CompletableFutures.getUninterruptibly(future); - for (Row row : pagingResultResumed.currentPage()) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - if (pagingResultResumed.hasMorePages()) { - future = pagingResultResumed.fetchNextPage(); - } - } while (pagingResultResumed.hasMorePages()); - // expect 10 more rows - assertThat(i).isEqualTo(100); - } - - /** - * Validates that {@link CompletableFuture#cancel(boolean)} will cancel a continuous paging - * session by setting maxPagesPerSecond to 1 and sending a cancel after the first page is received - * and then ensuring that the future returned from {@link - * ContinuousAsyncResultSet#fetchNextPage()} is cancelled. - * - *

    Also validates that it is possible to resume the operation using the paging state, as - * described in the javadocs of {@link ContinuousAsyncResultSet#cancel()}. - * - * @test_category queries - * @jira_ticket JAVA-1322 - * @since 1.2.0 - */ - @Test - public void should_cancel_when_future_is_cancelled() { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - // create options and throttle at a page per second so - // cancel can go out before the next page is sent. - // Note that this might not be perfect if there are pauses - // in the JVM and cancel isn't sent soon enough. - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); - CompletionStage future = - session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); - ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); - CompletableFuture fetchNextPageFuture = pagingResult.fetchNextPage().toCompletableFuture(); - fetchNextPageFuture.cancel(false); - assertThat(fetchNextPageFuture.isCancelled()).isTrue(); - try { - // Expect cancellation. - CompletableFutures.getUninterruptibly(fetchNextPageFuture); - fail("Expected a cancellation exception since future was cancelled."); - } catch (CancellationException ce) { - // expected - } - int i = 0; - for (Row row : pagingResult.currentPage()) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - // Expect only 10 rows as this is the defined page size. 
- assertThat(i).isEqualTo(10); - // attempt to resume the operation from where we left - ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); - future = - session.executeContinuouslyAsync( - statement - .setExecutionProfile( - profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) - .setPagingState(pagingState)); - ContinuousAsyncResultSet pagingResultResumed; - do { - pagingResultResumed = CompletableFutures.getUninterruptibly(future); - for (Row row : pagingResultResumed.currentPage()) { - assertThat(row.getInt("v")).isEqualTo(i); - i++; - } - if (pagingResultResumed.hasMorePages()) { - future = pagingResultResumed.fetchNextPage(); - } - } while (pagingResultResumed.hasMorePages()); - // expect 10 more rows - assertThat(i).isEqualTo(100); - } - - /** - * Validates that a client-side timeout is correctly reported to the caller. - * - * @test_category queries - * @jira_ticket JAVA-1390 - * @since 1.2.0 - */ - @Test - public void should_time_out_when_server_does_not_produce_pages_fast_enough() throws Exception { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - // Throttle server at a page per second and set client timeout much lower so that the client - // will experience a timeout. - // Note that this might not be perfect if there are pauses in the JVM and the timeout - // doesn't fire soon enough. 
- DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1) - .withDuration( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofMillis(100)); - CompletionStage future = - session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); - ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); - try { - pagingResult.fetchNextPage().toCompletableFuture().get(); - fail("Expected a timeout"); - } catch (ExecutionException e) { - assertThat(e.getCause()) - .isInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Timed out waiting for page 2"); - } - } - - /** - * Validates that the driver behaves appropriately when the client gets behind while paging rows - * in a continuous paging session. The driver should set autoread to false on the channel for that - * connection until the client consumes enough pages, at which point it will reenable autoread and - * continue reading. - * - *

    There is not really a direct way to verify that autoread is disabled, but delaying - * immediately after executing a continuous paging query should produce this effect. - * - * @test_category queries - * @jira_ticket JAVA-1375 - * @since 1.2.0 - */ - @Test - public void should_resume_reading_when_client_catches_up() { - CqlSession session = sessionRule.session(); - SimpleStatement statement = - SimpleStatement.newInstance("SELECT * from test_autoread where k=?", KEY); - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 100); - CompletionStage result = - session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); - // Defer consuming of rows for a second, this should cause autoread to be disabled. - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - // Start consuming rows, this should cause autoread to be reenabled once we consume some pages. - PageStatistics stats = - CompletableFutures.getUninterruptibly( - result.thenCompose(new AsyncContinuousPagingFunction())); - // 20k rows in this table. - assertThat(stats.rows).isEqualTo(20000); - // 200 * 100 = 20k. - assertThat(stats.pages).isEqualTo(200); - } - - private static class PageStatistics { - int rows; - int pages; - - PageStatistics(int rows, int pages) { - this.rows = rows; - this.pages = pages; - } - } - - /** - * A function that when invoked, will return a transformed future with another {@link - * AsyncContinuousPagingFunction} wrapping {@link ContinuousAsyncResultSet#fetchNextPage()} if - * there are more pages, otherwise returns an immediate future that shares {@link PageStatistics} - * about how many rows were returned and how many pages were encountered. - * - *

    Note that if observe that data is not parsed in order this future fails with an Exception. - */ - private static class AsyncContinuousPagingFunction - implements Function> { - - private final int rowsSoFar; - - AsyncContinuousPagingFunction() { - this(0); - } - - AsyncContinuousPagingFunction(int rowsSoFar) { - this.rowsSoFar = rowsSoFar; - } - - @Override - public CompletionStage apply(ContinuousAsyncResultSet input) { - int rows = rowsSoFar; - // Iterate over page and ensure data is in order. - for (Row row : input.currentPage()) { - int v = row.getInt("v"); - if (v != rows) { - fail(String.format("Expected v == %d, got %d.", rows, v)); - } - rows++; - } - // If on last page, complete future, otherwise keep iterating. - if (!input.hasMorePages()) { - // DSE may send an empty page as it can't always know if it's done paging or not yet. - // See: CASSANDRA-8871. In this case, don't count this page. - int pages = rows == rowsSoFar ? input.pageNumber() - 1 : input.pageNumber(); - CompletableFuture future = new CompletableFuture<>(); - future.complete(new PageStatistics(rows, pages)); - return future; - } else { - return input.fetchNextPage().thenCompose(new AsyncContinuousPagingFunction(rows)); - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingITBase.java deleted file mode 100644 index 4a68454d559..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingITBase.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.continuous; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.codahale.metrics.Timer; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.tngtech.java.junit.dataprovider.DataProvider; -import java.time.Duration; -import java.util.UUID; - -public abstract class ContinuousPagingITBase { - - protected static final String KEY = "k"; - - static PreparedStatement prepared; - - protected static void initialize(CqlSession session, DriverExecutionProfile slowProfile) { - session.execute( - SimpleStatement.newInstance("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))") - .setExecutionProfile(slowProfile)); - // Load enough rows to cause 
TCP Zero Window. Default window size is 65535 bytes, each row - // is at least 48 bytes, so it would take ~1365 enqueued rows to zero window. - // Conservatively load 20k rows. - session.execute( - SimpleStatement.newInstance( - "CREATE TABLE test_autoread (k text, v int, v0 uuid, v1 uuid, PRIMARY KEY (k, v, v0))") - .setExecutionProfile(slowProfile)); - session.execute( - SimpleStatement.newInstance("CREATE TABLE test_prepare (k text PRIMARY KEY, v int)") - .setExecutionProfile(slowProfile)); - session.checkSchemaAgreement(); - prepared = session.prepare("SELECT v from test where k = ?"); - for (int i = 0; i < 100; i++) { - session.execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", KEY, i)); - } - int count = 0; - for (int i = 0; i < 200; i++) { - BatchStatement batch = BatchStatement.newInstance(DefaultBatchType.UNLOGGED); - for (int j = 0; j < 100; j++) { - batch = - batch.add( - SimpleStatement.newInstance( - "INSERT INTO test_autoread (k, v, v0, v1) VALUES (?, ?, ?, ?)", - KEY, - count++, - UUID.randomUUID(), - UUID.randomUUID())); - } - session.execute(batch); - } - for (int i = 0; i < 100; i++) { - session.execute(String.format("INSERT INTO test_prepare (k, v) VALUES ('%d', %d)", i, i)); - } - } - - @DataProvider(format = "%m[%p[0]]") - public static Object[][] pagingOptions() { - return new Object[][] { - // exact # of rows. - {new Options(100, false, 0, 0, 100, 1)}, - // # of rows - 1. - {new Options(99, false, 0, 0, 100, 2)}, - // # of rows / 2. - {new Options(50, false, 0, 0, 100, 2)}, - // # 1 row per page. - {new Options(1, false, 0, 0, 100, 100)}, - // 10 rows per page, 10 pages overall = 100 (exact). - {new Options(10, false, 10, 0, 100, 10)}, - // 10 rows per page, 9 pages overall = 90 (less than exact number of pages). - {new Options(10, false, 9, 0, 90, 9)}, - // 10 rows per page, 2 pages per second should take ~5secs. 
- {new Options(10, false, 0, 2, 100, 10)}, - // 8 bytes per page == 1 row per page as len(4) + int(4) for each row. - {new Options(8, true, 0, 0, 100, 100)}, - // 16 bytes per page == 2 rows page per page. - {new Options(16, true, 0, 0, 100, 50)}, - // 32 bytes per page == 4 rows per page. - {new Options(32, true, 0, 0, 100, 25)} - }; - } - - protected void validateMetrics(CqlSession session) { - Node node = session.getMetadata().getNodes().values().iterator().next(); - assertThat(session.getMetrics()).as("assert session.getMetrics() present").isPresent(); - Metrics metrics = session.getMetrics().get(); - assertThat(metrics.getNodeMetric(node, DefaultNodeMetric.CQL_MESSAGES)) - .as("assert metrics.getNodeMetric(node, DefaultNodeMetric.CQL_MESSAGES) present") - .isPresent(); - Timer messages = (Timer) metrics.getNodeMetric(node, DefaultNodeMetric.CQL_MESSAGES).get(); - await() - .atMost(Duration.ofSeconds(5)) - .untilAsserted( - () -> { - assertThat(messages.getCount()) - .as("assert messages.getCount() >= 0") - .isGreaterThan(0); - assertThat(messages.getMeanRate()) - .as("assert messages.getMeanRate() >= 0") - .isGreaterThan(0); - }); - assertThat(metrics.getSessionMetric(DseSessionMetric.CONTINUOUS_CQL_REQUESTS)) - .as("assert metrics.getSessionMetric(DseSessionMetric.CONTINUOUS_CQL_REQUESTS) present") - .isPresent(); - Timer requests = - (Timer) metrics.getSessionMetric(DseSessionMetric.CONTINUOUS_CQL_REQUESTS).get(); - await() - .atMost(Duration.ofSeconds(5)) - .untilAsserted( - () -> { - assertThat(requests.getCount()) - .as("assert requests.getCount() >= 0") - .isGreaterThan(0); - assertThat(requests.getMeanRate()) - .as("assert requests.getMeanRate() >= 0") - .isGreaterThan(0); - }); - } - - public static class Options { - public int pageSize; - public boolean sizeInBytes; - public int maxPages; - public int maxPagesPerSecond; - public int expectedRows; - public int expectedPages; - - Options( - int pageSize, - boolean sizeInBytes, - int maxPages, - int 
maxPagesPerSecond, - int expectedRows, - int expectedPages) { - this.pageSize = pageSize; - this.sizeInBytes = sizeInBytes; - this.maxPages = maxPages; - this.maxPagesPerSecond = maxPagesPerSecond; - this.expectedRows = expectedRows; - this.expectedPages = expectedPages; - } - - public DriverExecutionProfile asProfile(CqlSession session) { - return session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, pageSize) - .withBoolean(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, sizeInBytes) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, maxPages) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, maxPagesPerSecond); - } - - @Override - public String toString() { - return "pageSize=" - + pageSize - + ", sizeInBytes=" - + sizeInBytes - + ", maxPages=" - + maxPages - + ", maxPagesPerSecond=" - + maxPagesPerSecond - + ", expectedRows=" - + expectedRows - + ", expectedPages=" - + expectedPages; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousPagingReactiveIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousPagingReactiveIT.java deleted file mode 100644 index f2a28d72597..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousPagingReactiveIT.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.cql.continuous.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.reactivex.Flowable; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1.0", - description = "Continuous paging is only available from 5.1.0 onwards") -@Category(ParallelizableTests.class) -@RunWith(DataProviderRunner.class) -public class ContinuousPagingReactiveIT extends ContinuousPagingITBase { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - Collections.singletonList(DseSessionMetric.CONTINUOUS_CQL_REQUESTS.getPath())) - .withStringList( - DefaultDriverOption.METRICS_NODE_ENABLED, - Collections.singletonList(DefaultNodeMetric.CQL_MESSAGES.getPath())) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @BeforeClass - public static void setUp() { - initialize(sessionRule.session(), sessionRule.slowProfile()); - } - - @Test - @UseDataProvider("pagingOptions") - public void should_execute_reactively(Options options) { - CqlSession session = sessionRule.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - DriverExecutionProfile profile = options.asProfile(session); - ContinuousReactiveResultSet rs = - session.executeContinuouslyReactive(statement.setExecutionProfile(profile)); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).hasSize(options.expectedRows); - Set expectedExecInfos = new LinkedHashSet<>(); - for (int i = 0; i < results.size(); i++) { - ReactiveRow row = results.get(i); - assertThat(row.getInt("v")).isEqualTo(i); - expectedExecInfos.add(row.getExecutionInfo()); - } - - List execInfos = - 
Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - // DSE may send an empty page as it can't always know if it's done paging or not yet. - // See: CASSANDRA-8871. In this case, this page's execution info appears in - // rs.getExecutionInfos(), but is not present in expectedExecInfos since the page did not - // contain any rows. - assertThat(execInfos).containsAll(expectedExecInfos); - - List colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - ReactiveRow first = results.get(0); - assertThat(colDefs).hasSize(1).containsExactly(first.getColumnDefinitions()); - - List wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(first.wasApplied()); - - validateMetrics(session); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/GeometryIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/GeometryIT.java deleted file mode 100644 index 83b01796337..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/GeometryIT.java +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import org.assertj.core.util.Preconditions; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({ParallelizableTests.class}) -public abstract class GeometryIT { - - private final Class genericType; - private final T baseSample; - private final List sampleData; - private final SessionRule sessionRule; - - 
@SuppressWarnings("unchecked") - GeometryIT(List sampleData, Class genericType, SessionRule sessionRule) { - Preconditions.checkArgument( - sampleData.size() >= 3, "Must be at least 3 samples, was given " + sampleData.size()); - this.baseSample = sampleData.get(0); - this.genericType = genericType; - this.sampleData = sampleData; - this.sessionRule = sessionRule; - } - - static void onTestContextInitialized(String cqlTypeName, SessionRule sessionRule) { - sessionRule - .session() - .execute( - SimpleStatement.builder(String.format("CREATE TYPE udt1 (g '%s')", cqlTypeName)) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - sessionRule - .session() - .execute( - SimpleStatement.builder( - String.format( - "CREATE TABLE tbl (k uuid PRIMARY KEY, g '%s', l list<'%s'>, s set<'%s'>, m0 map<'%s',int>, m1 map, t tuple<'%s','%s','%s'>, u frozen)", - cqlTypeName, - cqlTypeName, - cqlTypeName, - cqlTypeName, - cqlTypeName, - cqlTypeName, - cqlTypeName, - cqlTypeName)) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - sessionRule - .session() - .execute( - SimpleStatement.builder( - String.format("CREATE TABLE tblpk (k '%s' primary key, v int)", cqlTypeName)) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - sessionRule - .session() - .execute( - SimpleStatement.builder( - String.format( - "CREATE TABLE tblclustering (k0 int, k1 '%s', v int, primary key (k0, k1))", - cqlTypeName)) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - - private void validate(UUID key, String columnName, V expected, GenericType type) { - ResultSet result = - sessionRule - .session() - .execute( - SimpleStatement.builder( - String.format("SELECT k,%s FROM tbl where k =? 
", columnName)) - .addPositionalValue(key) - .build()); - Row row = result.iterator().next(); - assertThat(row.getUuid("k")).isEqualTo(key); - assertThat(row.get(columnName, type)).isEqualTo(expected); - assertThat(row.get(1, type)).isEqualTo(expected); - } - - private void validate(UUID key, T expected) { - validate(key, "g", expected, GenericType.of(genericType)); - } - - /** - * Validates that a given geometry value can be inserted into a column using codec.format() and - * verifies that it is stored correctly by retrieving it and ensuring it matches. - */ - @Test - public void should_insert_using_format() { - for (T expected : sampleData) { - - String val = null; - if (expected != null) { - TypeCodec codec = - sessionRule.session().getContext().getCodecRegistry().codecFor(expected); - val = codec.format(expected); - } - UUID key = Uuids.random(); - sessionRule - .session() - .execute(String.format("INSERT INTO tbl (k, g) VALUES (%s, %s)", key, val)); - validate(key, expected); - } - } - - /** - * Validates that a given geometry value can be inserted into a column by providing it as a simple - * statement parameter and verifies that it is stored correctly by retrieving it and ensuring it - * matches. - */ - @Test - public void should_insert_using_simple_statement_with_parameters() { - for (T expected : sampleData) { - UUID key = Uuids.random(); - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO tbl (k, g) VALUES (?, ?)") - .addPositionalValues(key, expected) - .build()); - validate(key, expected); - } - } - /** - * Validates that a given geometry value can be inserted into a column by providing it as a bound - * parameter in a BoundStatement and verifies that it is stored correctly by retrieving it and - * ensuring it matches. 
- */ - @Test - public void should_insert_using_prepared_statement_with_parameters() { - for (T expected : sampleData) { - UUID key = Uuids.random(); - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, g) values (?, ?)"); - BoundStatement bs = - prepared.boundStatementBuilder().setUuid(0, key).set(1, expected, genericType).build(); - sessionRule.session().execute(bs); - validate(key, expected); - } - } - /** - * Validates that geometry values can be inserted as a list and verifies that the list is stored - * correctly by retrieving it and ensuring it matches. - */ - @Test - public void should_insert_as_list() { - UUID key = Uuids.random(); - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, l) values (?, ?)"); - - BoundStatement bs = - prepared - .boundStatementBuilder() - .setUuid(0, key) - .setList(1, sampleData, genericType) - .build(); - sessionRule.session().execute(bs); - validate(key, "l", sampleData, GenericType.listOf(genericType)); - } - /** - * Validates that geometry values can be inserted as a set and verifies that the set is stored - * correctly by retrieving it and ensuring it matches. - */ - @Test - public void should_insert_as_set() { - UUID key = Uuids.random(); - Set asSet = Sets.newHashSet(sampleData); - - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, s) values (?, ?)"); - BoundStatement bs = - prepared.boundStatementBuilder().setUuid(0, key).setSet(1, asSet, genericType).build(); - - sessionRule.session().execute(bs); - validate(key, "s", asSet, GenericType.setOf(genericType)); - } - - /** - * Validates that geometry values can be inserted into a map as keys and verifies that the map is - * stored correctly by retrieving it and ensuring it matches. 
- */ - @Test - public void should_insert_as_map_keys() { - UUID key = Uuids.random(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - int count = 0; - for (T val : sampleData) { - builder = builder.put(val, count++); - } - Map asMapKeys = builder.build(); - - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, m0) values (?, ?)"); - BoundStatement bs = - prepared - .boundStatementBuilder() - .setUuid(0, key) - .setMap(1, asMapKeys, genericType, Integer.class) - .build(); - sessionRule.session().execute(bs); - validate(key, "m0", asMapKeys, GenericType.mapOf(genericType, Integer.class)); - } - - /** - * Validates that geometry values can be inserted into a map as values and verifies that the map - * is stored correctly by retrieving it and ensuring it matches. - */ - @Test - public void should_insert_as_map_values() { - UUID key = Uuids.random(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - int count = 0; - for (T val : sampleData) { - builder = builder.put(count++, val); - } - Map asMapValues = builder.build(); - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, m1) values (?, ?)"); - BoundStatement bs = - prepared - .boundStatementBuilder() - .setUuid(0, key) - .setMap(1, asMapValues, Integer.class, genericType) - .build(); - sessionRule.session().execute(bs); - validate(key, "m1", asMapValues, GenericType.mapOf(Integer.class, genericType)); - } - - /** - * Validates that geometry values can be inserted as a tuple and verifies that the tuple is stored - * correctly by retrieving it and ensuring it matches. 
- */ - @Test - @Ignore - public void should_insert_as_tuple() { - UUID key = Uuids.random(); - - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, t) values (?, ?)"); - TupleType tupleType = (TupleType) prepared.getVariableDefinitions().get(1).getType(); - TupleValue tuple = tupleType.newValue(); - tuple = tuple.set(0, sampleData.get(0), genericType); - tuple = tuple.set(1, sampleData.get(1), genericType); - tuple = tuple.set(2, sampleData.get(2), genericType); - BoundStatement bs = - prepared.boundStatementBuilder().setUuid(0, key).setTupleValue(1, tuple).build(); - sessionRule.session().execute(bs); - ResultSet rs = - sessionRule - .session() - .execute( - SimpleStatement.builder("SELECT k,t FROM tbl where k=?") - .addPositionalValues(key) - .build()); - Row row = rs.iterator().next(); - assertThat(row.getUuid("k")).isEqualTo(key); - assertThat(row.getTupleValue("t")).isEqualTo(tuple); - assertThat(row.getTupleValue(1)).isEqualTo(tuple); - } - /** - * Validates that a geometry value can be inserted as a field in a UDT and verifies that the UDT - * is stored correctly by retrieving it and ensuring it matches. 
- */ - @Test - @Ignore - public void should_insert_as_field_in_udt() { - UUID key = Uuids.random(); - UserDefinedType udtType = - sessionRule - .session() - .getMetadata() - .getKeyspace(sessionRule.session().getKeyspace().orElseThrow(AssertionError::new)) - .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("udt1"))) - .orElseThrow(AssertionError::new); - assertThat(udtType).isNotNull(); - UdtValue value = udtType.newValue(); - value = value.set("g", sampleData.get(0), genericType); - - PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO tbl (k, u) values (?, ?)"); - BoundStatement bs = - prepared.boundStatementBuilder().setUuid(0, key).setUdtValue(1, value).build(); - sessionRule.session().execute(bs); - - ResultSet rs = - sessionRule - .session() - .execute( - SimpleStatement.builder("SELECT k,u FROM tbl where k=?") - .addPositionalValues(key) - .build()); - Row row = rs.iterator().next(); - assertThat(row.getUuid("k")).isEqualTo(key); - assertThat(row.getUdtValue("u")).isEqualTo(value); - assertThat(row.getUdtValue(1)).isEqualTo(value); - } - - /** - * Validates that a geometry value can be inserted into a column that is the partition key and - * then validates that it can be queried back by partition key. - */ - @Test - public void should_accept_as_partition_key() { - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO tblpk (k, v) VALUES (?,?)") - .addPositionalValues(baseSample, 1) - .build()); - ResultSet results = sessionRule.session().execute("SELECT k,v FROM tblpk"); - Row row = results.one(); - T key = row.get("k", genericType); - assertThat(key).isEqualTo(baseSample); - } - - /** - * Validates that geometry values can be inserted into a column that is a clustering key in rows - * sharing a partition key and then validates that the rows can be retrieved by partition key. 
- * - * @test_category dse:geospatial - */ - @Test - public void should_accept_as_clustering_key() { - PreparedStatement insert = - sessionRule.session().prepare("INSERT INTO tblclustering (k0, k1, v) values (?,?,?)"); - BatchStatementBuilder batchbuilder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - - int count = 0; - for (T value : sampleData) { - BoundStatement bound = - insert - .boundStatementBuilder() - .setInt(0, 0) - .set(1, value, genericType) - .setInt(2, count++) - .build(); - batchbuilder.addStatement(bound); - } - sessionRule.session().execute(batchbuilder.build()); - - ResultSet result = - sessionRule - .session() - .execute( - SimpleStatement.builder("SELECT * from tblclustering where k0=?") - .addPositionalValue(0) - .build()); - - // The order of rows returned is not significant for geospatial types since it is stored in - // lexicographic byte order (8 bytes at a time). Thus we pull them all sort and extract and - // ensure all values were returned. - List rows = result.all(); - - assertThat(rows) - .extracting(row -> row.get("k1", genericType)) - .containsOnlyElementsOf(sampleData) - .hasSameSizeAs(sampleData); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/LineStringIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/LineStringIT.java deleted file mode 100644 index c626f0e26c6..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/LineStringIT.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.util.List; -import java.util.UUID; -import org.assertj.core.util.Lists; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement(type = BackendType.DSE, minInclusive = "5.0") -public class LineStringIT extends GeometryIT { - - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); - - private static final String LINE_STRING_TYPE = "LineStringType"; - - public LineStringIT() { - super( - Lists.newArrayList( - LineString.fromPoints(Point.fromCoordinates(0, 10), Point.fromCoordinates(10, 0)), - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - 
Point.fromCoordinates(40, 40)), - LineString.fromPoints( - Point.fromCoordinates(-5, 0), - Point.fromCoordinates(0, 10), - Point.fromCoordinates(10, 5))), - LineString.class, - sessionRule); - } - - @BeforeClass - public static void initialize() { - onTestContextInitialized(LINE_STRING_TYPE, sessionRule); - } - - @Test - public void should_insert_and_retrieve_empty_linestring() { - LineString empty = LineString.fromWellKnownText("LINESTRING EMPTY"); - UUID key = Uuids.random(); - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO tbl (k, g) VALUES (?, ?)") - .addPositionalValues(key, empty) - .build()); - - ResultSet result = - sessionRule - .session() - .execute( - SimpleStatement.builder("SELECT g from tbl where k=?") - .addPositionalValues(key) - .build()); - Row row = result.iterator().next(); - List points = row.get("g", LineString.class).getPoints(); - assertThat(points.isEmpty()).isTrue(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PointIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PointIT.java deleted file mode 100644 index b81049cd444..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PointIT.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.assertj.core.util.Lists; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement(type = BackendType.DSE, minInclusive = "5.0") -public class PointIT extends GeometryIT { - - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); - - private static final String POINT_TYPE = "PointType"; - - public PointIT() { - super( - Lists.newArrayList( - Point.fromCoordinates(-1.0, -5), - Point.fromCoordinates(0, 0), - Point.fromCoordinates(1.1, 2.2), - Point.fromCoordinates(Double.MIN_VALUE, 0), - Point.fromCoordinates(Double.MAX_VALUE, Double.MIN_VALUE)), - Point.class, - sessionRule); - } - - @BeforeClass - public static void initialize() { - onTestContextInitialized(POINT_TYPE, sessionRule); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PolygonIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PolygonIT.java deleted 
file mode 100644 index 1d9d49bd776..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PolygonIT.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.data.geometry; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.util.UUID; -import org.assertj.core.util.Lists; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement(type = BackendType.DSE, minInclusive = "5.0") -public class PolygonIT extends GeometryIT { - - 
private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); - - private static final String POLYGON_TYPE = "PolygonType"; - - private static Polygon squareInMinDomain = - Polygon.fromPoints( - Point.fromCoordinates(Double.MIN_VALUE, Double.MIN_VALUE), - Point.fromCoordinates(Double.MIN_VALUE, Double.MIN_VALUE + 1), - Point.fromCoordinates(Double.MIN_VALUE + 1, Double.MIN_VALUE + 1), - Point.fromCoordinates(Double.MIN_VALUE + 1, Double.MIN_VALUE)); - - private static Polygon triangle = - Polygon.fromPoints( - Point.fromCoordinates(-5, 10), - Point.fromCoordinates(5, 5), - Point.fromCoordinates(10, -5)); - - private static Polygon complexPolygon = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)) - .addRing( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)) - .addRing( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)) - .build(); - - public PolygonIT() { - super( - Lists.newArrayList(squareInMinDomain, complexPolygon, triangle), - Polygon.class, - sessionRule); - } - - @BeforeClass - public static void initialize() { - onTestContextInitialized(POLYGON_TYPE, sessionRule); - } - - /** - * Validates that an empty {@link Polygon} can be inserted and retrieved. 
- * - * @jira_ticket JAVA-1076 - * @test_category dse:graph - */ - @Test - public void should_insert_and_retrieve_empty_polygon() { - Polygon empty = Polygon.builder().build(); - UUID key = Uuids.random(); - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO tbl (k, g) VALUES (?, ?)") - .addPositionalValues(key, empty) - .build()); - - ResultSet result = - sessionRule - .session() - .execute( - SimpleStatement.builder("SELECT g from tbl where k=?") - .addPositionalValues(key) - .build()); - Row row = result.iterator().next(); - assertThat(row.get("g", Polygon.class).getInteriorRings()).isEmpty(); - assertThat(row.get("g", Polygon.class).getExteriorRing()).isEmpty(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeIT.java deleted file mode 100644 index 9b2370e3376..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeIT.java +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.data.time; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.google.common.collect.Sets; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -@Category({ParallelizableTests.class}) -@BackendRequirement(type = BackendType.DSE, minInclusive = "5.1") -public class DateRangeIT { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Rule public TestName testName = new TestName(); - - /** - * Validates that 
data can be retrieved by primary key where its primary key is a 'DateRangeType' - * column, and that the data returned properly parses into the expected {@link DateRange}. - */ - @Test - public void should_use_date_range_as_primary_key() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute( - String.format("CREATE TABLE %s (k 'DateRangeType' PRIMARY KEY, v int)", tableName)); - session.execute( - String.format("INSERT INTO %s (k, v) VALUES ('[2010-12-03 TO 2010-12-04]', 1)", tableName)); - session.execute( - String.format( - "INSERT INTO %s (k, v) VALUES ('[2015-12-03T10:15:30.001Z TO 2016-01-01T00:05:11.967Z]', 2)", - tableName)); - - List rows = session.execute("SELECT * FROM " + tableName).all(); - - assertThat(rows).hasSize(2); - assertThat(rows.get(0).get("k", DateRange.class)) - .isEqualTo(DateRange.parse("[2010-12-03 TO 2010-12-04]")); - assertThat(rows.get(1).get("k", DateRange.class)) - .isEqualTo(DateRange.parse("[2015-12-03T10:15:30.001Z TO 2016-01-01T00:05:11.967Z]")); - - rows = - session - .execute( - String.format( - "SELECT * FROM %s WHERE k = '[2015-12-03T10:15:30.001Z TO 2016-01-01T00:05:11.967]'", - tableName)) - .all(); - assertThat(rows.size()).isEqualTo(1); - assertThat(rows.get(0).getInt("v")).isEqualTo(2); - } - - /** - * Validates that a 'DateRangeType' column can take a variety of {@link DateRange} inputs: - * - *

      - *
    1. Upper bound unbounded - *
    2. Lower bound unbounded - *
    3. Unbounded - *
    4. Bounded - *
    5. null - *
    6. unset - *
    - */ - @Test - public void should_store_date_range() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute( - String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); - session.execute( - String.format( - "INSERT INTO %s (k, v) VALUES (1, '[2000-01-01T10:15:30.301Z TO *]')", tableName)); - session.execute( - String.format("INSERT INTO %s (k, v) VALUES (2, '[2000-02 TO 2000-03]')", tableName)); - session.execute(String.format("INSERT INTO %s (k, v) VALUES (3, '[* TO 2020]')", tableName)); - session.execute(String.format("INSERT INTO %s (k, v) VALUES (4, null)", tableName)); - session.execute(String.format("INSERT INTO %s (k) VALUES (5)", tableName)); - session.execute(String.format("INSERT INTO %s (k, v) VALUES (6, '*')", tableName)); - - List rows = session.execute("SELECT * FROM " + tableName).all(); - - assertThat(rows) - .extracting(input -> input.get("v", DateRange.class)) - .containsOnly( - DateRange.parse("[2000-01-01T10:15:30.301Z TO *]"), - DateRange.parse("[2000-02 TO 2000-03]"), - DateRange.parse("[* TO 2020]"), - null, - DateRange.parse("*")); - } - - /** - * Validates that if a provided {@link DateRange} for a 'DateRangeType' column has the bounds - * reversed (lower bound is later than upper bound), then an {@link InvalidQueryException} is - * thrown. 
- */ - @Test - public void should_disallow_invalid_order() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute( - String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); - - assertThatThrownBy( - () -> - session.execute( - String.format( - "INSERT INTO %s (k, v) " - + "VALUES (1, '[2020-01-01T10:15:30.009Z TO 2010-01-01T00:05:11.031Z]')", - tableName))) - .isInstanceOf(InvalidQueryException.class) - .hasMessageContaining("Wrong order: 2020-01-01T10:15:30.009Z TO 2010-01-01T00:05:11.031Z") - .hasMessageContaining( - "Could not parse date range: [2020-01-01T10:15:30.009Z TO 2010-01-01T00:05:11.031Z]"); - } - - /** Validates that {@link DateRange} can be used in UDT and Tuple types. */ - @Test - public void should_allow_date_range_in_udt_and_tuple() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute("CREATE TYPE IF NOT EXISTS test_udt (i int, range 'DateRangeType')"); - session.execute( - String.format( - "CREATE TABLE %s (k int PRIMARY KEY, u test_udt, uf frozen, " - + "t tuple<'DateRangeType', int>, tf frozen>)", - tableName)); - session.execute( - String.format( - "INSERT INTO %s (k, u, uf, t, tf) VALUES (" - + "1, " - + "{i: 10, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " - + "{i: 20, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " - + "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 30), " - + "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 40))", - tableName)); - - DateRange expected = DateRange.parse("[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]"); - - List rows = session.execute("SELECT * FROM " + tableName).all(); - assertThat(rows).hasSize(1); - - UdtValue u = rows.get(0).get("u", UdtValue.class); - DateRange dateRange = u.get("range", DateRange.class); - 
assertThat(dateRange).isEqualTo(expected); - assertThat(u.getInt("i")).isEqualTo(10); - - u = rows.get(0).get("uf", UdtValue.class); - dateRange = u.get("range", DateRange.class); - assertThat(dateRange).isEqualTo(expected); - assertThat(u.getInt("i")).isEqualTo(20); - - TupleValue t = rows.get(0).get("t", TupleValue.class); - dateRange = t.get(0, DateRange.class); - assertThat(dateRange).isEqualTo(expected); - assertThat(t.getInt(1)).isEqualTo(30); - - t = rows.get(0).get("tf", TupleValue.class); - dateRange = t.get(0, DateRange.class); - assertThat(dateRange).isEqualTo(expected); - assertThat(t.getInt(1)).isEqualTo(40); - } - - /** Validates that {@link DateRange} can be used in Collection types (Map, Set, List). */ - @Test - public void should_allow_date_range_in_collections() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute( - String.format( - "CREATE TABLE %s (k int PRIMARY KEY, l list<'DateRangeType'>, s set<'DateRangeType'>, " - + "dr2i map<'DateRangeType', int>, i2dr map)", - tableName)); - session.execute( - String.format( - "INSERT INTO %s (k, l, s, i2dr, dr2i) VALUES (" - + "1, " - // l - + "['[2000-01-01T10:15:30.001Z TO 2020]', '[2010-01-01T10:15:30.001Z TO 2020]'," - + " '2001-01-02'], " - // s - + "{'[2000-01-01T10:15:30.001Z TO 2020]', '[2000-01-01T10:15:30.001Z TO 2020]', " - + "'[2010-01-01T10:15:30.001Z TO 2020]'}, " - // i2dr - + "{1: '[2000-01-01T10:15:30.001Z TO 2020]', " - + "2: '[2010-01-01T10:15:30.001Z TO 2020]'}, " - // dr2i - + "{'[2000-01-01T10:15:30.001Z TO 2020]': 1, " - + "'[2010-01-01T10:15:30.001Z TO 2020]': 2})", - tableName)); - - List rows = session.execute("SELECT * FROM " + tableName).all(); - assertThat(rows.size()).isEqualTo(1); - - List drList = rows.get(0).getList("l", DateRange.class); - assertThat(drList.size()).isEqualTo(3); - assertThat(drList.get(0)).isEqualTo(DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]")); - 
assertThat(drList.get(1)).isEqualTo(DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]")); - assertThat(drList.get(2)).isEqualTo(DateRange.parse("2001-01-02")); - - Set drSet = rows.get(0).getSet("s", DateRange.class); - assertThat(drSet.size()).isEqualTo(2); - assertThat(drSet) - .isEqualTo( - Sets.newHashSet( - DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]"), - DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]"))); - - Map dr2i = rows.get(0).getMap("dr2i", DateRange.class, Integer.class); - assertThat(dr2i.size()).isEqualTo(2); - assertThat((int) dr2i.get(DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]"))).isEqualTo(1); - assertThat((int) dr2i.get(DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]"))).isEqualTo(2); - - Map i2dr = rows.get(0).getMap("i2dr", Integer.class, DateRange.class); - assertThat(i2dr.size()).isEqualTo(2); - assertThat(i2dr.get(1)).isEqualTo(DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]")); - assertThat(i2dr.get(2)).isEqualTo(DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]")); - } - - /** - * Validates that a 'DateRangeType' column can take a {@link DateRange} inputs as a prepared - * statement parameter. 
- */ - @Test - public void should_bind_date_range_in_prepared_statement() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute( - String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); - PreparedStatement statement = - session.prepare(String.format("INSERT INTO %s (k,v) VALUES(?,?)", tableName)); - - DateRange expected = DateRange.parse("[2007-12-03 TO 2007-12]"); - session.execute(statement.bind(1, expected)); - List rows = session.execute("SELECT * FROM " + tableName).all(); - assertThat(rows.size()).isEqualTo(1); - DateRange actual = rows.get(0).get("v", DateRange.class); - assertThat(actual).isEqualTo(expected); - assertThat(actual.getLowerBound().getPrecision()).isEqualTo(DateRangePrecision.DAY); - assertThat(actual.getUpperBound()) - .hasValueSatisfying( - upperBound -> - assertThat(upperBound.getPrecision()).isEqualTo(DateRangePrecision.MONTH)); - assertThat(actual.toString()).isEqualTo("[2007-12-03 TO 2007-12]"); - - expected = DateRange.parse("[* TO *]"); - session.execute(statement.bind(1, expected)); - rows = session.execute("SELECT * FROM " + tableName).all(); - assertThat(rows.size()).isEqualTo(1); - actual = rows.get(0).get("v", DateRange.class); - assertThat(actual).isEqualTo(expected); - assertThat(actual.getLowerBound().isUnbounded()).isTrue(); - assertThat(actual.isSingleBounded()).isFalse(); - assertThat(actual.getUpperBound()) - .hasValueSatisfying(upperBound -> assertThat(upperBound.isUnbounded()).isTrue()); - assertThat(actual.toString()).isEqualTo("[* TO *]"); - - expected = DateRange.parse("*"); - session.execute(statement.bind(1, expected)); - rows = session.execute("SELECT * FROM " + tableName).all(); - assertThat(rows.size()).isEqualTo(1); - actual = rows.get(0).get("v", DateRange.class); - assertThat(actual).isEqualTo(expected); - assertThat(actual.getLowerBound().isUnbounded()).isTrue(); - 
assertThat(actual.isSingleBounded()).isTrue(); - assertThat(actual.toString()).isEqualTo("*"); - } - - /** - * Validates that 'DateRangeType' columns are retrievable using SELECT JSON queries - * and that their value representations match their input. - */ - @Test - public void should_select_date_range_using_json() throws Exception { - CqlSession session = sessionRule.session(); - String tableName = testName.getMethodName(); - - session.execute( - String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); - PreparedStatement statement = - session.prepare(String.format("INSERT INTO %s (k,v) VALUES(?,?)", tableName)); - - DateRange expected = DateRange.parse("[2007-12-03 TO 2007-12]"); - session.execute(statement.bind(1, expected)); - List rows = session.execute("SELECT JSON * FROM " + tableName).all(); - assertThat(rows.get(0).getString(0)) - .isEqualTo("{\"k\": 1, \"v\": \"[2007-12-03 TO 2007-12]\"}"); - - expected = DateRange.parse("[* TO *]"); - session.execute(statement.bind(1, expected)); - rows = session.execute("SELECT JSON * FROM " + tableName).all(); - assertThat(rows.get(0).getString(0)).isEqualTo("{\"k\": 1, \"v\": \"[* TO *]\"}"); - - expected = DateRange.parse("*"); - session.execute(statement.bind(1, expected)); - rows = session.execute("SELECT JSON * FROM " + tableName).all(); - assertThat(rows.get(0).getString(0)).isEqualTo("{\"k\": 1, \"v\": \"*\"}"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphDataTypeITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphDataTypeITBase.java deleted file mode 100644 index d42b156a8be..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphDataTypeITBase.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.net.InetAddresses; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import 
org.junit.AssumptionViolatedException; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public abstract class ClassicGraphDataTypeITBase { - - private static final boolean IS_DSE50 = - CcmBridge.VERSION.compareTo(Objects.requireNonNull(Version.parse("5.1"))) < 0; - private static final Set TYPES_REQUIRING_DSE51 = - ImmutableSet.of("Date()", "Time()", "Point()", "Linestring()", "Polygon()"); - - private static final AtomicInteger SCHEMA_COUNTER = new AtomicInteger(); - - @DataProvider - public static Object[][] typeSamples() { - return new Object[][] { - // Types that DSE supports. - {"Boolean()", true}, - {"Boolean()", false}, - {"Smallint()", Short.MAX_VALUE}, - {"Smallint()", Short.MIN_VALUE}, - {"Smallint()", (short) 0}, - {"Smallint()", (short) 42}, - {"Int()", Integer.MAX_VALUE}, - {"Int()", Integer.MIN_VALUE}, - {"Int()", 0}, - {"Int()", 42}, - {"Bigint()", Long.MAX_VALUE}, - {"Bigint()", Long.MIN_VALUE}, - {"Bigint()", 0L}, - {"Double()", Double.MAX_VALUE}, - {"Double()", Double.MIN_VALUE}, - {"Double()", 0.0d}, - {"Double()", Math.PI}, - {"Float()", Float.MAX_VALUE}, - {"Float()", Float.MIN_VALUE}, - {"Float()", 0.0f}, - {"Text()", ""}, - {"Text()", "75"}, - {"Text()", "Lorem Ipsum"}, - // Inet, UUID, Date - {"Inet()", InetAddresses.forString("127.0.0.1")}, - {"Inet()", InetAddresses.forString("0:0:0:0:0:0:0:1")}, - {"Inet()", InetAddresses.forString("2001:db8:85a3:0:0:8a2e:370:7334")}, - {"Uuid()", UUID.randomUUID()}, - // Timestamps - {"Timestamp()", Instant.ofEpochMilli(123)}, - {"Timestamp()", Instant.ofEpochMilli(1488313909)}, - {"Duration()", java.time.Duration.parse("P2DT3H4M")}, - {"Date()", LocalDate.of(2016, 5, 12)}, - {"Time()", LocalTime.parse("18:30:41.554")}, - {"Time()", LocalTime.parse("18:30:41.554010034")}, - // Blob - {"Blob()", "Hello World!".getBytes(Charsets.UTF_8)}, - // BigDecimal/BigInteger - {"Decimal()", new BigDecimal("8675309.9998")}, - {"Varint()", new BigInteger("8675309")}, - // 
Geospatial types - {"Point().withBounds(-2, -2, 2, 2)", Point.fromCoordinates(0, 1)}, - {"Point().withBounds(-40, -40, 40, 40)", Point.fromCoordinates(-5, 20)}, - { - "Linestring().withGeoBounds()", - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)) - }, - { - "Polygon().withGeoBounds()", - Polygon.builder() - .addRing( - Point.fromCoordinates(35, 10), - Point.fromCoordinates(45, 45), - Point.fromCoordinates(15, 40), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(35, 10)) - .addRing( - Point.fromCoordinates(20, 30), - Point.fromCoordinates(35, 35), - Point.fromCoordinates(30, 20), - Point.fromCoordinates(20, 30)) - .build() - } - }; - } - - @UseDataProvider("typeSamples") - @Test - public void should_create_and_retrieve_vertex_property_with_correct_type( - String type, Object value) { - if (IS_DSE50 && requiresDse51(type)) { - throw new AssumptionViolatedException(type + " not supported in DSE " + CcmBridge.VERSION); - } - - int id = SCHEMA_COUNTER.getAndIncrement(); - - String vertexLabel = "vertex" + id; - String propertyName = "prop" + id; - GraphStatement addVertexLabelAndProperty = - ScriptGraphStatement.builder( - "schema.propertyKey(property)." 
- + type - + ".create()\n" - + "schema.vertexLabel(vertexLabel).properties(property).create()") - .setQueryParam("vertexLabel", vertexLabel) - .setQueryParam("property", propertyName) - .build(); - - session().execute(addVertexLabelAndProperty); - - Vertex v = insertVertexAndReturn(vertexLabel, propertyName, value); - - assertThat(v).hasProperty(propertyName, value); - } - - private boolean requiresDse51(String type) { - for (String prefix : TYPES_REQUIRING_DSE51) { - if (type.startsWith(prefix)) { - return true; - } - } - return false; - } - - public abstract Vertex insertVertexAndReturn( - String vertexLabel, String propertyName, Object value); - - /** - * Note that the {@link SessionRule} (and setupSchema method) must be redeclared in each subclass, - * since it depends on the CCM rule that can't be shared across serial tests. - */ - public abstract CqlSession session(); -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphGeoSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphGeoSearchIndexIT.java deleted file mode 100644 index 9878f1186e6..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphGeoSearchIndexIT.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.ArrayList; -import java.util.Collection; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1", - description = "DSE 5.1 required for graph geo indexing") -public class ClassicGraphGeoSearchIndexIT extends GraphGeoSearchIndexITBase { - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - 
.withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); - - @Override - protected boolean isGraphBinary() { - return false; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return g; - } - - @BeforeClass - public static void setup() { - for (String setupQuery : geoIndices()) { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); - } - - CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user_p", true); - } - - /** - * A schema representing an address book with search enabled on name, description, and - * coordinates. - */ - public static Collection geoIndices() { - Object[][] providerIndexTypes = indexTypes(); - String[] indexTypes = new String[providerIndexTypes.length]; - for (int i = 0; i < providerIndexTypes.length; i++) { - indexTypes[i] = (String) providerIndexTypes[i][0]; - } - - StringBuilder schema = new StringBuilder("schema.propertyKey('full_name').Text().create()\n"); - StringBuilder propertyKeys = new StringBuilder(); - StringBuilder vertexLabel = new StringBuilder("schema.vertexLabel('user').properties("); - StringBuilder indices = new StringBuilder(); - StringBuilder vertex0 = - new StringBuilder("g.addV('user').property('full_name', 'Paul Thomas Joe')"); - StringBuilder vertex1 = - new StringBuilder("g.addV('user').property('full_name', 'George Bill Steve')"); - String vertex2 = "g.addV('user').property('full_name', 'James Paul Joe')"; - StringBuilder vertex3 = new StringBuilder("g.addV('user').property('full_name', 'Jill Alice')"); - - ArrayList propertyNames = new ArrayList<>(); - propertyNames.add("'full_name'"); - - for (String indexType : indexTypes) { - - propertyKeys.append( - String.format( - "schema.propertyKey('pointPropWithBounds_%s')." 
- + "Point().withBounds(0.000000, 0.000000, 100.000000, 100.000000).create()\n", - indexType)); - - propertyKeys.append( - String.format( - "schema.propertyKey('pointPropWithGeoBounds_%s').Point().withGeoBounds().create()\n", - indexType)); - - propertyNames.add("'pointPropWithBounds_" + indexType + "'"); - propertyNames.add("'pointPropWithGeoBounds_" + indexType + "'"); - - if (indexType.equals("search")) { - - indices.append( - String.format( - "schema.vertexLabel('user').index('search').search().by('pointPropWithBounds_%s').withError(0.00001, 0.0).by('pointPropWithGeoBounds_%s').withError(0.00001, 0.0).add()\n", - indexType, indexType)); - } else { - - indices.append( - String.format( - "schema.vertexLabel('user').index('by_pointPropWithBounds_%s').%s().by('pointPropWithBounds_%s').add()\n", - indexType, indexType, indexType)); - - indices.append( - String.format( - "schema.vertexLabel('user').index('by_pointPropWithGeoBounds_%s').%s().by('pointPropWithGeoBounds_%s').add()\n", - indexType, indexType, indexType)); - } - - vertex0.append( - String.format( - ".property('pointPropWithBounds_%s', 'POINT(40.0001 40)').property('pointPropWithGeoBounds_%s', 'POINT(40.0001 40)')", - indexType, indexType)); - vertex1.append( - String.format( - ".property('pointPropWithBounds_%s', 'POINT(40 40)').property('pointPropWithGeoBounds_%s', 'POINT(40 40)')", - indexType, indexType)); - vertex3.append( - String.format( - ".property('pointPropWithBounds_%s', 'POINT(30 30)').property('pointPropWithGeoBounds_%s', 'POINT(30 30)')", - indexType, indexType)); - } - - vertexLabel.append(Joiner.on(", ").join(propertyNames)); - vertexLabel.append(").create()\n"); - - schema.append(propertyKeys).append(vertexLabel).append(indices); - - return Lists.newArrayList( - SampleGraphScripts.MAKE_STRICT, - schema.toString(), - vertex0.toString(), - vertex1.toString(), - vertex2, - vertex3.toString()); - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphTextSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphTextSearchIndexIT.java deleted file mode 100644 index 13d503d6b25..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphTextSearchIndexIT.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.ArrayList; -import java.util.Collection; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1", - description = "DSE 5.1 required for graph geo indexing") -public class ClassicGraphTextSearchIndexIT extends GraphTextSearchIndexITBase { - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); - - /** - * A schema representing an address book with 3 properties (full_name_*, description_*, alias_*) - * created for each type of index (search, secondary, materialized). 
- */ - public static Collection textIndices() { - Object[][] providerIndexTypes = indexTypes(); - String[] indexTypes = new String[providerIndexTypes.length]; - for (int i = 0; i < providerIndexTypes.length; i++) { - indexTypes[i] = (String) providerIndexTypes[i][0]; - } - - StringBuilder schema = new StringBuilder(); - StringBuilder propertyKeys = new StringBuilder(); - StringBuilder vertexLabel = new StringBuilder("schema.vertexLabel('user').properties("); - StringBuilder indices = new StringBuilder(); - StringBuilder vertex0 = new StringBuilder("g.addV('user')"); - StringBuilder vertex1 = new StringBuilder("g.addV('user')"); - StringBuilder vertex2 = new StringBuilder("g.addV('user')"); - StringBuilder vertex3 = new StringBuilder("g.addV('user')"); - - ArrayList propertyNames = new ArrayList<>(); - for (String indexType : indexTypes) { - propertyKeys.append( - String.format( - "schema.propertyKey('full_name_%s').Text().create()\n" - + "schema.propertyKey('description_%s').Text().create()\n" - + "schema.propertyKey('alias_%s').Text().create()\n", - indexType, indexType, indexType)); - - propertyNames.add("'full_name_" + indexType + "'"); - propertyNames.add("'description_" + indexType + "'"); - propertyNames.add("'alias_" + indexType + "'"); - - if (indexType.equals("search")) { - indices.append( - "schema.vertexLabel('user').index('search').search().by('full_name_search').asString().by('description_search').asText().by('alias_search').asString().add()\n"); - } else { - indices.append( - String.format( - "schema.vertexLabel('user').index('by_full_name_%s').%s().by('full_name_%s').add()\n", - indexType, indexType, indexType)); - indices.append( - String.format( - "schema.vertexLabel('user').index('by_description_%s').%s().by('description_%s').add()\n", - indexType, indexType, indexType)); - indices.append( - String.format( - "schema.vertexLabel('user').index('by_alias_name_%s').%s().by('alias_%s').add()\n", - indexType, indexType, indexType)); - } - - 
vertex0.append( - String.format( - ".property('full_name_%s', 'Paul Thomas Joe').property('description_%s', 'Lives by the hospital').property('alias_%s', 'mario')", - indexType, indexType, indexType)); - vertex1.append( - String.format( - ".property('full_name_%s', 'George Bill Steve').property('description_%s', 'A cold dude').property('alias_%s', 'wario')", - indexType, indexType, indexType)); - vertex2.append( - String.format( - ".property('full_name_%s', 'James Paul Joe').property('description_%s', 'Likes to hang out').property('alias_%s', 'bowser')", - indexType, indexType, indexType)); - vertex3.append( - String.format( - ".property('full_name_%s', 'Jill Alice').property('description_%s', 'Enjoys a very nice cold coca cola').property('alias_%s', 'peach')", - indexType, indexType, indexType)); - } - - vertexLabel.append(Joiner.on(", ").join(propertyNames)); - vertexLabel.append(").create()\n"); - - schema.append(propertyKeys).append(vertexLabel).append(indices); - - return Lists.newArrayList( - SampleGraphScripts.MAKE_STRICT, - schema.toString(), - vertex0.toString(), - vertex1.toString(), - vertex2.toString(), - vertex3.toString()); - } - - @BeforeClass - public static void setup() { - for (String setupQuery : textIndices()) { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); - } - - CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user_p", true); - } - - @Override - protected boolean isGraphBinary() { - return false; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return g; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphDataTypeITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphDataTypeITBase.java deleted file mode 100644 index 7fe31a059d7..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphDataTypeITBase.java +++ /dev/null @@ -1,271 +0,0 @@ 
-/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.oss.driver.api.core.type.DataTypes.BIGINT; -import static com.datastax.oss.driver.api.core.type.DataTypes.INT; -import static com.datastax.oss.driver.api.core.type.DataTypes.TEXT; -import static com.datastax.oss.driver.api.core.type.DataTypes.listOf; -import static com.datastax.oss.driver.api.core.type.DataTypes.tupleOf; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.LineString; -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.data.geometry.Polygon; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.dse.driver.api.core.type.DseDataTypes; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.google.common.collect.ImmutableList; -import 
com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneId; -import java.time.temporal.ChronoUnit; -import java.util.Arrays; -import java.util.Map; -import org.junit.Test; - -public abstract class CoreGraphDataTypeITBase { - - protected abstract CqlSession session(); - - protected abstract String graphName(); - - @Test - public void should_create_and_retrieve_correct_data_with_types() { - CqlSession session = session(); - - // use CQL to create type for now because DSP-17567 is not in yet, so this is more stable - session.execute( - String.format( - "CREATE TYPE %s.udt_graphbinary(simple text, complex tuple, missing text)", - graphName())); - - session.execute( - String.format( - "CREATE TYPE %s.udt_graphbinarygeo(point 'PointType', line 'LineStringType', poly 'PolygonType')", - graphName())); - - ImmutableMap.Builder properties = - ImmutableMap.builder() - .put("Ascii", "test") - .put("Bigint", 5L) - .put("Boolean", true) - .put("Date", LocalDate.of(2007, 7, 7)) - .put("Decimal", BigDecimal.valueOf(2.3)) - .put("Double", 4.5d) - .put("Float", 4.8f) - .put("Int", 45) - .put("Smallint", (short) 1) - .put("Text", "test") - .put("Time", LocalTime.now(ZoneId.systemDefault())) - .put("Timeuuid", Uuids.timeBased()) - .put("Timestamp", Instant.now().truncatedTo(ChronoUnit.MILLIS)) - .put("Uuid", java.util.UUID.randomUUID()) - .put("Varint", BigInteger.valueOf(3234)) - .put("Blob", ByteBuffer.wrap(new byte[] {1, 2, 3})) - .put("Tinyint", (byte) 38) - .put("listOf(Int)", Arrays.asList(2, 3, 4)) - .put("setOf(Int)", Sets.newHashSet(2, 3, 4)) - .put("mapOf(Int, Text)", ImmutableMap.of(2, "two", 4, "four")) - .put("Duration", CqlDuration.newInstance(1, 2, 3)) - .put("LineString", Geo.lineString(1, 2, 3, 4, 5, 6)) 
- .put("Point", Geo.point(3, 4)) - .put("Polygon", Geo.polygon(Geo.point(3, 4), Geo.point(5, 4), Geo.point(6, 6))) - .put("tupleOf(Int, Text)", tupleOf(INT, TEXT).newValue(5, "Bar")) - .put( - "typeOf('udt_graphbinary')", - session - .getMetadata() - .getKeyspace(graphName()) - .flatMap(keyspace -> keyspace.getUserDefinedType("udt_graphbinary")) - .orElseThrow(IllegalStateException::new) - .newValue( - "some text", tupleOf(INT, TEXT).newValue(5, "Bar"), "some missing text")) - .put( - "typeOf('udt_graphbinarygeo')", - session - .getMetadata() - .getKeyspace(graphName()) - .flatMap( - keyspaceMetadata -> - keyspaceMetadata.getUserDefinedType("udt_graphbinarygeo")) - .orElseThrow(IllegalStateException::new) - .newValue( - Point.fromCoordinates(3.3, 4.4), - LineString.fromPoints( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(3, 3)), - Polygon.fromPoints( - Point.fromCoordinates(3, 4), - Point.fromCoordinates(5, 4), - Point.fromCoordinates(6, 6)))); - - TupleType tuple = tupleOf(DseDataTypes.POINT, DseDataTypes.LINE_STRING, DseDataTypes.POLYGON); - tuple.attach(session.getContext()); - - properties.put( - "tupleOf(Point, LineString, Polygon)", - tuple.newValue( - Point.fromCoordinates(3.3, 4.4), - LineString.fromPoints( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(3, 3)), - Polygon.fromPoints( - Point.fromCoordinates(3, 4), - Point.fromCoordinates(5, 4), - Point.fromCoordinates(6, 6)))); - - int vertexID = 1; - String vertexLabel = "graphBinaryAllTypes"; - - runTest(properties.build(), vertexLabel, vertexID); - } - - @Test - public void should_insert_and_retrieve_nested_UDTS_and_tuples() { - CqlSession session = session(); - - // use CQL to create type for now because DSP-17567 is not in yet, so this is more stable - session.execute(String.format("CREATE TYPE %s.udt1(a int, b text)", graphName())); - - session.execute( - String.format( - "CREATE TYPE %s.udt2(" - + "a int" - + ", b text" - 
+ ", c frozen" - + ", mylist list" - + ", mytuple_withlist tuple>>>" - + ")", - graphName())); - - session.execute( - String.format( - "CREATE TYPE %s.udt3(" - + "a list" - + ", b set" - + ", c map" - + ", d list>>" - + ", e set>>" - + ", f list>>" - + ")", - graphName())); - - UserDefinedType udt1 = - session - .getMetadata() - .getKeyspace(graphName()) - .flatMap(keyspace -> keyspace.getUserDefinedType("udt1")) - .orElseThrow(IllegalStateException::new); - UdtValue udtValue1 = udt1.newValue(1, "2"); - - UserDefinedType udt2 = - session - .getMetadata() - .getKeyspace(graphName()) - .flatMap(keyspace -> keyspace.getUserDefinedType("udt2")) - .orElseThrow(IllegalStateException::new); - TupleType secondNested = tupleOf(BIGINT, listOf(BIGINT)); - TupleType firstNested = tupleOf(TEXT, secondNested); - UdtValue udtValue2 = - udt2.newValue( - 1, - "2", - udt1.newValue(3, "4"), - ImmutableList.of(5L), - firstNested.newValue("6", secondNested.newValue(7L, ImmutableList.of(8L)))); - - UserDefinedType udt3 = - session - .getMetadata() - .getKeyspace(graphName()) - .flatMap(keyspace -> keyspace.getUserDefinedType("udt3")) - .orElseThrow(IllegalStateException::new); - UdtValue udtValue3 = - udt3.newValue( - ImmutableList.of(1), - ImmutableSet.of(2.1f), - ImmutableMap.of("3", 4L), - ImmutableList.of(ImmutableList.of(5.1d, 6.1d), ImmutableList.of(7.1d)), - ImmutableSet.of(ImmutableSet.of(8.1f), ImmutableSet.of(9.1f)), - ImmutableList.of(tupleOf(INT, TEXT).newValue(10, "11"))); - - Map properties = - ImmutableMap.builder() - .put("frozen(typeOf('udt1'))", udtValue1) - .put("frozen(typeOf('udt2'))", udtValue2) - .put("frozen(typeOf('udt3'))", udtValue3) - .build(); - - int vertexID = 1; - String vertexLabel = "graphBinaryNestedTypes"; - - runTest(properties, vertexLabel, vertexID); - } - - private void runTest(Map properties, String vertexLabel, int vertexID) { - // setup schema - session().execute(createVertexLabelStatement(properties, vertexLabel)); - - // execute insert query 
and read query - Map results = insertVertexThenReadProperties(properties, vertexID, vertexLabel); - - // test valid properties are returned - properties.forEach((k, v) -> assertThat(results.get(formatPropertyName(k))).isEqualTo(v)); - } - - private static GraphStatement createVertexLabelStatement( - Map properties, String vertexLabel) { - StringBuilder ddl = - new StringBuilder("schema.vertexLabel(vertexLabel).ifNotExists().partitionBy('id', Int)"); - - for (Map.Entry entry : properties.entrySet()) { - String typeDefinition = entry.getKey(); - String propName = formatPropertyName(typeDefinition); - - ddl.append(String.format(".property('%s', %s)", propName, typeDefinition)); - } - ddl.append(".create()"); - - return ScriptGraphStatement.newInstance(ddl.toString()) - .setQueryParam("vertexLabel", vertexLabel); - } - - protected abstract Map insertVertexThenReadProperties( - Map properties, int vertexID, String vertexLabel); - - protected static String formatPropertyName(String originalName) { - return String.format( - "prop%s", - originalName.replace("(", "").replace(")", "").replace(", ", "").replace("'", "")); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphGeoSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphGeoSearchIndexIT.java deleted file mode 100644 index 12db8820117..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphGeoSearchIndexIT.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Collection; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -public class CoreGraphGeoSearchIndexIT extends GraphGeoSearchIndexITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()) - .with("allow-filtering"); - - @Override - protected boolean isGraphBinary() { - 
return true; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return g; - } - - @BeforeClass - public static void setup() { - for (String setupQuery : geoIndices()) { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); - } - - CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user", true); - } - - /** - * A schema representing an address book with search enabled on name, description, and - * coordinates. - */ - public static Collection geoIndices() { - Object[][] providerIndexTypes = indexTypes(); - String[] indexTypes = new String[providerIndexTypes.length]; - for (int i = 0; i < providerIndexTypes.length; i++) { - indexTypes[i] = (String) providerIndexTypes[i][0]; - } - - StringBuilder schema = - new StringBuilder("schema.vertexLabel('user').partitionBy('full_name', Text)"); - StringBuilder propertyKeys = new StringBuilder(); - StringBuilder indices = new StringBuilder(); - StringBuilder vertex0 = - new StringBuilder("g.addV('user').property('full_name', 'Paul Thomas Joe')"); - StringBuilder vertex1 = - new StringBuilder("g.addV('user').property('full_name', 'George Bill Steve')"); - String vertex2 = "g.addV('user').property('full_name', 'James Paul Joe')"; - StringBuilder vertex3 = new StringBuilder("g.addV('user').property('full_name', 'Jill Alice')"); - - for (String indexType : indexTypes) { - propertyKeys.append(String.format(".property('pointPropWithBounds_%s', Point)\n", indexType)); - - propertyKeys.append( - String.format(".property('pointPropWithGeoBounds_%s', Point)\n", indexType)); - - if (indexType.equals("search")) { - indices.append( - String.format( - "schema.vertexLabel('user').searchIndex().by('pointPropWithBounds_%s').by('pointPropWithGeoBounds_%s').create()\n", - indexType, indexType)); - - } else { - throw new UnsupportedOperationException("IndexType other than search is not supported."); - } - - vertex0.append( - String.format( - ".property('pointPropWithBounds_%s', 
point(40.0001,40)).property('pointPropWithGeoBounds_%s', point(40.0001,40))", - indexType, indexType)); - vertex1.append( - String.format( - ".property('pointPropWithBounds_%s', point(40,40)).property('pointPropWithGeoBounds_%s', point(40,40))", - indexType, indexType)); - vertex3.append( - String.format( - ".property('pointPropWithBounds_%s', point(30,30)).property('pointPropWithGeoBounds_%s', point(30,30))", - indexType, indexType)); - } - - schema.append(propertyKeys).append(".create();\n").append(indices); - - return Lists.newArrayList( - schema.toString(), vertex0.toString(), vertex1.toString(), vertex2, vertex3.toString()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphTextSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphTextSearchIndexIT.java deleted file mode 100644 index 5545c3c00ac..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphTextSearchIndexIT.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.Collection; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -public class CoreGraphTextSearchIndexIT extends GraphTextSearchIndexITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()) - .with("allow-filtering"); - - @Override - protected boolean isGraphBinary() { - return true; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return g; - } - - /** - * A schema representing an address book with 3 properties (full_name_*, description_*, alias_*) - * created for each type of index (search, secondary, materialized). 
- */ - public static Collection textIndices() { - Object[][] providerIndexTypes = indexTypes(); - String[] indexTypes = new String[providerIndexTypes.length]; - for (int i = 0; i < providerIndexTypes.length; i++) { - indexTypes[i] = (String) providerIndexTypes[i][0]; - } - - StringBuilder schema = new StringBuilder("schema.vertexLabel('user')"); - StringBuilder propertyKeys = new StringBuilder(); - StringBuilder indices = new StringBuilder(); - StringBuilder vertex0 = new StringBuilder("g.addV('user')"); - StringBuilder vertex1 = new StringBuilder("g.addV('user')"); - StringBuilder vertex2 = new StringBuilder("g.addV('user')"); - StringBuilder vertex3 = new StringBuilder("g.addV('user')"); - - for (String indexType : indexTypes) { - propertyKeys.append( - String.format( - ".partitionBy('full_name_%s', Text)" - + ".property('description_%s', Text)" - + ".property('alias_%s', Text)\n", - indexType, indexType, indexType)); - - if (indexType.equals("search")) { - indices.append( - "schema.vertexLabel('user').searchIndex().by('full_name_search').asString().by('description_search').asText().by('alias_search').asString().create()\n"); - } else { - throw new UnsupportedOperationException("IndexType other than search is not supported."); - } - - vertex0.append( - String.format( - ".property('full_name_%s', 'Paul Thomas Joe').property('description_%s', 'Lives by the hospital').property('alias_%s', 'mario')", - indexType, indexType, indexType)); - vertex1.append( - String.format( - ".property('full_name_%s', 'George Bill Steve').property('description_%s', 'A cold dude').property('alias_%s', 'wario')", - indexType, indexType, indexType)); - vertex2.append( - String.format( - ".property('full_name_%s', 'James Paul Joe').property('description_%s', 'Likes to hang out').property('alias_%s', 'bowser')", - indexType, indexType, indexType)); - vertex3.append( - String.format( - ".property('full_name_%s', 'Jill Alice').property('description_%s', 'Enjoys a very nice cold coca 
cola').property('alias_%s', 'peach')", - indexType, indexType, indexType)); - } - - schema.append(propertyKeys).append(".create();\n").append(indices); - - return Lists.newArrayList( - schema.toString(), - vertex0.toString(), - vertex1.toString(), - vertex2.toString(), - vertex3.toString()); - } - - @BeforeClass - public static void setup() { - for (String setupQuery : textIndices()) { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); - } - - CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user", true); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CqlCollectionIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CqlCollectionIT.java deleted file mode 100644 index 8bc497c37db..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CqlCollectionIT.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.dse.driver.api.core.graph.predicates.CqlCollection.contains; -import static com.datastax.dse.driver.api.core.graph.predicates.CqlCollection.containsKey; -import static com.datastax.dse.driver.api.core.graph.predicates.CqlCollection.entryEq; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.predicates.CqlCollection; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8", - description = "DSE 6.8.0 required for collection predicates support") -public class CqlCollectionIT { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder().withDseWorkloads("graph").build(); - - private static final SessionRule SESSION_RULE = - new CqlSessionRuleBuilder(CCM_RULE) - .withCreateGraph() - .withCoreEngine() - .withGraphProtocol("graph-binary-1.0") - .build(); - - @ClassRule public static TestRule CHAIN = 
RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); - - @BeforeClass - public static void setup() { - for (String setupQuery : createSchema()) { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); - } - } - - private static Collection createSchema() { - return ImmutableList.of( - "schema.vertexLabel('software').ifNotExists().partitionBy('name', Varchar)" - + ".property('myList', listOf(Varchar))" - + ".property('mySet', setOf(Varchar))" - + ".property('myMapKeys', mapOf(Varchar, Int))" - + ".property('myMapValues', mapOf(Int, Varchar))" - + ".property('myMapEntries', mapOf(Int, Varchar))" - + ".property('myFrozenList', frozen(listOf(Varchar)))" - + ".property('myFrozenSet', frozen(setOf(Float)))" - + ".property('myFrozenMap', frozen(mapOf(Int, Varchar)))" - + ".create()", - "schema.vertexLabel('software').secondaryIndex('by_myList').ifNotExists().by('myList').create();" - + "schema.vertexLabel('software').secondaryIndex('by_mySet').ifNotExists().by('mySet').create();" - + "schema.vertexLabel('software').secondaryIndex('by_myMapKeys').ifNotExists().by('myMapKeys').indexKeys().create();" - + "schema.vertexLabel('software').secondaryIndex('by_myMapValues').ifNotExists().by('myMapValues').indexValues().create();" - + "schema.vertexLabel('software').secondaryIndex('by_myMapEntries').ifNotExists().by('myMapEntries').indexEntries().create();" - + "schema.vertexLabel('software').secondaryIndex('by_myFrozenList').ifNotExists().by('myFrozenList').indexFull().create();" - + "schema.vertexLabel('software').secondaryIndex('by_myFrozenSet').ifNotExists().by('myFrozenSet').indexFull().create();" - + "schema.vertexLabel('software').secondaryIndex('by_myFrozenMap').ifNotExists().by('myFrozenMap').indexFull().create()"); - } - - @Test - public void 
should_apply_contains_predicate_to_non_frozen_list() { - CqlSession session = SESSION_RULE.session(); - - List myList1 = com.google.common.collect.ImmutableList.of("apple", "banana"); - List myList2 = com.google.common.collect.ImmutableList.of("cranberry", "orange"); - - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse list 1") - .property("myList", myList1))); - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse list 2") - .property("myList", myList2))); - - assertThat(g.V().has("software", "myList", contains("apple")).values("myList").toList()) - .hasSize(1) - .contains(myList1) - .doesNotContain(myList2); - assertThat(g.V().has("software", "myList", contains("strawberry")).toList()).isEmpty(); - } - - @Test - public void should_apply_contains_predicate_to_non_frozen_set() { - CqlSession session = SESSION_RULE.session(); - - Set mySet1 = ImmutableSet.of("apple", "banana"); - Set mySet2 = ImmutableSet.of("cranberry", "orange"); - - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g.addV("software").property("name", "dse set 1").property("mySet", mySet1))); - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g.addV("software").property("name", "dse set 2").property("mySet", mySet2))); - - assertThat(g.V().has("software", "mySet", contains("apple")).values("mySet").toList()) - .hasSize(1) - .contains(mySet1) - .doesNotContain(mySet2); - assertThat(g.V().has("software", "mySet", contains("strawberry")).toList()).isEmpty(); - } - - @Test - public void should_apply_containsKey_predicate_to_non_frozen_map() { - CqlSession session = SESSION_RULE.session(); - - Map myMap1 = ImmutableMap.builder().put("id1", 1).build(); - Map myMap2 = ImmutableMap.builder().put("id2", 2).build(); - - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse map containsKey 1") - 
.property("myMapKeys", myMap1))); - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse map containsKey 2") - .property("myMapKeys", myMap2))); - - assertThat(g.V().has("software", "myMapKeys", containsKey("id1")).values("myMapKeys").toList()) - .hasSize(1) - .contains(myMap1) - .doesNotContain(myMap2); - assertThat(g.V().has("software", "myMapKeys", containsKey("id3")).toList()).isEmpty(); - } - - @Test - public void should_apply_containsValue_predicate_to_non_frozen_map() { - CqlSession session = SESSION_RULE.session(); - - Map myMap1 = ImmutableMap.builder().put(11, "abc").build(); - Map myMap2 = ImmutableMap.builder().put(22, "def").build(); - - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse map containsValue 1") - .property("myMapValues", myMap1))); - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse map containsValue 2") - .property("myMapValues", myMap2))); - assertThat( - g.V() - .has("software", "myMapValues", CqlCollection.containsValue("abc")) - .values("myMapValues") - .toList()) - .hasSize(1) - .contains(myMap1) - .doesNotContain(myMap2); - assertThat(g.V().has("software", "myMapValues", CqlCollection.containsValue("xyz")).toList()) - .isEmpty(); - } - - @Test - public void should_apply_entryEq_predicate_to_non_frozen_map() { - CqlSession session = SESSION_RULE.session(); - - Map myMap1 = ImmutableMap.builder().put(11, "abc").build(); - Map myMap2 = ImmutableMap.builder().put(22, "def").build(); - - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse map entryEq 1") - .property("myMapEntries", myMap1))); - session.execute( - FluentGraphStatement.newInstance( - DseGraph.g - .addV("software") - .property("name", "dse map entryEq 2") - .property("myMapEntries", myMap2))); - assertThat( - g.V() - .has("software", 
"myMapEntries", entryEq(11, "abc")) - .values("myMapEntries") - .toList()) - .hasSize(1) - .contains(myMap1) - .doesNotContain(myMap2); - assertThat(g.V().has("software", "myMapEntries", entryEq(11, "xyz")).toList()).isEmpty(); - assertThat(g.V().has("software", "myMapEntries", entryEq(33, "abc")).toList()).isEmpty(); - assertThat(g.V().has("software", "myMapEntries", entryEq(33, "xyz")).toList()).isEmpty(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphAuthenticationIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphAuthenticationIT.java deleted file mode 100644 index de1c23fd661..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphAuthenticationIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.0", - description = "DSE 5 required for Graph") -public class GraphAuthenticationIT { - - @ClassRule - public static CustomCcmRule ccm = - CustomCcmRule.builder() - .withDseConfiguration("authentication_options.enabled", true) - .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") - .withDseWorkloads("graph") - .build(); - - @BeforeClass - public static void sleepForAuth() { - if (ccm.getCassandraVersion().compareTo(Version.V2_2_0) < 0) { - // Sleep for 1 second to allow C* auth to do its work. 
This is only needed for 2.1 - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - } - - @Test - public void should_execute_graph_query_on_authenticated_connection() { - CqlSession dseSession = - SessionUtils.newSession( - ccm, - DriverConfigLoader.programmaticBuilder() - .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra") - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .build()); - - GraphNode gn = - dseSession.execute(ScriptGraphStatement.newInstance("1+1").setSystemQuery(true)).one(); - assertThat(gn).isNotNull(); - assertThat(gn.asInt()).isEqualTo(2); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphGeoSearchIndexITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphGeoSearchIndexITBase.java deleted file mode 100644 index 67d0cb34d43..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphGeoSearchIndexITBase.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.assertj.core.api.AssertionsForClassTypes.assertThatCode; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.dse.driver.api.core.graph.predicates.Geo; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.assertj.core.api.Assumptions; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public abstract class GraphGeoSearchIndexITBase { - - protected abstract boolean isGraphBinary(); - - protected abstract GraphTraversalSource graphTraversalSource(); - - @DataProvider - public static Object[][] indexTypes() { - return new Object[][] {{"search"} - - // FIXME for some reason, materialized and secondary indices have decided not to work - // I get an exception saying "there is no index for this query, here is the defined - // indices: " and the list contains the indices that are needed. Mysterious. 
- // There may be something to do with differences in the CCMBridge adapter of the new - // driver, some changes make materialized views and secondary indices to be not - // considered for graph: - // - // , {"materialized"} - // , {"secondary"} - }; - } - - @UseDataProvider("indexTypes") - @Test - public void search_by_distance_cartesian_graphson(String indexType) { - // cartesian is not supported by graph_binary - Assumptions.assumeThat(isGraphBinary()).isFalse(); - // in cartesian geometry, the distance between POINT(30 30) and POINT(40 40) is exactly - // 14.142135623730951 - // any point further than that should be detected outside of the range. - // the vertex "Paul Thomas Joe" is at POINT(40.0001 40), and shouldn't be detected inside the - // range for classic. - - GraphTraversal traversal = - graphTraversalSource() - .V() - .has( - "user", - "pointPropWithBounds_" + indexType, - Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951)) - .values("full_name"); - assertThat(traversal.toList()).containsOnly("George Bill Steve", "Jill Alice"); - } - - @UseDataProvider("indexTypes") - @Test - public void search_by_distance_geodetic(String indexType) { - // in geodetic geometry, the distance between POINT(30 30) and POINT(40 40) is exactly - // 12.908258700131379 - // any point further than that should be detected outside of the range. - // the vertex "Paul Thomas Joe" is at POINT(40.0001 40), and shouldn't be detected inside the - // range. 
- GraphTraversal traversal = - graphTraversalSource() - .V() - .has( - "user", - "pointPropWithGeoBounds_" + indexType, - Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES)) - .values("full_name"); - assertThat(traversal.toList()).containsOnly("George Bill Steve", "Jill Alice"); - } - - @Test - public void - should_fail_if_geodetic_predicate_used_against_cartesian_property_with_search_index() { - - // for graph_binary cartesian properties are not supported, thus it does not fail - if (isGraphBinary()) { - assertThatCode( - () -> { - GraphTraversal traversal = - graphTraversalSource() - .V() - .has( - "user", - "pointPropWithBounds_search", - Geo.inside( - Point.fromCoordinates(30, 30), - 12.908258700131379, - Geo.Unit.DEGREES)) - .values("full_name"); - traversal.toList(); - }) - .doesNotThrowAnyException(); - } else { - try { - GraphTraversal traversal = - graphTraversalSource() - .V() - .has( - "user", - "pointPropWithBounds_search", - Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES)) - .values("full_name"); - traversal.toList(); - fail("Should have failed executing the traversal because the property type is incorrect"); - } catch (InvalidQueryException e) { - assertThat(e.getMessage()) - .contains("Distance units cannot be used in queries against non-geodetic points."); - } - } - } - - @Test - public void - should_fail_if_cartesian_predicate_used_against_geodetic_property_with_search_index() { - - if (isGraphBinary()) { - try { - GraphTraversal traversal = - graphTraversalSource() - .V() - .has( - "user", - "pointPropWithGeoBounds_search", - Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951)) - .values("full_name"); - traversal.toList(); - fail("Should have failed executing the traversal because the property type is incorrect"); - } catch (InvalidQueryException e) { - assertThat(e.getMessage()) - .contains("Predicate 'insideCartesian' is not supported on property"); - } - } else { - try { - 
GraphTraversal traversal = - graphTraversalSource() - .V() - .has( - "user", - "pointPropWithGeoBounds_search", - Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951)) - .values("full_name"); - traversal.toList(); - fail("Should have failed executing the traversal because the property type is incorrect"); - } catch (InvalidQueryException e) { - assertThat(e.getMessage()) - .contains("Distance units are required for queries against geodetic points."); - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphPagingIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphPagingIT.java deleted file mode 100644 index 01938c34e07..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphPagingIT.java +++ /dev/null @@ -1,510 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase.Options; -import static org.assertj.core.api.Assertions.fail; -import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; - -import com.codahale.metrics.Timer; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.dse.driver.internal.core.graph.MultiPageGraphResultSet; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.util.CountingIterator; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.SocketAddress; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import 
org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "Graph paging requires DSE 6.8+") -@RunWith(DataProviderRunner.class) -public class GraphPagingIT { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - Collections.singletonList(DseSessionMetric.GRAPH_REQUESTS.getPath())) - .withStringList( - DefaultDriverOption.METRICS_NODE_ENABLED, - Collections.singletonList(DseNodeMetric.GRAPH_MESSAGES.getPath())) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute( - ScriptGraphStatement.newInstance( - "schema.vertexLabel('person')" - + ".partitionBy('pk', Int)" - + ".clusterBy('cc', Int)" - + ".property('name', Text)" - + ".create();") - .setGraphName(SESSION_RULE.getGraphName())); - for (int i = 1; i <= 100; i++) { - SESSION_RULE - .session() - .execute( - ScriptGraphStatement.newInstance( - String.format( - "g.addV('person').property('pk',0).property('cc',%d).property('name', '%s');", - i, "user" + i)) - .setGraphName(SESSION_RULE.getGraphName())); - } - } - - @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") - @Test - public void synchronous_paging_with_options(Options options) { - // given - DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.ENABLED); - - if (options.sizeInBytes) { - // Page sizes in bytes are not supported with graph queries - return; - } - - // when - GraphResultSet result = - SESSION_RULE - .session() - .execute( 
- ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - - // then - List nodes = result.all(); - - assertThat(((CountingIterator) result.iterator()).remaining()).isZero(); - assertThat(nodes).hasSize(options.expectedRows); - for (int i = 1; i <= nodes.size(); i++) { - GraphNode node = nodes.get(i - 1); - assertThat(node.asString()).isEqualTo("user" + i); - } - assertThat(result.getRequestExecutionInfo()).isNotNull(); - assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) - .isEqualTo(firstCcmNode()); - assertIfMultiPage(result, options.expectedPages); - validateMetrics(SESSION_RULE.session()); - } - - @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") - @Test - public void synchronous_paging_with_options_when_auto(Options options) { - // given - DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.AUTO); - - if (options.sizeInBytes) { - // Page sizes in bytes are not supported with graph queries - return; - } - - // when - GraphResultSet result = - SESSION_RULE - .session() - .execute( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - - // then - List nodes = result.all(); - - assertThat(((CountingIterator) result.iterator()).remaining()).isZero(); - assertThat(nodes).hasSize(options.expectedRows); - for (int i = 1; i <= nodes.size(); i++) { - GraphNode node = nodes.get(i - 1); - assertThat(node.asString()).isEqualTo("user" + i); - } - assertThat(result.getRequestExecutionInfo()).isNotNull(); - assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) - .isEqualTo(firstCcmNode()); - - assertIfMultiPage(result, options.expectedPages); - validateMetrics(SESSION_RULE.session()); - } - - 
private void assertIfMultiPage(GraphResultSet result, int expectedPages) { - if (result instanceof MultiPageGraphResultSet) { - assertThat(((MultiPageGraphResultSet) result).getRequestExecutionInfos()) - .hasSize(expectedPages); - assertThat(result.getRequestExecutionInfo()) - .isSameAs( - ((MultiPageGraphResultSet) result).getRequestExecutionInfos().get(expectedPages - 1)); - } - } - - @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") - @Test - public void synchronous_options_with_paging_disabled_should_fallback_to_single_page( - Options options) { - // given - DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.DISABLED); - - if (options.sizeInBytes) { - // Page sizes in bytes are not supported with graph queries - return; - } - - // when - GraphResultSet result = - SESSION_RULE - .session() - .execute( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - - // then - List nodes = result.all(); - - assertThat(((CountingIterator) result.iterator()).remaining()).isZero(); - assertThat(nodes).hasSize(100); - for (int i = 1; i <= nodes.size(); i++) { - GraphNode node = nodes.get(i - 1); - assertThat(node.asString()).isEqualTo("user" + i); - } - assertThat(result.getRequestExecutionInfo()).isNotNull(); - assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) - .isEqualTo(firstCcmNode()); - validateMetrics(SESSION_RULE.session()); - } - - @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") - @Test - public void asynchronous_paging_with_options(Options options) - throws ExecutionException, InterruptedException { - // given - DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.ENABLED); - - if (options.sizeInBytes) { - // Page sizes in bytes are not supported with graph queries - return; - } 
- - // when - CompletionStage result = - SESSION_RULE - .session() - .executeAsync( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - - // then - checkAsyncResult(result, options, 0, 1, new ArrayList<>()); - validateMetrics(SESSION_RULE.session()); - } - - @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") - @Test - public void asynchronous_paging_with_options_when_auto(Options options) - throws ExecutionException, InterruptedException { - // given - DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.AUTO); - - if (options.sizeInBytes) { - // Page sizes in bytes are not supported with graph queries - return; - } - - // when - CompletionStage result = - SESSION_RULE - .session() - .executeAsync( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - - // then - checkAsyncResult(result, options, 0, 1, new ArrayList<>()); - validateMetrics(SESSION_RULE.session()); - } - - @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") - @Test - public void asynchronous_options_with_paging_disabled_should_fallback_to_single_page( - Options options) throws ExecutionException, InterruptedException { - // given - DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.DISABLED); - - if (options.sizeInBytes) { - // Page sizes in bytes are not supported with graph queries - return; - } - - // when - CompletionStage result = - SESSION_RULE - .session() - .executeAsync( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - - // then - AsyncGraphResultSet asyncGraphResultSet = 
result.toCompletableFuture().get(); - for (int i = 1; i <= 100; i++, asyncGraphResultSet.remaining()) { - GraphNode node = asyncGraphResultSet.one(); - assertThat(node.asString()).isEqualTo("user" + i); - } - assertThat(asyncGraphResultSet.remaining()).isEqualTo(0); - validateMetrics(SESSION_RULE.session()); - } - - private void checkAsyncResult( - CompletionStage future, - Options options, - int rowsFetched, - int pageNumber, - List graphExecutionInfos) - throws ExecutionException, InterruptedException { - AsyncGraphResultSet result = future.toCompletableFuture().get(); - int remaining = result.remaining(); - rowsFetched += remaining; - assertThat(remaining).isLessThanOrEqualTo(options.pageSize); - - if (options.expectedRows == rowsFetched) { - assertThat(result.hasMorePages()).isFalse(); - } else { - assertThat(result.hasMorePages()).isTrue(); - } - - int first = (pageNumber - 1) * options.pageSize + 1; - int last = (pageNumber - 1) * options.pageSize + remaining; - - for (int i = first; i <= last; i++, remaining--) { - GraphNode node = result.one(); - assertThat(node.asString()).isEqualTo("user" + i); - assertThat(result.remaining()).isEqualTo(remaining - 1); - } - - assertThat(result.remaining()).isZero(); - assertThat(result.getRequestExecutionInfo()).isNotNull(); - assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) - .isEqualTo(firstCcmNode()); - - graphExecutionInfos.add(result.getRequestExecutionInfo()); - - assertThat(graphExecutionInfos).hasSize(pageNumber); - assertThat(result.getRequestExecutionInfo()).isSameAs(graphExecutionInfos.get(pageNumber - 1)); - if (pageNumber == options.expectedPages) { - assertThat(result.hasMorePages()).isFalse(); - assertThat(options.expectedRows).isEqualTo(rowsFetched); - assertThat(options.expectedPages).isEqualTo(pageNumber); - } else { - assertThat(result.hasMorePages()).isTrue(); - checkAsyncResult( - result.fetchNextPage(), options, rowsFetched, pageNumber + 1, graphExecutionInfos); 
- } - } - - @Test - public void should_cancel_result_set() { - // given - DriverExecutionProfile profile = - enableGraphPaging() - .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) - .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, 10); - - // when - GraphStatement statement = - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile); - MultiPageGraphResultSet results = - (MultiPageGraphResultSet) SESSION_RULE.session().execute(statement); - - assertThat(((MultiPageGraphResultSet.RowIterator) results.iterator()).isCancelled()).isFalse(); - assertThat(((CountingIterator) results.iterator()).remaining()).isEqualTo(10); - results.cancel(); - - assertThat(((MultiPageGraphResultSet.RowIterator) results.iterator()).isCancelled()).isTrue(); - assertThat(((CountingIterator) results.iterator()).remaining()).isEqualTo(10); - for (int i = 0; i < 10; i++) { - results.one(); - } - } - - @Test - public void should_trigger_global_timeout_sync_from_config() { - // given - Duration timeout = Duration.ofMillis(100); - DriverExecutionProfile profile = - enableGraphPaging().withDuration(DseDriverOption.GRAPH_TIMEOUT, timeout); - - // when - try { - CCM_RULE.getCcmBridge().pause(1); - try { - SESSION_RULE - .session() - .execute( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - fail("Expecting DriverTimeoutException"); - } catch (DriverTimeoutException e) { - assertThat(e).hasMessage("Query timed out after " + timeout); - } - } finally { - CCM_RULE.getCcmBridge().resume(1); - } - } - - @Test - public void should_trigger_global_timeout_sync_from_statement() { - // given - Duration timeout = Duration.ofMillis(100); - - // when - try { - CCM_RULE.getCcmBridge().pause(1); - try { - SESSION_RULE - 
.session() - .execute( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setTimeout(timeout)); - fail("Expecting DriverTimeoutException"); - } catch (DriverTimeoutException e) { - assertThat(e).hasMessage("Query timed out after " + timeout); - } - } finally { - CCM_RULE.getCcmBridge().resume(1); - } - } - - @Test - public void should_trigger_global_timeout_async() throws InterruptedException { - // given - Duration timeout = Duration.ofMillis(100); - DriverExecutionProfile profile = - enableGraphPaging().withDuration(DseDriverOption.GRAPH_TIMEOUT, timeout); - - // when - try { - CCM_RULE.getCcmBridge().pause(1); - CompletionStage result = - SESSION_RULE - .session() - .executeAsync( - ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") - .setGraphName(SESSION_RULE.getGraphName()) - .setTraversalSource("g") - .setExecutionProfile(profile)); - result.toCompletableFuture().get(); - fail("Expecting DriverTimeoutException"); - } catch (ExecutionException e) { - assertThat(e.getCause()).hasMessage("Query timed out after " + timeout); - } finally { - CCM_RULE.getCcmBridge().resume(1); - } - } - - private DriverExecutionProfile enableGraphPaging() { - return SESSION_RULE - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withString(DseDriverOption.GRAPH_PAGING_ENABLED, PagingEnabledOptions.ENABLED.name()); - } - - private DriverExecutionProfile enableGraphPaging( - Options options, PagingEnabledOptions pagingEnabledOptions) { - return SESSION_RULE - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, options.pageSize) - .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, options.maxPages) - .withInt( - DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, options.maxPagesPerSecond) - .withString(DseDriverOption.GRAPH_PAGING_ENABLED, 
pagingEnabledOptions.name()); - } - - private SocketAddress firstCcmNode() { - return CCM_RULE.getContactPoints().iterator().next().resolve(); - } - - private void validateMetrics(CqlSession session) { - Node node = session.getMetadata().getNodes().values().iterator().next(); - assertThat(session.getMetrics()).isPresent(); - Metrics metrics = session.getMetrics().get(); - assertThat(metrics.getNodeMetric(node, DseNodeMetric.GRAPH_MESSAGES)).isPresent(); - Timer messages = (Timer) metrics.getNodeMetric(node, DseNodeMetric.GRAPH_MESSAGES).get(); - assertThat(messages.getCount()).isGreaterThan(0); - assertThat(messages.getMeanRate()).isGreaterThan(0); - assertThat(metrics.getSessionMetric(DseSessionMetric.GRAPH_REQUESTS)).isPresent(); - Timer requests = (Timer) metrics.getSessionMetric(DseSessionMetric.GRAPH_REQUESTS).get(); - assertThat(requests.getCount()).isGreaterThan(0); - assertThat(requests.getMeanRate()).isGreaterThan(0); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphSpeculativeExecutionIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphSpeculativeExecutionIT.java deleted file mode 100644 index 130e9a17cc1..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphSpeculativeExecutionIT.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.time.Duration; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8 required for graph paging") -@RunWith(DataProviderRunner.class) -public class GraphSpeculativeExecutionIT { - - @ClassRule - public static CustomCcmRule ccmRule = CustomCcmRule.builder().withDseWorkloads("graph").build(); - - @Test - @UseDataProvider("idempotenceAndSpecExecs") - public void should_use_speculative_executions_when_enabled( - boolean defaultIdempotence, - Boolean statementIdempotence, - Class 
speculativeExecutionClass, - boolean expectSpeculativeExecutions) { - - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(ccmRule.getContactPoints()) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withBoolean( - DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, defaultIdempotence) - .withInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, 10) - .withClass( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - speculativeExecutionClass) - .withDuration( - DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, Duration.ofMillis(10)) - .withString(DseDriverOption.GRAPH_PAGING_ENABLED, "ENABLED") - .build()) - .build()) { - - ScriptGraphStatement statement = - ScriptGraphStatement.newInstance( - "java.util.concurrent.TimeUnit.MILLISECONDS.sleep(1000L);") - .setIdempotent(statementIdempotence); - - GraphResultSet result = session.execute(statement); - int speculativeExecutionCount = - result.getRequestExecutionInfo().getSpeculativeExecutionCount(); - if (expectSpeculativeExecutions) { - assertThat(speculativeExecutionCount).isGreaterThan(0); - } else { - assertThat(speculativeExecutionCount).isEqualTo(0); - } - } - } - - @DataProvider - public static Object[][] idempotenceAndSpecExecs() { - return new Object[][] { - new Object[] {false, false, NoSpeculativeExecutionPolicy.class, false}, - new Object[] {false, true, NoSpeculativeExecutionPolicy.class, false}, - new Object[] {false, null, NoSpeculativeExecutionPolicy.class, false}, - new Object[] {true, false, NoSpeculativeExecutionPolicy.class, false}, - new Object[] {true, true, NoSpeculativeExecutionPolicy.class, false}, - new Object[] {true, null, NoSpeculativeExecutionPolicy.class, false}, - new Object[] {false, false, ConstantSpeculativeExecutionPolicy.class, false}, - new Object[] {false, true, ConstantSpeculativeExecutionPolicy.class, true}, - new Object[] {false, null, ConstantSpeculativeExecutionPolicy.class, false}, - new Object[] {true, false, 
ConstantSpeculativeExecutionPolicy.class, false}, - new Object[] {true, true, ConstantSpeculativeExecutionPolicy.class, true}, - new Object[] {true, null, ConstantSpeculativeExecutionPolicy.class, true}, - }; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTestSupport.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTestSupport.java deleted file mode 100644 index 6508be38175..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTestSupport.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; - -/** Utility for creating commonly used Rule builders for tests. */ -public class GraphTestSupport { - - /** CCM Rule builder for Graph Data Type tests. 
*/ - public static final CustomCcmRule.Builder CCM_BUILDER_WITH_GRAPH = - CustomCcmRule.builder() - .withDseWorkloads("graph") - .withDseConfiguration("graph.max_query_params", 32) - .withDseConfiguration( - "graph.gremlin_server.scriptEngines.gremlin-groovy.config.sandbox_enabled", "false"); - - /** CCM Rule builder for general Graph workload tests. */ - public static final CustomCcmRule.Builder GRAPH_CCM_RULE_BUILDER = - CustomCcmRule.builder().withDseWorkloads("graph"); - - /** - * Creates a session rule builder for Classic Graph workloads with the default Graph protocol. The - * default GraphProtocol for Classic Graph: GraphSON 2.0. - * - * @param ccmRule CustomCcmRule configured for Graph workloads - * @return A Session rule builder configured for Classic Graph workloads - */ - public static CqlSessionRuleBuilder getClassicGraphSessionBuilder(CustomCcmRule ccmRule) { - return new CqlSessionRuleBuilder(ccmRule) - .withCreateGraph() - .withGraphProtocol(GraphProtocol.GRAPHSON_2_0.toInternalCode()); - } - - /** - * Creates a session rule builder for Core Graph workloads with the default Graph protocol. The - * default GraphProtocol for Core Graph: Graph Binary 1.0. 
- * - * @param ccmRule CustomCcmRule configured for Graph workloads - * @return A Session rule builder configured for Core Graph workloads - */ - public static CqlSessionRuleBuilder getCoreGraphSessionBuilder(CustomCcmRule ccmRule) { - return new CqlSessionRuleBuilder(ccmRule) - .withCreateGraph() - .withCoreEngine() - .withGraphProtocol(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTextSearchIndexITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTextSearchIndexITBase.java deleted file mode 100644 index d70d206715e..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTextSearchIndexITBase.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.predicates.Search; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public abstract class GraphTextSearchIndexITBase { - - protected abstract boolean isGraphBinary(); - - protected abstract GraphTraversalSource graphTraversalSource(); - - @DataProvider - public static Object[][] indexTypes() { - return new Object[][] {{"search"} - - // FIXME for some reason, materialized and secondary indices have decided not to work - // I get an exception saying "there is no index for this query, here is the defined - // indices: " and the list contains the indices that are needed. Mysterious. - // There may be something to do with differences in the CCMBridge adapter of the new - // driver, some changes make materialized views and secondary indices to be not - // considered for graph: - // - // , {"materialized"} - // , {"secondary"} - }; - } - - /** - * Validates that a graph traversal can be made by using a Search prefix predicate on an indexed - * property of the given type. - * - *

    Finds all 'user' vertices having a 'full_name' property beginning with 'Paul'. - * - * @test_category dse:graph - */ - @UseDataProvider("indexTypes") - @Test - public void search_by_prefix_search(String indexType) { - // Only one user with full_name starting with Paul. - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "full_name_" + indexType, Search.prefix("Paul")) - .values("full_name_" + indexType); - assertThat(traversal.toList()).containsOnly("Paul Thomas Joe"); - } - - /** - * Validates that a graph traversal can be made by using a Search regex predicate on an indexed - * property of the given type. - * - *

    Finds all 'user' vertices having a 'full_name' property matching regex '.*Paul.*'. - * - * @test_category dse:graph - */ - @UseDataProvider("indexTypes") - @Test - public void search_by_regex(String indexType) { - // Only two people with names containing pattern for Paul. - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "full_name_" + indexType, Search.regex(".*Paul.*")) - .values("full_name_" + indexType); - assertThat(traversal.toList()).containsOnly("Paul Thomas Joe", "James Paul Joe"); - } - - /** - * Validates that a graph traversal can be made by using a Search fuzzy predicate on an indexed - * property of the given type. - * - *

    Finds all 'user' vertices having a 'alias' property matching 'awrio' with a fuzzy distance - * of 1. - * - * @test_category dse:graph - */ - @UseDataProvider("indexTypes") - @Test - @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0") - public void search_by_fuzzy(String indexType) { - // Alias matches 'awrio' fuzzy - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "alias_" + indexType, Search.fuzzy("awrio", 1)) - .values("full_name_" + indexType); - // Should not match 'Paul Thomas Joe' since alias is 'mario', which is at distance 2 of 'awrio' - // (a -> m, w -> a) - // Should match 'George Bill Steve' since alias is 'wario' witch matches 'awrio' within a - // distance of 1 (transpose w with a). - assertThat(traversal.toList()).containsOnly("George Bill Steve"); - } - - /** - * Validates that a graph traversal can be made by using a Search token predicate on an indexed - * property of the given type. - * - *

    Finds all 'user' vertices having a 'description' property containing the token 'cold'. - * - * @test_category dse:graph - */ - @UseDataProvider("indexTypes") - @Test - public void search_by_token(String indexType) { - // Description containing token 'cold' - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "description_" + indexType, Search.token("cold")) - .values("full_name_" + indexType); - assertThat(traversal.toList()).containsOnly("Jill Alice", "George Bill Steve"); - } - - /** - * Validates that a graph traversal can be made by using a Search token prefix predicate on an - * indexed property of the given type. - * - *

    Finds all 'user' vertices having a 'description' containing the token prefix 'h'. - */ - @UseDataProvider("indexTypes") - @Test - public void search_by_token_prefix(String indexType) { - // Description containing a token starting with h - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "description_" + indexType, Search.tokenPrefix("h")) - .values("full_name_" + indexType); - assertThat(traversal.toList()).containsOnly("Paul Thomas Joe", "James Paul Joe"); - } - - /** - * Validates that a graph traversal can be made by using a Search token regex predicate on an - * indexed property of the given type. - * - *

    Finds all 'user' vertices having a 'description' containing the token regex - * '(nice|hospital)'. - */ - @UseDataProvider("indexTypes") - @Test - public void search_by_token_regex(String indexType) { - // Description containing nice or hospital - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "description_" + indexType, Search.tokenRegex("(nice|hospital)")) - .values("full_name_" + indexType); - assertThat(traversal.toList()).containsOnly("Paul Thomas Joe", "Jill Alice"); - } - - /** - * Validates that a graph traversal can be made by using a Search fuzzy predicate on an indexed - * property of the given type. - * - *

    Finds all 'user' vertices having a 'description' property matching 'lieks' with a fuzzy - * distance of 1. - * - * @test_category dse:graph - */ - @UseDataProvider("indexTypes") - @Test - @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0") - public void search_by_token_fuzzy(String indexType) { - // Description containing 'lives' fuzzy - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "description_" + indexType, Search.tokenFuzzy("lieks", 1)) - .values("full_name_" + indexType); - // Should not match 'Paul Thomas Joe' since description contains 'Lives' which is at distance of - // 2 (e -> v, k -> e) - // Should match 'James Paul Joe' since description contains 'Likes' (transpose e for k) - assertThat(traversal.toList()).containsOnly("James Paul Joe"); - } - - /** - * Validates that a graph traversal can be made by using a Search phrase predicate on an indexed - * property of the given type. - * - *

    Finds all 'user' vertices having a 'description' property matching 'a cold' with a distance - * of 2. - * - * @test_category dse:graph - */ - @UseDataProvider("indexTypes") - @Test - @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0") - public void search_by_phrase(String indexType) { - // Full name contains phrase "Paul Joe" - GraphTraversal traversal = - graphTraversalSource() - .V() - .has("user", "description_" + indexType, Search.phrase("a cold", 2)) - .values("full_name_" + indexType); - // Should match 'George Bill Steve' since 'A cold dude' is at distance of 0 for 'a cold'. - // Should match 'Jill Alice' since 'Enjoys a very nice cold coca cola' is at distance of 2 for - // 'a cold'. - assertThat(traversal.toList()).containsOnly("George Bill Steve", "Jill Alice"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTimeoutsIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTimeoutsIT.java deleted file mode 100644 index d2b58cc0f9c..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTimeoutsIT.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static com.datastax.dse.driver.api.core.graph.ScriptGraphStatement.newInstance; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.time.Duration; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.0", - description = "DSE 5 required for Graph") -public class GraphTimeoutsIT { - - public static CustomCcmRule ccmRule = CustomCcmRule.builder().withDseWorkloads("graph").build(); - - public static SessionRule sessionRule = - SessionRule.builder(ccmRule).withCreateGraph().build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - public void should_have_driver_wait_indefinitely_by_default_and_server_return_timeout_response() { - Duration serverTimeout = Duration.ofSeconds(1); - - DriverExecutionProfile drivertest1 = - sessionRule - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, "drivertest1"); - - // We could have done with the server's default but it's 30 
secs so the test would have taken at - // least that time. So we simulate a server timeout change. - sessionRule - .session() - .execute( - newInstance( - "graph.schema().config().option(\"graph.traversal_sources.drivertest1.evaluation_timeout\").set('" - + serverTimeout.toMillis() - + " ms')") - .setExecutionProfile(drivertest1)); - - try { - // The driver should wait indefinitely, but the server should timeout first. - sessionRule - .session() - .execute( - newInstance("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1") - .setExecutionProfile(drivertest1)); - fail("The request should have timed out"); - } catch (InvalidQueryException e) { - assertThat(e) - .hasMessageContainingAll( - "evaluation exceeded", - "threshold of ", - Long.toString(serverTimeout.toMillis()), - "ms"); - } - } - - @Test - public void should_not_take_into_account_request_timeout_if_more_than_server_timeout() { - Duration serverTimeout = Duration.ofSeconds(1); - Duration clientTimeout = Duration.ofSeconds(10); - - DriverExecutionProfile drivertest2 = - sessionRule - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, "drivertest2") - .withDuration(DseDriverOption.GRAPH_TIMEOUT, clientTimeout); - - sessionRule - .session() - .execute( - newInstance( - "graph.schema().config().option(\"graph.traversal_sources.drivertest2.evaluation_timeout\").set('" - + serverTimeout.toMillis() - + " ms')") - .setExecutionProfile(drivertest2)); - - try { - // The driver should wait 32 secs, but the server should timeout first. 
- sessionRule - .session() - .execute( - newInstance("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1") - .setExecutionProfile(drivertest2)); - fail("The request should have timed out"); - } catch (InvalidQueryException e) { - assertThat(e) - .hasMessageContainingAll( - "evaluation exceeded", - "threshold of ", - Long.toString(serverTimeout.toMillis()), - "ms"); - } - } - - @Test - public void should_take_into_account_request_timeout_if_less_than_server_timeout() { - Duration serverTimeout = Duration.ofSeconds(10); - Duration clientTimeout = Duration.ofSeconds(1); - - DriverExecutionProfile drivertest3 = - sessionRule - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, "drivertest3"); - - // We could have done with the server's default but it's 30 secs so the test would have taken at - // least that time. Also, we don't want to rely on server's default. So we simulate a server - // timeout change. - sessionRule - .session() - .execute( - ScriptGraphStatement.newInstance( - "graph.schema().config().option(\"graph.traversal_sources.drivertest3.evaluation_timeout\").set('" - + serverTimeout.toMillis() - + " ms')") - .setExecutionProfile(drivertest3)); - - try { - // The timeout on the request is lower than what's defined server side, so it should be taken - // into account. - sessionRule - .session() - .execute( - ScriptGraphStatement.newInstance( - "java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1") - .setExecutionProfile( - drivertest3.withDuration(DseDriverOption.GRAPH_TIMEOUT, clientTimeout))); - fail("The request should have timed out"); - // Since the driver sends its timeout in the request payload, server timeout will be equal to - // client timeout for this request. We cannot know for sure if it will be a client timeout - // error, or a server timeout, and during tests, both happened and not deterministically. 
- } catch (DriverTimeoutException e) { - assertThat(e).hasMessage("Query timed out after " + clientTimeout); - } catch (InvalidQueryException e) { - assertThat(e) - .hasMessageContainingAll( - "evaluation exceeded", - "threshold of ", - Long.toString(clientTimeout.toMillis()), - "ms"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SampleGraphScripts.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SampleGraphScripts.java deleted file mode 100644 index 19ff957736a..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SampleGraphScripts.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -public class SampleGraphScripts { - - public static final String MAKE_STRICT = - "schema.config().option('graph.schema_mode').set('production');\n"; - - public static final String MAKE_NOT_STRICT = - "schema.config().option('graph.schema_mode').set('development');\n"; - - public static final String ALLOW_SCANS = - "schema.config().option('graph.allow_scan').set('true');\n"; - - private static final String CLASSIC_SCHEMA = - "schema.propertyKey('name').Text().ifNotExists().create();\n" - + "schema.propertyKey('age').Int().ifNotExists().create();\n" - + "schema.propertyKey('lang').Text().ifNotExists().create();\n" - + "schema.propertyKey('weight').Float().ifNotExists().create();\n" - + "schema.vertexLabel('person').properties('name', 'age').ifNotExists().create();\n" - + "schema.vertexLabel('software').properties('name', 'lang').ifNotExists().create();\n" - + "schema.edgeLabel('created').properties('weight').connection('person', 'software').ifNotExists().create();\n" - + "schema.edgeLabel('knows').properties('weight').connection('person', 'person').ifNotExists().create();\n"; - - private static final String INSERT_DATA = - "marko = g.addV('person').property('name', 'marko').property('age', 29).next();\n" - + "vadas = g.addV('person').property('name', 'vadas').property('age', 27).next();\n" - + "josh = g.addV('person').property('name', 'josh').property('age', 32).next();\n" - + "peter = g.addV('person').property('name', 'peter').property('age', 35).next();\n" - + "lop = g.addV('software').property('name', 'lop').property('lang', 'java').next();\n" - + "ripple = g.addV('software').property('name', 'ripple').property('lang', 'java').next();\n" - + "g.V().has('name', 'marko').as('marko').V().has('name', 'vadas').as('vadas').addE('knows').from('marko').property('weight', 0.5f).next();\n" - + "g.V().has('name', 'marko').as('marko').V().has('name', 'josh').as('josh').addE('knows').from('marko').property('weight', 
1.0f).next();\n" - + "g.V().has('name', 'marko').as('marko').V().has('name', 'lop').as('lop').addE('created').from('marko').property('weight', 0.4f).next();\n" - + "g.V().has('name', 'josh').as('josh').V().has('name', 'ripple').as('ripple').addE('created').from('josh').property('weight', 1.0f).next();\n" - + "g.V().has('name', 'josh').as('josh').V().has('name', 'lop').as('lop').addE('created').from('josh').property('weight', 0.4f).next();\n" - + "g.V().has('name', 'peter').as('peter').V().has('name', 'lop').as('lop').addE('created').from('peter').property('weight', 0.2f);"; - - public static String CLASSIC_GRAPH = CLASSIC_SCHEMA + INSERT_DATA; - - private static final String CORE_SCHEMA = - "schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();\n" - + "schema.vertexLabel('software').ifNotExists().partitionBy('name', Text).property('lang', Text).create();\n" - + "schema.edgeLabel('created').ifNotExists().from('person').to('software').property('weight', Float).create();\n" - + "schema.edgeLabel('knows').ifNotExists().from('person').to('person').property('weight', Float).create();\n"; - - public static String CORE_GRAPH = CORE_SCHEMA + INSERT_DATA; -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalDsl.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalDsl.java deleted file mode 100644 index 327f32a240d..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalDsl.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import org.apache.tinkerpop.gremlin.process.traversal.dsl.GremlinDsl; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.structure.Vertex; - -@GremlinDsl(traversalSource = "com.datastax.dse.driver.api.core.graph.SocialTraversalSourceDsl") -public interface SocialTraversalDsl extends GraphTraversal.Admin { - public default GraphTraversal knows(String personName) { - return out("knows").hasLabel("person").has("name", personName).in(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalSourceDsl.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalSourceDsl.java deleted file mode 100644 index e61b94e2d09..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalSourceDsl.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; -import org.apache.tinkerpop.gremlin.process.traversal.P; -import org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.DefaultGraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.step.map.GraphStep; -import org.apache.tinkerpop.gremlin.structure.Graph; -import org.apache.tinkerpop.gremlin.structure.Vertex; - -public class SocialTraversalSourceDsl extends GraphTraversalSource { - - public SocialTraversalSourceDsl( - final Graph graph, final TraversalStrategies traversalStrategies) { - super(graph, traversalStrategies); - } - - public SocialTraversalSourceDsl(final Graph graph) { - super(graph); - } - - public SocialTraversalSourceDsl(RemoteConnection connection) { - super(connection); - } - - public GraphTraversal persons(String... names) { - GraphTraversalSource clone = this.clone(); - - // Manually add a "start" step for the traversal in this case the equivalent of V(). GraphStep - // is marked - // as a "start" step by passing "true" in the constructor. 
- clone.getBytecode().addStep(GraphTraversal.Symbols.V); - GraphTraversal traversal = new DefaultGraphTraversal<>(clone); - traversal.asAdmin().addStep(new GraphStep(traversal.asAdmin(), Vertex.class, true)); - - traversal = traversal.hasLabel("person"); - if (names.length > 0) traversal = traversal.has("name", P.within(names)); - - return traversal; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerEdgeAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerEdgeAssert.java deleted file mode 100644 index e3cc8cb687b..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerEdgeAssert.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import com.datastax.oss.driver.assertions.Assertions; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Vertex; - -public class TinkerEdgeAssert extends TinkerElementAssert { - - public TinkerEdgeAssert(Edge actual) { - super(actual, TinkerEdgeAssert.class); - } - - public TinkerEdgeAssert hasInVLabel(String label) { - Assertions.assertThat(actual.inVertex().label()).isEqualTo(label); - return myself; - } - - public TinkerEdgeAssert hasOutVLabel(String label) { - Assertions.assertThat(actual.outVertex().label()).isEqualTo(label); - return myself; - } - - public TinkerEdgeAssert hasOutV(Vertex vertex) { - Assertions.assertThat(actual.outVertex()).isEqualTo(vertex); - return myself; - } - - public TinkerEdgeAssert hasInV(Vertex vertex) { - Assertions.assertThat(actual.inVertex()).isEqualTo(vertex); - return myself; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerElementAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerElementAssert.java deleted file mode 100644 index f54cec3065a..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerElementAssert.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.structure.Element; -import org.assertj.core.api.AbstractAssert; - -public abstract class TinkerElementAssert, A extends Element> - extends AbstractAssert { - - protected TinkerElementAssert(A actual, Class selfType) { - super(actual, selfType); - } - - public S hasId(Object id) { - assertThat(actual.id()).isEqualTo(id); - return myself; - } - - public S hasLabel(String label) { - assertThat(actual.label()).isEqualTo(label); - return myself; - } - - public S hasProperty(String propertyName) { - assertThat(actual.property(propertyName).isPresent()).isTrue(); - return myself; - } - - public S hasProperty(String propertyName, Object value) { - hasProperty(propertyName); - assertThat(actual.property(propertyName).value()).isEqualTo(value); - return myself; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerGraphAssertions.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerGraphAssertions.java deleted file mode 100644 index 9c38a58db4c..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerGraphAssertions.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -public class TinkerGraphAssertions extends com.datastax.oss.driver.assertions.Assertions { - - public static TinkerEdgeAssert assertThat(Edge edge) { - return new TinkerEdgeAssert(edge); - } - - public static TinkerVertexAssert assertThat(Vertex vertex) { - return new TinkerVertexAssert(vertex); - } - - public static TinkerVertexPropertyAssert assertThat(VertexProperty vertexProperty) { - return new TinkerVertexPropertyAssert(vertexProperty); - } - - public static TinkerPathAssert assertThat(Path path) { - return new TinkerPathAssert(path); - } - - public static TinkerTreeAssert assertThat(Tree tree) { - return new TinkerTreeAssert<>(tree); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerPathAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerPathAssert.java deleted file mode 100644 index 30f8f5fffa4..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerPathAssert.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - 
* Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.assertj.core.api.AbstractAssert; -import org.assertj.core.api.AbstractObjectAssert; - -public class TinkerPathAssert extends AbstractAssert { - - public TinkerPathAssert(Path actual) { - super(actual, TinkerPathAssert.class); - } - - /** - * Ensures that the given Path matches one of the exact traversals we'd expect for a person whom - * Marko knows that has created software and what software that is. - * - *

    These paths should be: - * - *

      - *
    • marko -> knows -> josh -> created -> lop - *
    • marko -> knows -> josh -> created -> ripple - *
    - */ - public static void validatePathObjects(Path path) { - - // marko should be the origin point. - TinkerGraphAssertions.assertThat(path).vertexAt(0).hasLabel("person"); - - // there should be a 'knows' outgoing relationship between marko and josh. - TinkerGraphAssertions.assertThat(path) - .edgeAt(1) - .hasLabel("knows") - .hasOutVLabel("person") - .hasOutV((Vertex) path.objects().get(0)) - .hasInVLabel("person") - .hasInV((Vertex) path.objects().get(2)); - - // josh... - TinkerGraphAssertions.assertThat(path).vertexAt(2).hasLabel("person"); - - // there should be a 'created' relationship between josh and lop. - TinkerGraphAssertions.assertThat(path) - .edgeAt(3) - .hasLabel("created") - .hasOutVLabel("person") - .hasOutV((Vertex) path.objects().get(2)) - .hasInVLabel("software") - .hasInV((Vertex) path.objects().get(4)); - - // lop.. - TinkerGraphAssertions.assertThat(path).vertexAt(4).hasLabel("software"); - } - - public AbstractObjectAssert objectAt(int i) { - assertThat(actual.size()).isGreaterThanOrEqualTo(i); - return assertThat(actual.objects().get(i)); - } - - public TinkerVertexAssert vertexAt(int i) { - assertThat(actual.size()).isGreaterThanOrEqualTo(i); - Object o = actual.objects().get(i); - assertThat(o).isInstanceOf(Vertex.class); - return new TinkerVertexAssert((Vertex) o); - } - - public TinkerEdgeAssert edgeAt(int i) { - assertThat(actual.size()).isGreaterThanOrEqualTo(i); - Object o = actual.objects().get(i); - assertThat(o).isInstanceOf(Edge.class); - return new TinkerEdgeAssert((Edge) o); - } - - public TinkerPathAssert hasLabel(int i, String... 
labels) { - assertThat(actual.labels().size()).isGreaterThanOrEqualTo(i); - assertThat(actual.labels().get(i)).containsExactly(labels); - return myself; - } - - public TinkerPathAssert hasNoLabel(int i) { - assertThat(actual.labels().size()).isGreaterThanOrEqualTo(i); - assertThat(actual.labels().get(i)).isEmpty(); - return myself; - } - - public TinkerPathAssert doesNotHaveLabel(String label) { - assertThat(actual.hasLabel(label)).isFalse(); - return myself; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerTreeAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerTreeAssert.java deleted file mode 100644 index 6196e0a1021..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerTreeAssert.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree; -import org.assertj.core.api.MapAssert; - -public class TinkerTreeAssert extends MapAssert> { - - public TinkerTreeAssert(Tree actual) { - super(actual); - } - - public TinkerTreeAssert hasTree(T key) { - assertThat(actual).containsKey(key); - return this; - } - - public TinkerTreeAssert isLeaf() { - assertThat(actual).hasSize(0); - return this; - } - - public TinkerTreeAssert tree(T key) { - hasTree(key); - return new TinkerTreeAssert<>(actual.get(key)); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexAssert.java deleted file mode 100644 index 3777fc8e96a..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexAssert.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.structure.Property; -import org.apache.tinkerpop.gremlin.structure.Vertex; - -public class TinkerVertexAssert extends TinkerElementAssert { - - public TinkerVertexAssert(Vertex actual) { - super(actual, TinkerVertexAssert.class); - } - - @Override - public TinkerVertexAssert hasProperty(String propertyName) { - assertThat(actual.properties(propertyName)).toIterable().isNotEmpty(); - return myself; - } - - @Override - public TinkerVertexAssert hasProperty(String propertyName, Object value) { - hasProperty(propertyName); - assertThat(actual.properties(propertyName)) - .toIterable() - .extracting(Property::value) - .contains(value); - return myself; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexPropertyAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexPropertyAssert.java deleted file mode 100644 index 40bb8b5e239..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexPropertyAssert.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.apache.tinkerpop.gremlin.structure.Element; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; - -public class TinkerVertexPropertyAssert - extends TinkerElementAssert, VertexProperty> { - - public TinkerVertexPropertyAssert(VertexProperty actual) { - super(actual, TinkerVertexPropertyAssert.class); - } - - public TinkerVertexPropertyAssert hasKey(String key) { - assertThat(actual.key()).isEqualTo(key); - return this; - } - - public TinkerVertexPropertyAssert hasParent(Element parent) { - assertThat(actual.element()).isEqualTo(parent); - return this; - } - - public TinkerVertexPropertyAssert hasValue(T value) { - assertThat(actual.value()).isEqualTo(value); - return this; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/reactive/DefaultReactiveGraphResultSetIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/reactive/DefaultReactiveGraphResultSetIT.java deleted file mode 100644 index b4c8bb05df4..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/reactive/DefaultReactiveGraphResultSetIT.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import io.reactivex.Flowable; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "Graph paging requires DSE 6.8+") -@RunWith(DataProviderRunner.class) -public class DefaultReactiveGraphResultSetIT { - - private static CustomCcmRule ccmRule = CustomCcmRule.builder().withDseWorkloads("graph").build(); - - private static SessionRule sessionRule = - 
SessionRule.builder(ccmRule) - .withCreateGraph() - .withCoreEngine() - .withGraphProtocol(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @BeforeClass - public static void setupSchema() { - sessionRule - .session() - .execute( - ScriptGraphStatement.newInstance( - "schema.vertexLabel('person')" - + ".partitionBy('pk', Int)" - + ".clusterBy('cc', Int)" - + ".property('name', Text)" - + ".create();") - .setGraphName(sessionRule.getGraphName())); - for (int i = 1; i <= 1000; i++) { - sessionRule - .session() - .execute( - ScriptGraphStatement.newInstance( - String.format( - "g.addV('person').property('pk',0).property('cc',%d).property('name', '%s');", - i, "user" + i)) - .setGraphName(sessionRule.getGraphName())); - } - } - - @Test - @DataProvider( - value = {"1", "10", "100", "999", "1000", "1001", "2000"}, - format = "%m [page size %p[0]]") - public void should_retrieve_all_rows(int pageSize) { - DriverExecutionProfile profile = - sessionRule - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, pageSize); - ScriptGraphStatement statement = - ScriptGraphStatement.builder("g.V()").setExecutionProfile(profile).build(); - ReactiveGraphResultSet rs = sessionRule.session().executeReactive(statement); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results.size()).isEqualTo(1000); - Set expectedExecInfos = new LinkedHashSet<>(); - for (ReactiveGraphNode row : results) { - assertThat(row.getExecutionInfo()).isNotNull(); - assertThat(row.isVertex()).isTrue(); - expectedExecInfos.add(row.getExecutionInfo()); - } - List execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - // DSE may send an empty page as it can't always know if it's done paging or not yet. - // See: CASSANDRA-8871. 
In this case, this page's execution info appears in - // rs.getExecutionInfos(), but is not present in expectedExecInfos since the page did not - // contain any rows. - assertThat(execInfos).containsAll(expectedExecInfos); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphDataTypeRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphDataTypeRemoteIT.java deleted file mode 100644 index b57d7a952bc..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphDataTypeRemoteIT.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.remote; - -import com.datastax.dse.driver.api.core.graph.ClassicGraphDataTypeITBase; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.3", - description = "DSE 5.0.3 required for remote TinkerPop support") -public class ClassicGraphDataTypeRemoteIT extends ClassicGraphDataTypeITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); - } - - @Override - public CqlSession session() { - return SESSION_RULE.session(); - } - - private 
final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); - - @Override - public Vertex insertVertexAndReturn(String vertexLabel, String propertyName, Object value) { - return g.addV(vertexLabel).property(propertyName, value).next(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphTraversalRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphTraversalRemoteIT.java deleted file mode 100644 index de85b6af267..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphTraversalRemoteIT.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.remote; - -import static org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal; - -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.9", - description = "DSE 5.0.9 required for inserting edges and vertices script.") -public class ClassicGraphTraversalRemoteIT extends GraphTraversalRemoteITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CLASSIC_GRAPH)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); - } - - @Override - 
protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected boolean isGraphBinary() { - return false; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return traversal().withRemote(DseGraph.remoteConnectionBuilder(session()).build()); - } - - @Override - protected SocialTraversalSource socialTraversalSource() { - return traversal(SocialTraversalSource.class) - .withRemote(DseGraph.remoteConnectionBuilder(session()).build()); - } - - @Override - protected CustomCcmRule ccmRule() { - return CCM_RULE; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphDataTypeRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphDataTypeRemoteIT.java deleted file mode 100644 index 88b9cdc0433..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphDataTypeRemoteIT.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.remote; - -import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.unfold; - -import com.datastax.dse.driver.api.core.graph.CoreGraphDataTypeITBase; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -@RunWith(DataProviderRunner.class) -public class CoreGraphDataTypeRemoteIT extends CoreGraphDataTypeITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected String graphName() { - return SESSION_RULE.getGraphName(); - } - - private final GraphTraversalSource g = - 
AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(session()).build()); - - @Override - public Map insertVertexThenReadProperties( - Map properties, int vertexID, String vertexLabel) { - GraphTraversal traversal = g.addV(vertexLabel).property("id", vertexID); - - for (Map.Entry entry : properties.entrySet()) { - String typeDefinition = entry.getKey(); - String propName = formatPropertyName(typeDefinition); - Object value = entry.getValue(); - traversal = traversal.property(propName, value); - } - - // insert vertex - traversal.iterate(); - - // query properties - return g.V().has(vertexLabel, "id", vertexID).valueMap().by(unfold()).next(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphTraversalRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphTraversalRemoteIT.java deleted file mode 100644 index 2bbdf1f6d45..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphTraversalRemoteIT.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.remote; - -import static org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal; - -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8", - description = "DSE 6.8 required for Core graph support") -public class CoreGraphTraversalRemoteIT extends GraphTraversalRemoteITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CORE_GRAPH)); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected boolean isGraphBinary() { - return true; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return traversal() - 
.withRemote(DseGraph.remoteConnectionBuilder(session()).build()) - .with("allow-filtering"); - } - - @Override - protected SocialTraversalSource socialTraversalSource() { - return traversal(SocialTraversalSource.class) - .withRemote(DseGraph.remoteConnectionBuilder(session()).build()) - .with("allow-filtering"); - } - - @Override - protected CustomCcmRule ccmRule() { - return CCM_RULE; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMetaPropertiesRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMetaPropertiesRemoteIT.java deleted file mode 100644 index 2966fb44cf9..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMetaPropertiesRemoteIT.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.remote; - -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS; -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT; -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -// INFO: meta props are going away in NGDG - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.3", - description = "DSE 5.0.3 required for remote TinkerPop support") -public class GraphTraversalMetaPropertiesRemoteIT { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); - - /** 
Builds a simple schema that provides for a vertex with a property with sub properties. */ - public static final String META_PROPS = - MAKE_STRICT - + ALLOW_SCANS - + "schema.propertyKey('sub_prop').Text().create()\n" - + "schema.propertyKey('sub_prop2').Text().create()\n" - + "schema.propertyKey('meta_prop').Text().properties('sub_prop', 'sub_prop2').create()\n" - + "schema.vertexLabel('meta_v').properties('meta_prop').create()"; - - /** - * Ensures that a traversal that yields a vertex with a property that has its own properties that - * is appropriately parsed and made accessible via {@link VertexProperty#property}. - * - * @test_category dse:graph - */ - @Test - public void should_parse_meta_properties() { - // given a schema that defines meta properties. - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(META_PROPS)); - - // when adding a vertex with that meta property - Vertex v = - g.addV("meta_v") - .property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2") - .next(); - - // then the created vertex should have the meta prop present with its sub properties. - assertThat(v).hasProperty("meta_prop"); - VertexProperty metaProp = v.property("meta_prop"); - assertThat(metaProp) - .hasValue("hello") - .hasProperty("sub_prop", "hi") - .hasProperty("sub_prop2", "hi2"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMultiPropertiesRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMultiPropertiesRemoteIT.java deleted file mode 100644 index c55a7b67c4a..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMultiPropertiesRemoteIT.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.remote; - -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS; -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT; -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.util.Iterator; -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -// INFO: 
multi props are not supported in Core -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.3", - description = "DSE 5.0.3 required for remote TinkerPop support") -public class GraphTraversalMultiPropertiesRemoteIT { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = - AnonymousTraversalSource.traversal() - .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); - - /** Builds a simple schema that provides for a vertex with a multi-cardinality property. */ - public static final String MULTI_PROPS = - MAKE_STRICT - + ALLOW_SCANS - + "schema.propertyKey('multi_prop').Text().multiple().create()\n" - + "schema.vertexLabel('multi_v').properties('multi_prop').create()\n"; - - /** - * Ensures that a traversal that yields a vertex with a property name that is present multiple - * times that the properties are parsed and made accessible via {@link - * Vertex#properties(String...)}. - * - * @test_category dse:graph - */ - @Test - public void should_parse_multiple_cardinality_properties() { - // given a schema that defines multiple cardinality properties. - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(MULTI_PROPS)); - - // when adding a vertex with a multiple cardinality property - Vertex v = - g.addV("multi_v") - .property("multi_prop", "Hello") - .property("multi_prop", "Sweet") - .property("multi_prop", "World") - .next(); - - // then the created vertex should have the multi-cardinality property present with its values. 
- assertThat(v).hasProperty("multi_prop"); - Iterator> multiProp = v.properties("multi_prop"); - assertThat(multiProp) - .toIterable() - .extractingResultOf("value") - .containsExactly("Hello", "Sweet", "World"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalRemoteITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalRemoteITBase.java deleted file mode 100644 index 3db8a7d1a12..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalRemoteITBase.java +++ /dev/null @@ -1,661 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.remote; - -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsLabel; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsProperties; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.Assertions; -import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; -import com.datastax.dse.driver.api.core.graph.TinkerPathAssert; -import com.datastax.dse.driver.api.core.graph.__; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import org.apache.tinkerpop.gremlin.process.traversal.Traversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree; -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Graph; -import 
org.apache.tinkerpop.gremlin.structure.Vertex; -import org.assertj.core.api.Assumptions; -import org.junit.Test; - -public abstract class GraphTraversalRemoteITBase { - - protected abstract CqlSession session(); - - protected abstract boolean isGraphBinary(); - - protected abstract GraphTraversalSource graphTraversalSource(); - - protected abstract SocialTraversalSource socialTraversalSource(); - - protected abstract CustomCcmRule ccmRule(); - - /** - * Ensures that a previously returned {@link Vertex}'s {@link Vertex#id()} can be used as an input - * to {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#V(Object...)} to - * retrieve the {@link Vertex} and that the returned {@link Vertex} is the same. - * - * @test_category dse:graph - */ - @Test - public void should_use_vertex_id_as_parameter() { - GraphTraversalSource g = graphTraversalSource(); - - // given an existing vertex - Vertex marko = g.V().hasLabel("person").has("name", "marko").next(); - if (isGraphBinary()) { - Map properties = - g.V().hasLabel("person").has("name", "marko").elementMap("name").next(); - - assertThat(properties).containsEntry("name", "marko"); - } else { - assertThat(marko).hasProperty("name", "marko"); - } - - // then should be able to retrieve that same vertex by id. - assertThat(g.V(marko.id()).next()).isEqualTo(marko); - } - - /** - * Ensures that a previously returned {@link Edge}'s {@link Edge#id()} can be used as an input to - * {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#E(Object...)} to - * retrieve the {@link Edge} and that the returned {@link Edge} is the same. 
- * - * @test_category dse:graph - */ - @Test - public void should_use_edge_is_as_parameter() { - GraphTraversalSource g = graphTraversalSource(); - - // given an existing edge - Edge created = g.E().has("weight", 0.2f).next(); - - if (isGraphBinary()) { - List> properties = - g.E().has("weight").elementMap("weight", "software", "person").toList(); - - assertThat(properties) - .anySatisfy( - props -> { - assertThatContainsProperties(props, "weight", 0.2f); - assertThatContainsLabel(props, Direction.IN, "software"); - assertThatContainsLabel(props, Direction.OUT, "person"); - }); - - } else { - assertThat(created) - .hasProperty("weight", 0.2f) - .hasInVLabel("software") - .hasOutVLabel("person"); - } - - // should be able to retrieve incoming and outgoing vertices by edge id - if (isGraphBinary()) { - Map inProperties = g.E(created.id()).inV().elementMap("name", "lang").next(); - Map outProperties = g.E(created.id()).outV().elementMap("name").next(); - assertThatContainsProperties(inProperties, "name", "lop", "lang", "java"); - assertThatContainsProperties(outProperties, "name", "peter"); - - } else { - Vertex in = g.E(created.id()).inV().next(); - Vertex out = g.E(created.id()).outV().next(); - - // should resolve to lop - assertThat(in).hasLabel("software").hasProperty("name", "lop").hasProperty("lang", "java"); - - // should resolve to marko, josh and peter whom created lop. - assertThat(out).hasLabel("person").hasProperty("name", "peter"); - } - } - - /** - * A sanity check that a returned {@link Vertex}'s id is a {@link Map}. This test could break in - * the future if the format of a vertex ID changes from a Map to something else in DSE. - * - * @test_category dse:graph - */ - @Test - public void should_deserialize_vertex_id_as_map() { - GraphTraversalSource g = graphTraversalSource(); - // given an existing vertex - Vertex marko = g.V().hasLabel("person").has("name", "marko").next(); - - // then id should be a map with expected values. 
- // Note: this is pretty dependent on DSE Graphs underlying id structure which may vary in - // the future. - if (isGraphBinary()) { - assertThat(((String) marko.id())).contains("marko"); - assertThat(marko.label()).isEqualTo("person"); - } else { - @SuppressWarnings("unchecked") - Map id = (Map) marko.id(); - assertThat(id) - .hasSize(3) - .containsEntry("~label", "person") - .containsKey("community_id") - .containsKey("member_id"); - } - } - - /** - * Ensures that a traversal that returns a result of mixed types is interpreted as a {@link Map} - * with {@link Object} values. Also uses {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal#by(Traversal)} with an - * anonymous traversal to get inbound 'created' edges and folds them into a list. - * - *

    Executes a vertex traversal that binds label 'a' and 'b' to vertex properties and label 'c' - * to vertices that have edges from that vertex. - * - * @test_category dse:graph - */ - @Test - public void should_handle_result_object_of_mixed_types() { - GraphTraversalSource g = graphTraversalSource(); - // find all software vertices and select name, language, and find all vertices that created - // such software. - List> results = - g.V() - .hasLabel("software") - .as("a", "b", "c") - .select("a", "b", "c") - .by("name") - .by("lang") - .by(__.in("created").fold()) - .toList(); - - // ensure that lop and ripple and their data are the results return. - assertThat(results).extracting(m -> m.get("a")).containsOnly("lop", "ripple"); - - for (Map result : results) { - assertThat(result).containsOnlyKeys("a", "b", "c"); - // both software are written in java. - assertThat(result.get("b")).isEqualTo("java"); - // ensure the created vertices match the creators of the software. - @SuppressWarnings("unchecked") - List vertices = (List) result.get("c"); - if (result.get("a").equals("lop")) { - if (isGraphBinary()) { - // should contain three vertices - assertThat(vertices.size()).isEqualTo(3); - } else { - // lop, 'c' should contain marko, josh, peter. - assertThat(vertices) - .extracting(vertex -> vertex.property("name").value()) - .containsOnly("marko", "josh", "peter"); - } - } else { - if (isGraphBinary()) { - // has only one label - assertThat(vertices.size()).isEqualTo(1); - } else { - assertThat(vertices) - .extracting(vertex -> vertex.property("name").value()) - .containsOnly("josh"); - } - } - } - } - - /** - * Ensures that a traversal that returns a sub graph can be retrieved. - * - *

    The subgraph is all members in a knows relationship, thus is all people who marko knows and - * the edges that connect them. - */ - @Test - public void should_handle_subgraph_graphson() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - - GraphTraversalSource g = graphTraversalSource(); - // retrieve a subgraph on the knows relationship, this omits the created edges. - Graph graph = (Graph) g.E().hasLabel("knows").subgraph("subGraph").cap("subGraph").next(); - - // there should only be 2 edges (since there are are only 2 knows relationships) and 3 vertices - assertThat(graph.edges()).toIterable().hasSize(2); - assertThat(graph.vertices()).toIterable().hasSize(3); - } - - /** - * Ensures that a traversal that returns a sub graph can be retrieved. - * - *

    The subgraph is all members in a knows relationship, thus is all people who marko knows and - * the edges that connect them. - */ - @Test - public void should_handle_subgraph_graph_binary() { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - - GraphTraversalSource g = graphTraversalSource(); - // retrieve a subgraph on the knows relationship, this omits the created edges. - String graph = (String) g.E().hasLabel("knows").subgraph("subGraph").cap("subGraph").next(); - - // there should only be 2 edges (since there are are only 2 knows relationships) and 3 vertices - assertThat(graph).contains("vertices:3").contains("edges:2"); - } - - /** - * Ensures a traversal that yields no results is properly retrieved and is empty. - * - * @test_category dse:graph - */ - @Test - public void should_return_zero_results() { - if (isGraphBinary()) { - assertThatThrownBy(() -> graphTraversalSource().V().hasLabel("notALabel").toList()) - .isInstanceOf(InvalidQueryException.class) - .hasMessageContaining("Unknown vertex label 'notALabel'"); - } else { - assertThat(graphTraversalSource().V().hasLabel("notALabel").toList()).isEmpty(); - } - } - - /** - * Validates that a traversal returning a {@link Tree} structure is returned appropriately with - * the expected contents. - * - *

    Retrieves trees of people marko knows and the software they created. - * - * @test_category dse:graph - */ - @Test - public void should_parse_tree() { - // Get a tree structure showing the paths from mark to people he knows to software they've - // created. - @SuppressWarnings("unchecked") - Tree tree = - graphTraversalSource() - .V() - .hasLabel("person") - .out("knows") - .out("created") - .tree() - .by("name") - .next(); - - // Marko knows josh who created lop and ripple. - assertThat(tree).tree("marko").tree("josh").tree("lop").isLeaf(); - - assertThat(tree).tree("marko").tree("josh").tree("ripple").isLeaf(); - } - - /** - * Validates that a traversal using lambda operations with anonymous traversals are applied - * appropriately and return the expected results. - * - *

    Traversal that filters 'person'-labeled vertices by name 'marko' and flatMaps outgoing - * vertices on the 'knows' relationship by their outgoing 'created' vertices and then maps by - * their 'name' property and folds them into one list. - * - *

    Note: This does not validate lambdas with functions as those can't be interpreted and - * sent remotely. - * - * @test_category dse:graph - */ - @Test - public void should_handle_lambdas() { - // Find all people marko knows and the software they created. - List software = - graphTraversalSource() - .V() - .hasLabel("person") - .filter(__.has("name", "marko")) - .out("knows") - .flatMap(__.out("created")) - .map(__.values("name")) - .fold() - .next(); - - // Marko only knows josh and vadas, of which josh created lop and ripple. - assertThat(software).containsOnly("lop", "ripple"); - } - - /** - * Validates that {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal#tryNext()} functions - * appropriate by returning an {@link Optional} of which the presence of the underlying data - * depends on whether or not remaining data is present. - * - *

    This is more of a test of Tinkerpop than the protocol between the client and DSE graph. - * - * @test_category dse:graph - */ - @Test - public void should_handle_tryNext() { - GraphTraversal traversal = - graphTraversalSource().V().hasLabel("person").has("name", "marko"); - - // value present - Optional v0 = traversal.tryNext(); - assertThat(v0.isPresent()).isTrue(); - if (!isGraphBinary()) { - assertThat(v0.get()).hasProperty("name", "marko"); - } - - // value absent as there was only 1 matching vertex. - Optional v1 = traversal.tryNext(); - assertThat(v1.isPresent()).isFalse(); - } - - /** - * Validates that {@link GraphTraversal#toStream()} appropriately creates a stream from the - * underlying iterator on the traversal, and then an attempt to call toStream again yields no - * results. - * - *

    This is more of a test of Tinkerpop than the protocol between the client and DSE graph. - * - * @test_category dse:graph - */ - @Test - public void should_handle_streaming_graphson() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - - GraphTraversal traversal = graphTraversalSource().V().hasLabel("person"); - // retrieve all person vertices to stream, and filter on client side all persons under age 30 - // and map to their name. - List under30 = - traversal - .toStream() - .filter(v -> v.property("age").value() < 30) - .map(v -> v.property("name").value()) - .collect(Collectors.toList()); - - assertThat(under30).containsOnly("marko", "vadas"); - - // attempt to get a stream again, which should be empty. - assertThat(traversal.toStream().collect(Collectors.toList())).isEmpty(); - } - - /** - * Validates that {@link GraphTraversal#toStream()} appropriately creates a stream from the - * underlying iterator on the traversal, and then an attempt to call toStream again yields no - * results. - * - *

    This is more of a test of Tinkerpop than the protocol between the client and DSE graph. - * - * @test_category dse:graph - */ - @Test - public void should_handle_streaming_binary() { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - - GraphTraversal> traversal = - graphTraversalSource().V().hasLabel("person").elementMap("age", "name"); - // retrieve all person vertices to stream, and filter on client side all persons under age 30 - // and map to their name. - List under30 = - traversal - .toStream() - .filter(v -> (Integer) v.get("age") < 30) - .map(v -> (String) v.get("name")) - .collect(Collectors.toList()); - - assertThat(under30).containsOnly("marko", "vadas"); - - // attempt to get a stream again, which should be empty. - assertThat(traversal.toStream().collect(Collectors.toList())).isEmpty(); - } - - /** - * Validates that when traversing a path and labeling some of the elements during the traversal - * that the output elements are properly labeled. - * - * @test_category dse:graph - */ - @Test - public void should_resolve_path_with_some_labels() { - // given a traversal where some objects have labels. - List paths = - graphTraversalSource() - .V() - .hasLabel("person") - .has("name", "marko") - .as("a") - .outE("knows") - .inV() - .as("c", "d") - .outE("created") - .as("e", "f", "g") - .inV() - .path() - .toList(); - - // then the paths returned should be labeled for the - // appropriate objects, and not labeled otherwise. - for (Path path : paths) { - TinkerPathAssert.validatePathObjects(path); - assertThat(path) - .hasLabel(0, "a") - .hasNoLabel(1) - .hasLabel(2, "c", "d") - .hasLabel(3, "e", "f", "g") - .hasNoLabel(4); - } - } - - /** - * Validates that when traversing a path and labeling all of the elements during the traversal - * that the output elements are properly labeled. - * - * @test_category dse:graph - */ - @Test - public void should_resolve_path_with_labels() { - // given a traversal where all objects have labels. 
- List paths = - graphTraversalSource() - .V() - .hasLabel("person") - .has("name", "marko") - .as("a") - .outE("knows") - .as("b") - .inV() - .as("c", "d") - .outE("created") - .as("e", "f", "g") - .inV() - .as("h") - .path() - .toList(); - - // then the paths returned should be labeled for all - // objects. - for (Path path : paths) { - TinkerPathAssert.validatePathObjects(path); - Assertions.assertThat(path.labels()).hasSize(5); - assertThat(path) - .hasLabel(0, "a") - .hasLabel(1, "b") - .hasLabel(2, "c", "d") - .hasLabel(3, "e", "f", "g") - .hasLabel(4, "h"); - } - } - - /** - * Validates that when traversing a path and labeling none of the elements during the traversal - * that all the labels are empty in the result. - * - * @test_category dse:graph - */ - @Test - public void should_resolve_path_without_labels() { - // given a traversal where no objects have labels. - List paths = - graphTraversalSource() - .V() - .hasLabel("person") - .has("name", "marko") - .outE("knows") - .inV() - .outE("created") - .inV() - .path() - .toList(); - - // then the paths returned should be labeled for - // all objects. 
- for (Path path : paths) { - TinkerPathAssert.validatePathObjects(path); - for (int i = 0; i < 5; i++) assertThat(path).hasNoLabel(i); - } - } - - @Test - public void should_handle_asynchronous_execution_graphson() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - - StringBuilder names = new StringBuilder(); - - CompletableFuture> future = - graphTraversalSource().V().hasLabel("person").promise(Traversal::toList); - try { - // dumb processing to make sure the completable future works correctly and correct results are - // returned - future - .thenAccept( - vertices -> vertices.forEach(vertex -> names.append((String) vertex.value("name")))) - .get(); - } catch (InterruptedException | ExecutionException e) { - fail("Shouldn't have thrown an exception waiting for the result to complete"); - } - - assertThat(names.toString()).contains("peter", "marko", "vadas", "josh"); - } - - @Test - public void should_handle_asynchronous_execution_graph_binary() { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - - StringBuilder names = new StringBuilder(); - - CompletableFuture> future = - graphTraversalSource().V().hasLabel("person").promise(Traversal::toList); - try { - // dumb processing to make sure the completable future works correctly and correct results are - // returned - future.thenAccept(vertices -> vertices.forEach(vertex -> names.append(vertex.id()))).get(); - } catch (InterruptedException | ExecutionException e) { - fail("Shouldn't have thrown an exception waiting for the result to complete"); - } - - assertThat(names.toString()).contains("peter", "marko", "vadas", "josh"); - } - - /** - * Validates that if a traversal is made that encounters an error on the server side that the - * exception is set on the future. 
- * - * @test_category dse:graph - */ - @Test - @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0") - public void should_fail_future_returned_from_promise_on_query_error() throws Exception { - CompletableFuture future = - graphTraversalSource().V("invalidid").peerPressure().promise(Traversal::next); - - try { - future.get(); - fail("Expected an ExecutionException"); - } catch (ExecutionException e) { - assertThat(e.getCause()).isInstanceOf(InvalidQueryException.class); - } - } - - /** - * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for - * use with DSLs. - * - * @test_category dse:graph - */ - @Test - public void should_allow_use_of_dsl_graphson() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - - List vertices = socialTraversalSource().persons("marko").knows("vadas").toList(); - assertThat(vertices.size()).isEqualTo(1); - assertThat(vertices.get(0)) - .hasProperty("name", "marko") - .hasProperty("age", 29) - .hasLabel("person"); - } - - /** - * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for - * use with DSLs. - * - * @test_category dse:graph - */ - @Test - public void should_allow_use_of_dsl_graph_binary() { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - - List> vertices = - socialTraversalSource().persons("marko").knows("vadas").elementMap("name", "age").toList(); - assertThat(vertices.size()).isEqualTo(1); - - assertThatContainsProperties(vertices.get(0), "name", "marko", "age", 29); - assertThat(vertices.get(0).values()).contains("person"); - } - - /** - * Ensures that traversals with barriers (which return results bulked) contain the correct amount - * of end results. - * - *

    This will fail if ran against DSE < 5.0.9 or DSE < 5.1.2. - */ - @Test - public void should_return_correct_results_when_bulked() { - Assumptions.assumeThat( - CcmBridge.isDistributionOf( - BackendType.DSE, (dist, cass) -> dist.compareTo(Version.parse("5.1.2")) > 0)) - .isTrue(); - - List results = graphTraversalSource().E().label().barrier().toList(); - Collections.sort(results); - - List expected = - Arrays.asList("knows", "created", "created", "knows", "created", "created"); - Collections.sort(expected); - - assertThat(results).isEqualTo(expected); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeFluentIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeFluentIT.java deleted file mode 100644 index d1355100c4b..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeFluentIT.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.ClassicGraphDataTypeITBase; -import com.datastax.dse.driver.api.core.graph.DseGraph; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.3", - description = "DSE 5.0.3 required for fluent API support") -public class ClassicGraphDataTypeFluentIT extends ClassicGraphDataTypeITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); - } - - @Override - public CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - public Vertex insertVertexAndReturn(String vertexLabel, String propertyName, Object value) { - return 
SESSION_RULE - .session() - .execute( - FluentGraphStatement.newInstance( - DseGraph.g.addV(vertexLabel).property(propertyName, value))) - .one() - .asVertex(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeScriptIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeScriptIT.java deleted file mode 100644 index 81f088d0c18..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeScriptIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.ClassicGraphDataTypeITBase; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.4", - description = "DSE 5.0.4 required for script API with GraphSON 2") -public class ClassicGraphDataTypeScriptIT extends ClassicGraphDataTypeITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); - } - - @Override - public CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - public Vertex insertVertexAndReturn(String vertexLabel, String propertyName, Object value) { - return SESSION_RULE - .session() - .execute( - ScriptGraphStatement.builder("g.addV(labelP).property(nameP, valueP)") - 
.setQueryParam("labelP", vertexLabel) - .setQueryParam("nameP", propertyName) - .setQueryParam("valueP", value) - .build()) - .one() - .asVertex(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalBatchIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalBatchIT.java deleted file mode 100644 index 81f39753856..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalBatchIT.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.0", - description = "DSE 6.0 required for BatchGraphStatement.") -public class ClassicGraphTraversalBatchIT extends GraphTraversalBatchITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = EmptyGraph.instance().traversal(); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_NOT_STRICT)); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected boolean isGraphBinary() { - return false; - } - - @Override - protected CustomCcmRule ccmRule() { - return 
CCM_RULE; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return g; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalIT.java deleted file mode 100644 index 672e1b6f679..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalIT.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.9", - description = "DSE 5.0.9 required for inserting edges and vertices script.") -public class ClassicGraphTraversalIT extends GraphTraversalITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource graphTraversalSource = EmptyGraph.instance().traversal(); - private final SocialTraversalSource socialTraversal = - EmptyGraph.instance().traversal(SocialTraversalSource.class); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); - SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CLASSIC_GRAPH)); - 
SESSION_RULE - .session() - .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected boolean isGraphBinary() { - return false; - } - - @Override - protected CustomCcmRule ccmRule() { - return CCM_RULE; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return graphTraversalSource; - } - - @Override - protected SocialTraversalSource socialTraversalSource() { - return socialTraversal; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeFluentIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeFluentIT.java deleted file mode 100644 index 94e6415f471..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeFluentIT.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import static com.datastax.dse.driver.api.core.graph.DseGraph.g; -import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.unfold; - -import com.datastax.dse.driver.api.core.graph.CoreGraphDataTypeITBase; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -@RunWith(DataProviderRunner.class) -public class CoreGraphDataTypeFluentIT extends CoreGraphDataTypeITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected String graphName() { - return SESSION_RULE.getGraphName(); - } - - @Override - public Map insertVertexThenReadProperties( - Map properties, int vertexID, String vertexLabel) { - GraphTraversal traversal = 
g.addV(vertexLabel).property("id", vertexID); - - for (Map.Entry entry : properties.entrySet()) { - String typeDefinition = entry.getKey(); - String propName = formatPropertyName(typeDefinition); - Object value = entry.getValue(); - traversal = traversal.property(propName, value); - } - - session().execute(FluentGraphStatement.newInstance(traversal)); - - return session() - .execute( - FluentGraphStatement.newInstance( - g.V().has(vertexLabel, "id", vertexID).valueMap().by(unfold()))) - .one() - .asMap(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeScriptIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeScriptIT.java deleted file mode 100644 index b79aecf6c6a..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeScriptIT.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.CoreGraphDataTypeITBase; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatementBuilder; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.util.Map; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -@RunWith(DataProviderRunner.class) -public class CoreGraphDataTypeScriptIT extends CoreGraphDataTypeITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected String graphName() { - return SESSION_RULE.getGraphName(); - } - - @Override - protected Map insertVertexThenReadProperties( - Map properties, int vertexID, String vertexLabel) { - StringBuilder insert = new StringBuilder("g.addV(vertexLabel).property('id', vertexID)"); - - ScriptGraphStatementBuilder statementBuilder = - new ScriptGraphStatementBuilder() - .setQueryParam("vertexID", vertexID) - 
.setQueryParam("vertexLabel", vertexLabel); - - for (Map.Entry entry : properties.entrySet()) { - String typeDefinition = entry.getKey(); - String propName = formatPropertyName(typeDefinition); - Object value = entry.getValue(); - - insert.append(String.format(".property('%s', %s)", propName, propName)); - statementBuilder = statementBuilder.setQueryParam(propName, value); - } - - session().execute(statementBuilder.setScript(insert.toString()).build()); - - return session() - .execute( - ScriptGraphStatement.newInstance( - "g.V().has(vertexLabel, 'id', vertexID).valueMap().by(unfold())") - .setQueryParam("vertexID", vertexID) - .setQueryParam("vertexLabel", vertexLabel)) - .one() - .asMap(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalBatchIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalBatchIT.java deleted file mode 100644 index be09ac1bfb2..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalBatchIT.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -public class CoreGraphTraversalBatchIT extends GraphTraversalBatchITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource g = EmptyGraph.instance().traversal().with("allow-filtering"); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CORE_GRAPH)); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @Override - protected boolean isGraphBinary() { - return true; - } - - @Override - protected CustomCcmRule ccmRule() { - return CCM_RULE; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return g; - 
} -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalIT.java deleted file mode 100644 index d97b0da958a..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalIT.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8.0", - description = "DSE 6.8.0 required for Core graph support") -public class CoreGraphTraversalIT extends GraphTraversalITBase { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private final GraphTraversalSource graphTraversalSource = - EmptyGraph.instance().traversal().with("allow-filtering"); - private final SocialTraversalSource socialTraversalSource = - EmptyGraph.instance().traversal(SocialTraversalSource.class).with("allow-filtering"); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CORE_GRAPH)); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - 
- @Override - protected boolean isGraphBinary() { - return true; - } - - @Override - protected CustomCcmRule ccmRule() { - return CCM_RULE; - } - - @Override - protected GraphTraversalSource graphTraversalSource() { - return graphTraversalSource; - } - - @Override - protected SocialTraversalSource socialTraversalSource() { - return socialTraversalSource; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalBatchITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalBatchITBase.java deleted file mode 100644 index 0c8c3b8b5d4..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalBatchITBase.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsLabel; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsProperties; -import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.addE; -import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.addV; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Map; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.junit.Test; - -public abstract class GraphTraversalBatchITBase { - - protected abstract CqlSession session(); - - protected abstract boolean isGraphBinary(); - - protected abstract CustomCcmRule ccmRule(); - - protected abstract GraphTraversalSource graphTraversalSource(); - - @Test - public void should_allow_vertex_and_edge_insertions_in_batch() { - BatchGraphStatement batch = - BatchGraphStatement.builder() - .addTraversals( - ImmutableList.of( - addV("person").property("name", "batch1").property("age", 1), - addV("person").property("name", "batch2").property("age", 2))) - .build(); - - BatchGraphStatement batch2 = - BatchGraphStatement.builder() - .addTraversals(batch) - .addTraversal( - addE("knows") - 
.from(__.V().has("name", "batch1")) - .to(__.V().has("name", "batch2")) - .property("weight", 2.3f)) - .build(); - - assertThat(batch.size()).isEqualTo(2); - assertThat(batch2.size()).isEqualTo(3); - - session().execute(batch2); - - if (isGraphBinary()) { - Map properties = - session() - .execute( - FluentGraphStatement.newInstance( - graphTraversalSource().V().has("name", "batch1").elementMap("age"))) - .one() - .asMap(); - - assertThatContainsProperties(properties, "age", 1); - - properties = - session() - .execute( - FluentGraphStatement.newInstance( - graphTraversalSource().V().has("name", "batch2").elementMap("age"))) - .one() - .asMap(); - - assertThatContainsProperties(properties, "age", 2); - - properties = - session() - .execute( - FluentGraphStatement.newInstance( - graphTraversalSource() - .V() - .has("name", "batch1") - .bothE() - .elementMap("weight", "person"))) - .one() - .asMap(); - - assertThatContainsProperties(properties, "weight", 2.3f); - assertThatContainsLabel(properties, Direction.IN, "person"); - assertThatContainsLabel(properties, Direction.OUT, "person"); - - } else { - - assertThat( - session() - .execute( - FluentGraphStatement.newInstance( - graphTraversalSource().V().has("name", "batch1"))) - .one() - .asVertex()) - .hasProperty("age", 1); - - assertThat( - session() - .execute( - FluentGraphStatement.newInstance( - graphTraversalSource().V().has("name", "batch2"))) - .one() - .asVertex()) - .hasProperty("age", 2); - - assertThat( - session() - .execute( - FluentGraphStatement.newInstance( - graphTraversalSource().V().has("name", "batch1").bothE())) - .one() - .asEdge()) - .hasProperty("weight", 2.3f) - .hasOutVLabel("person") - .hasInVLabel("person"); - } - } - - @Test - public void should_fail_if_no_bytecode_in_batch() { - BatchGraphStatement batch = - BatchGraphStatement.builder().addTraversals(ImmutableList.of()).build(); - assertThat(batch.size()).isEqualTo(0); - try { - session().execute(batch); - fail( - "Should have thrown 
InvalidQueryException because batch does not contain any traversals."); - } catch (InvalidQueryException e) { - assertThat(e.getMessage()) - .contains("The batch statement sent does not contain any traversal"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalITBase.java deleted file mode 100644 index 5bcb01bc165..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalITBase.java +++ /dev/null @@ -1,668 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -import static com.datastax.dse.driver.api.core.graph.FluentGraphStatement.newInstance; -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; -import static com.datastax.dse.driver.api.core.graph.TinkerPathAssert.validatePathObjects; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsLabel; -import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsProperties; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphStatement; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import org.apache.tinkerpop.gremlin.process.traversal.Path; -import 
org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; -import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree; -import org.apache.tinkerpop.gremlin.structure.Direction; -import org.apache.tinkerpop.gremlin.structure.Edge; -import org.apache.tinkerpop.gremlin.structure.Graph; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.assertj.core.api.Assertions; -import org.assertj.core.api.Assumptions; -import org.junit.Test; - -public abstract class GraphTraversalITBase { - - protected abstract CqlSession session(); - - protected abstract boolean isGraphBinary(); - - protected abstract CustomCcmRule ccmRule(); - - protected abstract GraphTraversalSource graphTraversalSource(); - - protected abstract SocialTraversalSource socialTraversalSource(); - - /** - * Ensures that a previously returned {@link Vertex}'s {@link Vertex#id()} can be used as an input - * to {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#V(Object...)} to - * retrieve the {@link Vertex} and that the returned {@link Vertex} is the same. 
- * - * @test_category dse:graph - */ - @Test - public void should_use_vertex_id_as_parameter() { - GraphTraversal query = - graphTraversalSource().V().hasLabel("person").has("name", "marko"); - GraphResultSet resultSet = session().execute(newInstance(query)); - - List results = resultSet.all(); - - assertThat(results.size()).isEqualTo(1); - Vertex marko = results.get(0).asVertex(); - if (isGraphBinary()) { - Map properties = - session().execute(newInstance(query.elementMap("name"))).one().asMap(); - assertThatContainsProperties(properties, "name", "marko"); - } else { - assertThat(marko).hasProperty("name", "marko"); - } - - resultSet = session().execute(newInstance(graphTraversalSource().V(marko.id()))); - - results = resultSet.all(); - assertThat(results.size()).isEqualTo(1); - Vertex marko2 = results.get(0).asVertex(); - // Ensure that the returned vertex is the same as the first. - assertThat(marko2).isEqualTo(marko); - } - - /** - * Ensures that a previously returned {@link Edge}'s {@link Edge#id()} can be used as an input to - * {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#E(Object...)} to - * retrieve the {@link Edge} and that the returned {@link Edge} is the same. 
- * - * @test_category dse:graph - */ - @Test - public void should_use_edge_id_as_parameter() { - GraphTraversal query = graphTraversalSource().E().has("weight", 0.2f); - GraphResultSet resultSet = session().execute(newInstance(query)); - - List results = resultSet.all(); - assertThat(results.size()).isEqualTo(1); - - Edge created = results.get(0).asEdge(); - if (isGraphBinary()) { - Map properties = - session() - .execute(newInstance(query.elementMap("weight", "software", "person"))) - .one() - .asMap(); - - assertThatContainsProperties(properties, "weight", 0.2f); - assertThatContainsLabel(properties, Direction.IN, "software"); - assertThatContainsLabel(properties, Direction.OUT, "person"); - } else { - assertThat(created) - .hasProperty("weight", 0.2f) - .hasInVLabel("software") - .hasOutVLabel("person"); - } - - if (isGraphBinary()) { - Map inProperties = - session() - .execute( - newInstance( - graphTraversalSource().E(created.id()).inV().elementMap("name", "lang"))) - .one() - .asMap(); - assertThatContainsProperties(inProperties, "name", "lop", "lang", "java"); - } else { - resultSet = session().execute(newInstance(graphTraversalSource().E(created.id()).inV())); - results = resultSet.all(); - assertThat(results.size()).isEqualTo(1); - Vertex lop = results.get(0).asVertex(); - - assertThat(lop).hasLabel("software").hasProperty("name", "lop").hasProperty("lang", "java"); - } - } - - /** - * A sanity check that a returned {@link Vertex}'s id is a {@link Map}. This test could break in - * the future if the format of a vertex ID changes from a Map to something else in DSE. 
- * - * @test_category dse:graph - */ - @Test - public void should_deserialize_vertex_id_as_map() { - GraphResultSet resultSet = - session() - .execute( - newInstance(graphTraversalSource().V().hasLabel("person").has("name", "marko"))); - - List results = resultSet.all(); - assertThat(results.size()).isEqualTo(1); - - Vertex marko = results.get(0).asVertex(); - - if (isGraphBinary()) { - assertThat(((String) marko.id())).contains("marko"); - assertThat(marko.label()).isEqualTo("person"); - } else { - assertThat(marko).hasProperty("name", "marko"); - @SuppressWarnings("unchecked") - Map id = (Map) marko.id(); - assertThat(id) - .hasSize(3) - .containsEntry("~label", "person") - .containsKey("community_id") - .containsKey("member_id"); - } - } - - /** - * Ensures that a traversal that returns a result of mixed types is interpreted as a {@link Map} - * with {@link Object} values. Also uses {@link - * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal#by(org.apache.tinkerpop.gremlin.process.traversal.Traversal)} - * with an anonymous traversal to get inbound 'created' edges and folds them into a list. - * - *

    Executes a vertex traversal that binds label 'a' and 'b' to vertex properties and label 'c' - * to vertices that have edges from that vertex. - * - * @test_category dse:graph - */ - @Test - public void should_handle_result_object_of_mixed_types() { - // find all software vertices and select name, language, and find all vertices that created such - // software. - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .V() - .hasLabel("software") - .as("a", "b", "c") - .select("a", "b", "c") - .by("name") - .by("lang") - .by(__.in("created").fold()))); - - List results = rs.all(); - assertThat(results.size()).isEqualTo(2); - - // Ensure that we got 'lop' and 'ripple' for property a. - assertThat(results) - .extracting(m -> m.getByKey("a").as(Object.class)) - .containsOnly("lop", "ripple"); - - for (GraphNode result : results) { - // The row should represent a map with a, b, and c keys. - assertThat(ImmutableList.copyOf(result.keys())).containsOnlyOnce("a", "b", "c"); - // 'e' should not exist, thus it should be null. - assertThat(result.getByKey("e")).isNull(); - // both software are written in java. - assertThat(result.getByKey("b").isNull()).isFalse(); - assertThat(result.getByKey("b").asString()).isEqualTo("java"); - GraphNode c = result.getByKey("c"); - assertThat(c.isList()).isTrue(); - if (result.getByKey("a").asString().equals("lop")) { - if (isGraphBinary()) { - // should contain three vertices - Assertions.assertThat(c.size()).isEqualTo(3); - } else { - // 'c' should contain marko, josh, peter. - // Ensure we have three vertices. 
- assertThat(c.size()).isEqualTo(3); - List vertices = - Lists.newArrayList( - c.getByIndex(0).asVertex(), - c.getByIndex(1).asVertex(), - c.getByIndex(2).asVertex()); - assertThat(vertices) - .extracting(vertex -> vertex.property("name").value()) - .containsOnly("marko", "josh", "peter"); - } - } else { - if (isGraphBinary()) { - // has only one label - Assertions.assertThat(c.size()).isEqualTo(1); - } else { - // ripple, 'c' should contain josh. - // Ensure we have 1 vertex. - assertThat(c.size()).isEqualTo(1); - Vertex vertex = c.getByIndex(0).asVertex(); - assertThat(vertex).hasProperty("name", "josh"); - } - } - } - } - - /** - * Ensures a traversal that yields no results is properly retrieved and is empty. - * - * @test_category dse:graph - */ - @Test - public void should_return_zero_results() { - if (isGraphBinary()) { - assertThatThrownBy( - () -> - session().execute(newInstance(graphTraversalSource().V().hasLabel("notALabel")))) - .isInstanceOf(InvalidQueryException.class) - .hasMessageContaining("Unknown vertex label 'notALabel'"); - } else { - GraphResultSet rs = - session().execute(newInstance(graphTraversalSource().V().hasLabel("notALabel"))); - assertThat(rs.all().size()).isZero(); - } - } - - /** - * Ensures a traversal that yields no results is properly retrieved and is empty, using GraphSON2 - * and the TinkerPop transform results function. - * - * @test_category dse:graph - */ - @Test - public void should_return_zero_results_graphson_2() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - - GraphStatement simpleGraphStatement = - ScriptGraphStatement.newInstance("g.V().hasLabel('notALabel')"); - - GraphResultSet rs = session().execute(simpleGraphStatement); - assertThat(rs.one()).isNull(); - } - - /** - * Validates that a traversal using lambda operations with anonymous traversals are applied - * appropriately and return the expected results. - * - *

    Traversal that filters 'person'-labeled vertices by name 'marko' and flatMaps outgoing - * vertices on the 'knows' relationship by their outgoing 'created' vertices and then maps by - * their 'name' property and folds them into one list. - * - *

    Note: This does not validate lambdas with functions as those can't be interpreted and - * sent remotely. - * - * @test_category dse:graph - */ - @Test - public void should_handle_lambdas() { - // Find all people marko knows and the software they created. - GraphResultSet result = - session() - .execute( - newInstance( - graphTraversalSource() - .V() - .hasLabel("person") - .filter(__.has("name", "marko")) - .out("knows") - .flatMap(__.out("created")) - .map(__.values("name")) - .fold())); - - // Marko only knows josh and vadas, of which josh created lop and ripple. - List software = result.one().as(GenericType.listOf(String.class)); - assertThat(software).containsOnly("lop", "ripple"); - } - - /** - * Validates that when traversing a path and labeling some of the elements during the traversal - * that the output elements are properly labeled. - * - * @test_category dse:graph - */ - @Test - public void should_resolve_path_with_some_labels() { - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .V() - .hasLabel("person") - .has("name", "marko") - .as("a") - .outE("knows") - .inV() - .as("c", "d") - .outE("created") - .as("e", "f", "g") - .inV() - .path())); - - List results = rs.all(); - assertThat(results.size()).isEqualTo(2); - for (GraphNode result : results) { - Path path = result.asPath(); - validatePathObjects(path); - assertThat(path.labels()).hasSize(5); - assertThat(path) - .hasLabel(0, "a") - .hasNoLabel(1) - .hasLabel(2, "c", "d") - .hasLabel(3, "e", "f", "g") - .hasNoLabel(4); - } - } - - /** - * Validates that when traversing a path and labeling all of the elements during the traversal - * that the output elements are properly labeled. 
- * - * @test_category dse:graph - */ - @Test - public void should_resolve_path_with_labels() { - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .V() - .hasLabel("person") - .has("name", "marko") - .as("a") - .outE("knows") - .as("b") - .inV() - .as("c", "d") - .outE("created") - .as("e", "f", "g") - .inV() - .as("h") - .path())); - List results = rs.all(); - assertThat(results.size()).isEqualTo(2); - for (GraphNode result : results) { - Path path = result.asPath(); - validatePathObjects(path); - assertThat(path.labels()).hasSize(5); - assertThat(path) - .hasLabel(0, "a") - .hasLabel(1, "b") - .hasLabel(2, "c", "d") - .hasLabel(3, "e", "f", "g") - .hasLabel(4, "h"); - } - } - - /** - * Validates that when traversing a path and labeling none of the elements during the traversal - * that all the labels are empty in the result. - * - * @test_category dse:graph - */ - @Test - public void should_resolve_path_without_labels() { - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .V() - .hasLabel("person") - .has("name", "marko") - .outE("knows") - .inV() - .outE("created") - .inV() - .path())); - List results = rs.all(); - assertThat(results.size()).isEqualTo(2); - for (GraphNode result : results) { - Path path = result.asPath(); - validatePathObjects(path); - assertThat(path.labels()).hasSize(5); - for (int i = 0; i < 5; i++) assertThat(path).hasNoLabel(i); - } - } - - /** - * Validates that a traversal returning a Tree structure is returned appropriately with the - * expected contents. - * - *

    Retrieves trees of people marko knows and the software they created. - * - * @test_category dse:graph - */ - @Test - public void should_parse_tree() { - // Get a tree structure showing the paths from mark to people he knows to software they've - // created. - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .V() - .hasLabel("person") - .out("knows") - .out("created") - .tree() - .by("name"))); - - List results = rs.all(); - assertThat(results.size()).isEqualTo(1); - - // [{key=marko, value=[{key=josh, value=[{key=ripple, value=[]}, {key=lop, value=[]}]}]}] - GraphNode result = results.get(0); - - @SuppressWarnings("unchecked") - Tree tree = result.as(Tree.class); - - assertThat(tree).tree("marko").tree("josh").tree("lop").isLeaf(); - - assertThat(tree).tree("marko").tree("josh").tree("ripple").isLeaf(); - } - - /** - * Ensures that a traversal that returns a sub graph can be retrieved. - * - *

    The subgraph is all members in a knows relationship, thus is all people who marko knows and - * the edges that connect them. - */ - @Test - public void should_handle_subgraph_graphson() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .E() - .hasLabel("knows") - .subgraph("subGraph") - .cap("subGraph"))); - - List results = rs.all(); - assertThat(results.size()).isEqualTo(1); - - Graph graph = results.get(0).as(Graph.class); - - assertThat(graph.edges()).toIterable().hasSize(2); - assertThat(graph.vertices()).toIterable().hasSize(3); - } - - /** - * Ensures that a traversal that returns a sub graph can be retrieved. - * - *

    The subgraph is all members in a knows relationship, thus is all people who marko knows and - * the edges that connect them. - */ - @Test - public void should_handle_subgraph_grap_binary() { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - GraphResultSet rs = - session() - .execute( - newInstance( - graphTraversalSource() - .E() - .hasLabel("knows") - .subgraph("subGraph") - .cap("subGraph"))); - - List results = rs.all(); - assertThat(results.size()).isEqualTo(1); - - String graph = results.get(0).as(String.class); - - assertThat(graph).contains("vertices:3").contains("edges:2"); - } - - /** - * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for - * use with DSLs. - * - * @test_category dse:graph - */ - @Test - public void should_allow_use_of_dsl_graphson() throws Exception { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - SocialTraversalSource gSocial = socialTraversalSource(); - - GraphStatement gs = newInstance(gSocial.persons("marko").knows("vadas")); - - GraphResultSet rs = session().execute(gs); - List results = rs.all(); - - assertThat(results.size()).isEqualTo(1); - assertThat(results.get(0).asVertex()) - .hasProperty("name", "marko") - .hasProperty("age", 29) - .hasLabel("person"); - } - - /** - * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for - * use with DSLs. 
- * - * @test_category dse:graph - */ - @Test - public void should_allow_use_of_dsl_graph_binary() throws Exception { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - SocialTraversalSource gSocial = socialTraversalSource(); - - GraphStatement gs = - newInstance(gSocial.persons("marko").knows("vadas").elementMap("name", "age")); - - GraphResultSet rs = session().execute(gs); - List results = rs.all(); - - assertThat(results.size()).isEqualTo(1); - assertThatContainsProperties(results.get(0).asMap(), "name", "marko", "age", 29); - Assertions.assertThat(results.get(0).asMap().values()).contains("person"); - } - - /** - * Ensures that traversals with barriers (which return results bulked) contain the correct amount - * of end results. - * - *

    This will fail if ran against DSE < 5.0.9 or DSE < 5.1.2. - */ - @Test - public void should_return_correct_results_when_bulked() { - Assumptions.assumeThat( - CcmBridge.isDistributionOf( - BackendType.DSE, (dist, cass) -> dist.compareTo(Version.parse("5.1.2")) > 0)) - .isTrue(); - - GraphResultSet rs = - session().execute(newInstance(graphTraversalSource().E().label().barrier())); - - List results = - rs.all().stream().map(GraphNode::asString).sorted().collect(Collectors.toList()); - - assertThat(results) - .hasSize(6) - .containsSequence("created", "created", "created", "created") - .containsSequence("knows", "knows"); - } - - @Test - public void should_handle_asynchronous_execution_graphson() { - Assumptions.assumeThat(isGraphBinary()).isFalse(); - StringBuilder names = new StringBuilder(); - - CompletionStage future = - session() - .executeAsync( - FluentGraphStatement.newInstance(graphTraversalSource().V().hasLabel("person"))); - - try { - // dumb processing to make sure the completable future works correctly and correct results are - // returned - Iterable results = - future.thenApply(AsyncGraphResultSet::currentPage).toCompletableFuture().get(); - for (GraphNode gn : results) { - names.append(gn.asVertex().property("name").value()); - } - } catch (InterruptedException | ExecutionException e) { - fail("Shouldn't have thrown an exception waiting for the result to complete"); - } - - assertThat(names.toString()).contains("peter", "marko", "vadas", "josh"); - } - - @Test - public void should_handle_asynchronous_execution_graph_binary() { - Assumptions.assumeThat(isGraphBinary()).isTrue(); - StringBuilder names = new StringBuilder(); - - CompletionStage future = - session() - .executeAsync( - FluentGraphStatement.newInstance(graphTraversalSource().V().hasLabel("person"))); - - try { - // dumb processing to make sure the completable future works correctly and correct results are - // returned - Iterable results = - 
future.thenApply(AsyncGraphResultSet::currentPage).toCompletableFuture().get(); - for (GraphNode gn : results) { - names.append(gn.asVertex().id()); - } - } catch (InterruptedException | ExecutionException e) { - fail("Shouldn't have thrown an exception waiting for the result to complete"); - } - - assertThat(names.toString()).contains("peter", "marko", "vadas", "josh"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMetaPropertiesIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMetaPropertiesIT.java deleted file mode 100644 index d8058cbf59e..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMetaPropertiesIT.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.graph.statement; - -// INFO: meta props are going away in NGDG - -import static com.datastax.dse.driver.api.core.graph.DseGraph.g; -import static com.datastax.dse.driver.api.core.graph.FluentGraphStatement.newInstance; -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS; -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT; -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -// INFO: meta props are going away in NGDG - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.3", - description = "DSE 5.0.3 required for remote TinkerPop support") -public class GraphTraversalMetaPropertiesIT { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - /** Builds a simple schema that provides for a vertex with a property with sub properties. 
*/ - private static final String META_PROPS = - MAKE_STRICT - + ALLOW_SCANS - + "schema.propertyKey('sub_prop').Text().create()\n" - + "schema.propertyKey('sub_prop2').Text().create()\n" - + "schema.propertyKey('meta_prop').Text().properties('sub_prop', 'sub_prop2').create()\n" - + "schema.vertexLabel('meta_v').properties('meta_prop').create()"; - - /** - * Ensures that a traversal that yields a vertex with a property that has its own properties that - * is appropriately parsed and made accessible via {@link VertexProperty#property(String)}. - * - * @test_category dse:graph - */ - @Test - public void should_parse_meta_properties() { - SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(META_PROPS)); - - GraphResultSet result = - SESSION_RULE - .session() - .execute( - newInstance( - g.addV("meta_v") - .property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2"))); - - Vertex v = result.one().asVertex(); - assertThat(v).hasProperty("meta_prop"); - - VertexProperty metaProp = v.property("meta_prop"); - assertThat(metaProp) - .hasValue("hello") - .hasProperty("sub_prop", "hi") - .hasProperty("sub_prop2", "hi2"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMultiPropertiesIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMultiPropertiesIT.java deleted file mode 100644 index c30e770f40e..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMultiPropertiesIT.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.core.graph.statement; - -import static com.datastax.dse.driver.api.core.graph.DseGraph.g; -import static com.datastax.dse.driver.api.core.graph.FluentGraphStatement.newInstance; -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS; -import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT; -import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; - -import com.datastax.dse.driver.api.core.graph.GraphResultSet; -import com.datastax.dse.driver.api.core.graph.GraphTestSupport; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.util.Iterator; -import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.apache.tinkerpop.gremlin.structure.VertexProperty; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -// INFO: multi props are not supported in Core -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0.3", - 
description = "DSE 5.0.3 required for remote TinkerPop support") -public class GraphTraversalMultiPropertiesIT { - - private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); - - private static final SessionRule SESSION_RULE = - GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - /** Builds a simple schema that provides for a vertex with a multi-cardinality property. */ - private static final String MULTI_PROPS = - MAKE_STRICT - + ALLOW_SCANS - + "schema.propertyKey('multi_prop').Text().multiple().create()\n" - + "schema.vertexLabel('multi_v').properties('multi_prop').create()\n"; - - /** - * Ensures that a traversal that yields a vertex with a property name that is present multiple - * times that the properties are parsed and made accessible via {@link - * Vertex#properties(String...)}. - * - * @test_category dse:graph - */ - @Test - public void should_parse_multiple_cardinality_properties() { - // given a schema that defines multiple cardinality properties. 
- SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(MULTI_PROPS)); - - // when adding a vertex with a multiple cardinality property - GraphResultSet result = - SESSION_RULE - .session() - .execute( - newInstance( - g.addV("multi_v") - .property("multi_prop", "Hello") - .property("multi_prop", "Sweet") - .property("multi_prop", "World"))); - - Vertex v = result.one().asVertex(); - assertThat(v).hasProperty("multi_prop"); - - Iterator> multiProp = v.properties("multi_prop"); - assertThat(multiProp) - .toIterable() - .extractingResultOf("value") - .containsExactly("Hello", "Sweet", "World"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/insights/InsightsClientIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/insights/InsightsClientIT.java deleted file mode 100644 index 0296908be44..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/insights/InsightsClientIT.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.insights; - -import com.datastax.dse.driver.internal.core.insights.InsightsClient; -import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import io.netty.util.concurrent.DefaultEventExecutor; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.7.0", - description = "DSE 6.7.0 required for Insights support") -public class InsightsClientIT { - private static final StackTraceElement[] EMPTY_STACK_TRACE = {}; - - private static CustomCcmRule ccmRule = - CustomCcmRule.builder() - .withNodes(1) - .withJvmArgs( - "-Dinsights.service_options_enabled=true", - "-Dinsights.default_mode=ENABLED_WITH_LOCAL_STORAGE") - .build(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - public void should_send_insights_startup_event_using_client() - throws ExecutionException, InterruptedException, TimeoutException { - // given - InsightsClient insightsClient = - InsightsClient.createInsightsClient( - new InsightsConfiguration(true, 300000L, new DefaultEventExecutor()), - (InternalDriverContext) sessionRule.session().getContext(), - EMPTY_STACK_TRACE); - - // when - 
insightsClient.sendStartupMessage().toCompletableFuture().get(1000, TimeUnit.SECONDS); - - // then no exception - } - - @Test - public void should_send_insights_status_event_using_client() - throws ExecutionException, InterruptedException, TimeoutException { - // given - InsightsClient insightsClient = - InsightsClient.createInsightsClient( - new InsightsConfiguration(true, 300000L, new DefaultEventExecutor()), - (InternalDriverContext) sessionRule.session().getContext(), - EMPTY_STACK_TRACE); - - // when - insightsClient.sendStatusMessage().toCompletableFuture().get(1000, TimeUnit.SECONDS); - - // then no exception - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/AbstractMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/AbstractMetadataIT.java deleted file mode 100644 index ea28dc6449e..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/AbstractMetadataIT.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Optional; -import org.junit.experimental.categories.Category; - -/* Abstract class to hold common methods for Metadata Schema tests. */ -@Category(ParallelizableTests.class) -public abstract class AbstractMetadataIT { - - /* Convenience method for executing a CQL statement using the test's Session Rule. */ - public void execute(String cql) { - getSessionRule() - .session() - .execute( - SimpleStatement.builder(cql) - .setExecutionProfile(getSessionRule().slowProfile()) - .build()); - } - - /** - * Convenience method for retrieving the Keyspace metadata from this test's Session Rule. Also - * asserts the Keyspace exists and has the expected name. - */ - public DseKeyspaceMetadata getKeyspace() { - Optional keyspace = - getSessionRule().session().getMetadata().getKeyspace(getSessionRule().keyspace()); - assertThat(keyspace) - .isPresent() - .hasValueSatisfying( - ks -> { - assertThat(ks).isInstanceOf(DseKeyspaceMetadata.class); - assertThat(ks.getName()).isEqualTo(getSessionRule().keyspace()); - }); - return ((DseKeyspaceMetadata) keyspace.get()); - } - - /* Concrete ITs should return their ClassRule SessionRule. 
*/ - protected abstract SessionRule getSessionRule(); -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadataIT.java deleted file mode 100644 index 4c899fa5e63..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadataIT.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assumptions.assumeThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.util.Objects; -import java.util.Optional; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - description = "DSE 5.0+ required function/aggregate support") -public class DseAggregateMetadataIT extends AbstractMetadataIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); - - @Override - protected SessionRule getSessionRule() { - return DseAggregateMetadataIT.SESSION_RULE; - } - - @Test - public void should_parse_aggregate_without_deterministic() { - String cqlFunction = - "CREATE FUNCTION nondetf(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return new java.util.Random().nextInt(i);';"; - String cqlAggregate = "CREATE AGGREGATE nondeta() SFUNC nondetf STYPE int INITCOND 0;"; - execute(cqlFunction); - execute(cqlAggregate); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional aggregateOpt = 
keyspace.getAggregate("nondeta"); - assertThat(aggregateOpt.map(DseAggregateMetadata.class::cast)) - .hasValueSatisfying( - aggregate -> { - if (isDse6OrHigher()) { - assertThat(aggregate.getDeterministic()).contains(false); - } else { - assertThat(aggregate.getDeterministic()).isEmpty(); - } - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - assertThat(aggregate.describe(false)) - .isEqualTo( - String.format( - "CREATE AGGREGATE \"%s\".\"nondeta\"() SFUNC \"nondetf\" STYPE int INITCOND 0;", - keyspace.getName().asInternal())); - }); - } - - @Test - public void should_parse_aggregate_with_deterministic() { - assumeThat(isDse6OrHigher()).describedAs("DSE 6.0+ required for DETERMINISTIC").isTrue(); - String cqlFunction = - "CREATE FUNCTION detf(i int, y int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+y;';"; - String cqlAggregate = - "CREATE AGGREGATE deta(int) SFUNC detf STYPE int INITCOND 0 DETERMINISTIC;"; - execute(cqlFunction); - execute(cqlAggregate); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional aggregateOpt = keyspace.getAggregate("deta", DataTypes.INT); - assertThat(aggregateOpt.map(DseAggregateMetadata.class::cast)) - .hasValueSatisfying( - aggregate -> { - assertThat(aggregate.getDeterministic()).contains(true); - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - assertThat(aggregate.describe(false)) - .isEqualTo( - String.format( - "CREATE AGGREGATE \"%s\".\"deta\"(int) SFUNC \"detf\" STYPE int INITCOND 0 DETERMINISTIC;", - keyspace.getName().asInternal())); - }); - } - - private static boolean isDse6OrHigher() { - assumeThat(CCM_RULE.isDistributionOf(BackendType.DSE)) - .describedAs("DSE required for DseFunctionMetadata tests") - .isTrue(); - return CCM_RULE.getDistributionVersion().compareTo(DSE_6_0_0) >= 0; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadataIT.java 
b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadataIT.java deleted file mode 100644 index 53559a66b1b..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadataIT.java +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assumptions.assumeThat; - -import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata.Monotonicity; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import java.util.Objects; -import java.util.Optional; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - description = "DSE 5.0+ required function/aggregate support") -public class DseFunctionMetadataIT extends AbstractMetadataIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); - - @Override - public SessionRule getSessionRule() { - return DseFunctionMetadataIT.SESSION_RULE; - } - - @Test - public void should_parse_function_without_deterministic_or_monotonic() { - String cqlFunction = - "CREATE FUNCTION nondetf(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return new java.util.Random().nextInt(i);';"; - execute(cqlFunction); - DseKeyspaceMetadata keyspace = 
getKeyspace(); - Optional functionOpt = keyspace.getFunction("nondetf", DataTypes.INT); - assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) - .hasValueSatisfying( - function -> { - if (isDse6OrHigher()) { - assertThat(function.getDeterministic()).contains(false); - assertThat(function.getMonotonicity()).contains(Monotonicity.NOT_MONOTONIC); - } else { - assertThat(function.getDeterministic()).isEmpty(); - assertThat(function.getMonotonicity()).isEmpty(); - } - assertThat(function.getMonotonicArgumentNames()).isEmpty(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - assertThat(function.getBody()).isEqualTo("return new java.util.Random().nextInt(i);"); - assertThat(function.describe(false)) - .isEqualTo( - String.format( - "CREATE FUNCTION \"%s\".\"nondetf\"(\"i\" int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return new java.util.Random().nextInt(i);';", - keyspace.getName().asInternal())); - }); - } - - @Test - public void should_parse_function_with_deterministic() { - assumeThat(isDse6OrHigher()) - .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") - .isTrue(); - String cqlFunction = - "CREATE FUNCTION detf(i int, y int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC LANGUAGE java AS 'return i+y;';"; - execute(cqlFunction); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional functionOpt = - keyspace.getFunction("detf", DataTypes.INT, DataTypes.INT); - assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) - .hasValueSatisfying( - function -> { - assertThat(function.getDeterministic()).contains(true); - assertThat(function.getMonotonicity()).contains(Monotonicity.NOT_MONOTONIC); - assertThat(function.getMonotonicArgumentNames()).isEmpty(); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - assertThat(function.getBody()).isEqualTo("return i+y;"); - 
assertThat(function.describe(false)) - .isEqualTo( - String.format( - "CREATE FUNCTION \"%s\".\"detf\"(\"i\" int,\"y\" int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC LANGUAGE java AS 'return i+y;';", - keyspace.getName().asInternal())); - }); - } - - @Test - public void should_parse_function_with_monotonic() { - assumeThat(isDse6OrHigher()) - .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") - .isTrue(); - String cqlFunction = - "CREATE FUNCTION monotonic(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int MONOTONIC LANGUAGE java AS 'return dividend / divisor;';"; - execute(cqlFunction); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional functionOpt = - keyspace.getFunction("monotonic", DataTypes.INT, DataTypes.INT); - assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) - .hasValueSatisfying( - function -> { - assertThat(function.getDeterministic()).contains(false); - assertThat(function.getMonotonicity()).contains(Monotonicity.FULLY_MONOTONIC); - assertThat(function.getMonotonicArgumentNames()) - .containsExactly( - CqlIdentifier.fromCql("dividend"), CqlIdentifier.fromCql("divisor")); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); - assertThat(function.describe(false)) - .isEqualTo( - String.format( - "CREATE FUNCTION \"%s\".\"monotonic\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int MONOTONIC LANGUAGE java AS 'return dividend / divisor;';", - keyspace.getName().asInternal())); - }); - } - - @Test - public void should_parse_function_with_monotonic_on() { - assumeThat(isDse6OrHigher()) - .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") - .isTrue(); - String cqlFunction = - "CREATE FUNCTION monotonic_on(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';"; 
- execute(cqlFunction); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional functionOpt = - keyspace.getFunction("monotonic_on", DataTypes.INT, DataTypes.INT); - assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) - .hasValueSatisfying( - function -> { - assertThat(function.getDeterministic()).contains(false); - assertThat(function.getMonotonicity()).contains(Monotonicity.PARTIALLY_MONOTONIC); - assertThat(function.getMonotonicArgumentNames()) - .containsExactly(CqlIdentifier.fromCql("dividend")); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); - assertThat(function.describe(false)) - .isEqualTo( - String.format( - "CREATE FUNCTION \"%s\".\"monotonic_on\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';", - keyspace.getName().asInternal())); - }); - } - - @Test - public void should_parse_function_with_deterministic_and_monotonic() { - assumeThat(isDse6OrHigher()) - .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") - .isTrue(); - String cqlFunction = - "CREATE FUNCTION det_and_monotonic(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC LANGUAGE java AS 'return dividend / divisor;';"; - execute(cqlFunction); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional functionOpt = - keyspace.getFunction("det_and_monotonic", DataTypes.INT, DataTypes.INT); - assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) - .hasValueSatisfying( - function -> { - assertThat(function.getDeterministic()).contains(true); - assertThat(function.getMonotonicity()).contains(Monotonicity.FULLY_MONOTONIC); - assertThat(function.getMonotonicArgumentNames()) - .containsExactly( - CqlIdentifier.fromCql("dividend"), CqlIdentifier.fromCql("divisor")); - 
assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); - assertThat(function.describe(false)) - .isEqualTo( - String.format( - "CREATE FUNCTION \"%s\".\"det_and_monotonic\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC LANGUAGE java AS 'return dividend / divisor;';", - keyspace.getName().asInternal())); - }); - } - - @Test - public void should_parse_function_with_deterministic_and_monotonic_on() { - assumeThat(isDse6OrHigher()) - .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") - .isTrue(); - String cqlFunction = - "CREATE FUNCTION det_and_monotonic_on(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';"; - execute(cqlFunction); - DseKeyspaceMetadata keyspace = getKeyspace(); - Optional functionOpt = - keyspace.getFunction("det_and_monotonic_on", DataTypes.INT, DataTypes.INT); - assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) - .hasValueSatisfying( - function -> { - assertThat(function.getDeterministic()).contains(true); - assertThat(function.getMonotonicity()).contains(Monotonicity.PARTIALLY_MONOTONIC); - assertThat(function.getMonotonicArgumentNames()) - .containsExactly(CqlIdentifier.fromCql("dividend")); - assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); - assertThat(function.describe(false)) - .isEqualTo( - String.format( - "CREATE FUNCTION \"%s\".\"det_and_monotonic_on\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';", - keyspace.getName().asInternal())); - }); - } - - private static boolean isDse6OrHigher() { - 
assumeThat(CCM_RULE.isDistributionOf(BackendType.DSE)) - .describedAs("DSE required for DseFunctionMetadata tests") - .isTrue(); - return CCM_RULE.getDistributionVersion().compareTo(DSE_6_0_0) >= 0; - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/KeyspaceGraphMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/KeyspaceGraphMetadataIT.java deleted file mode 100644 index dc96b265140..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/KeyspaceGraphMetadataIT.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8") -public class KeyspaceGraphMetadataIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - new CqlSessionRuleBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Test - public void should_expose_graph_engine_if_set() { - CqlSession session = SESSION_RULE.session(); - session.execute( - "CREATE KEYSPACE keyspace_metadata_it_graph_engine " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " - + "AND graph_engine = 'Core'"); - Metadata metadata = session.getMetadata(); - assertThat(metadata.getKeyspace("keyspace_metadata_it_graph_engine")) - .hasValueSatisfying( - keyspaceMetadata -> - assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) - .hasValue("Core")); - } - - @Test - public void should_expose_graph_engine_if_keyspace_altered() { - CqlSession session = SESSION_RULE.session(); 
- session.execute( - "CREATE KEYSPACE keyspace_metadata_it_graph_engine_alter " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); - assertThat(session.getMetadata().getKeyspace("keyspace_metadata_it_graph_engine_alter")) - .hasValueSatisfying( - keyspaceMetadata -> - assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) - .isEmpty()); - - session.execute( - "ALTER KEYSPACE keyspace_metadata_it_graph_engine_alter WITH graph_engine = 'Core'"); - assertThat(session.getMetadata().getKeyspace("keyspace_metadata_it_graph_engine_alter")) - .hasValueSatisfying( - keyspaceMetadata -> - assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) - .hasValue("Core")); - } - - @Test - public void should_not_allow_classic_graph_engine_to_be_specified_on_keyspace() { - CqlSession session = SESSION_RULE.session(); - assertThatThrownBy( - () -> - session.execute( - "CREATE KEYSPACE keyspace_metadata_it_graph_engine_classic " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " - + "AND graph_engine = 'Classic'")) - .hasMessageContaining("Invalid/unknown graph engine name 'Classic'"); - } - - @Test - public void should_expose_core_graph_engine_if_set() { - CqlSession session = SESSION_RULE.session(); - session.execute( - "CREATE KEYSPACE keyspace_metadata_it_graph_engine_core " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " - + "AND graph_engine = 'Core'"); - Metadata metadata = session.getMetadata(); - assertThat(metadata.getKeyspace("keyspace_metadata_it_graph_engine_core")) - .hasValueSatisfying( - keyspaceMetadata -> - assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) - .hasValue("Core")); - } - - @Test - public void should_expose_empty_graph_engine_if_not_set() { - // The default keyspace created by CcmRule has no graph engine - Metadata metadata = SESSION_RULE.session().getMetadata(); - 
assertThat(metadata.getKeyspace(SESSION_RULE.keyspace())) - .hasValueSatisfying( - keyspaceMetadata -> - assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) - .isEmpty()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataCaseSensitiveIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataCaseSensitiveIT.java deleted file mode 100644 index 35242294302..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataCaseSensitiveIT.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * A regression test for a specific case of schema parsing for graphs built from tables containing - * case-sensitive column names in its tables. See JAVA-2492 for more information. 
- */ -@Category(ParallelizableTests.class) -@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8") -public class TableGraphMetadataCaseSensitiveIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - new CqlSessionRuleBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void createTables() { - CqlSession session = SESSION_RULE.session(); - - session.execute( - "CREATE TABLE \"Person\" (\"Name\" varchar, \"Age\" int, PRIMARY KEY ((\"Name\"), \"Age\")) WITH VERTEX LABEL"); - session.execute( - "CREATE TABLE \"Software\" (\"Name\" varchar, \"Complexity\" int, PRIMARY KEY ((\"Name\"), \"Complexity\")) WITH VERTEX LABEL"); - session.execute( - "CREATE TABLE \"Created\"" - + " (\"PersonName\" varchar, \"SoftwareName\" varchar, \"PersonAge\" int, \"SoftwareComplexity\" int, weight int," - + " primary key ((\"PersonName\"), \"SoftwareName\", weight)) WITH EDGE LABEL\n" - + " FROM \"Person\"((\"PersonName\"),\"PersonAge\")" - + " TO \"Software\"((\"SoftwareName\"),\"SoftwareComplexity\");"); - } - - @Test - public void should_expose_case_sensitive_edge_metadata() { - CqlSession session = SESSION_RULE.session(); - Metadata metadata = session.getMetadata(); - assertThat(metadata.getKeyspace(SESSION_RULE.keyspace())) - .hasValueSatisfying( - keyspaceMetadata -> - assertThat(keyspaceMetadata.getTable(CqlIdentifier.fromInternal("Created"))) - .hasValueSatisfying( - created -> { - DseGraphTableMetadata dseCreated = (DseGraphTableMetadata) created; - assertThat(dseCreated.getEdge()) - .hasValueSatisfying( - edge -> { - assertThat(edge.getFromPartitionKeyColumns()) - .isEqualTo( - ImmutableList.of( - CqlIdentifier.fromInternal("PersonName"))); - assertThat(edge.getToPartitionKeyColumns()) - .isEqualTo( - ImmutableList.of( - CqlIdentifier.fromInternal("SoftwareName"))); - 
assertThat(edge.getFromClusteringColumns()) - .isEqualTo( - ImmutableList.of( - CqlIdentifier.fromInternal("PersonAge"))); - assertThat(edge.getToClusteringColumns()) - .isEqualTo( - ImmutableList.of( - CqlIdentifier.fromInternal("SoftwareComplexity"))); - }); - })); - } -} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataIT.java deleted file mode 100644 index 51a2204800e..00000000000 --- a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataIT.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.core.metadata.schema; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8") -public class TableGraphMetadataIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - new CqlSessionRuleBuilder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void createTables() { - CqlSession session = SESSION_RULE.session(); - - session.execute("CREATE TABLE person (name text PRIMARY KEY) WITH VERTEX LABEL"); - session.execute( - "CREATE TABLE software (company text, name text, version int, " - + "PRIMARY KEY ((company, name), version)) " - + "WITH VERTEX LABEL soft"); - session.execute( - "CREATE TABLE contributors (contributor text, company_name text, software_name text, " - + "software_version int, " - + "PRIMARY KEY(contributor, company_name, software_name, software_version)) " - + "WITH EDGE LABEL contrib " - + "FROM person(contributor) " - + "TO soft((company_name, 
software_name), software_version)"); - } - - @Test - public void should_expose_vertex_and_edge_metadata() { - CqlSession session = SESSION_RULE.session(); - Metadata metadata = session.getMetadata(); - assertThat(metadata.getKeyspace(SESSION_RULE.keyspace())) - .hasValueSatisfying( - keyspaceMetadata -> { - assertThat(keyspaceMetadata.getTable("person")) - .hasValueSatisfying( - person -> { - DseGraphTableMetadata dsePerson = (DseGraphTableMetadata) person; - assertThat(dsePerson.getVertex()) - .hasValueSatisfying( - vertex -> - assertThat(vertex.getLabelName()) - .isEqualTo(CqlIdentifier.fromInternal("person"))); - assertThat(dsePerson.getEdge()).isEmpty(); - }); - - assertThat(keyspaceMetadata.getTable("software")) - .hasValueSatisfying( - software -> { - DseGraphTableMetadata dseSoftware = (DseGraphTableMetadata) software; - assertThat(dseSoftware.getVertex()) - .hasValueSatisfying( - vertex -> - assertThat(vertex.getLabelName()) - .isEqualTo(CqlIdentifier.fromInternal("soft"))); - assertThat(dseSoftware.getEdge()).isEmpty(); - }); - - assertThat(keyspaceMetadata.getTable("contributors")) - .hasValueSatisfying( - contributors -> { - DseGraphTableMetadata dseContributors = - (DseGraphTableMetadata) contributors; - assertThat(dseContributors.getVertex()).isEmpty(); - assertThat(dseContributors.getEdge()) - .hasValueSatisfying( - edge -> { - assertThat(edge.getLabelName()) - .isEqualTo(CqlIdentifier.fromInternal("contrib")); - - assertThat(edge.getFromTable().asInternal()).isEqualTo("person"); - assertThat(edge.getFromLabel()) - .isEqualTo(CqlIdentifier.fromInternal("person")); - assertThat(edge.getFromPartitionKeyColumns()) - .containsExactly(CqlIdentifier.fromInternal("contributor")); - assertThat(edge.getFromClusteringColumns()).isEmpty(); - - assertThat(edge.getToTable().asInternal()).isEqualTo("software"); - assertThat(edge.getToLabel()) - .isEqualTo(CqlIdentifier.fromInternal("soft")); - assertThat(edge.getToPartitionKeyColumns()) - .containsExactly( - 
CqlIdentifier.fromInternal("company_name"), - CqlIdentifier.fromInternal("software_name")); - assertThat(edge.getToClusteringColumns()) - .containsExactly( - CqlIdentifier.fromInternal("software_version")); - }); - }); - }); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/CloudIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/CloudIT.java deleted file mode 100644 index f7990d707e4..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/CloudIT.java +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cloud; - -import static com.datastax.oss.driver.internal.core.util.LoggerTest.setupTestLogger; -import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; -import static com.github.tomakehurst.wiremock.client.WireMock.any; -import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; -import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; -import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.spi.ILoggingEvent; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.auth.AuthenticationException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import com.datastax.oss.driver.internal.core.config.cloud.CloudConfigFactory; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.github.tomakehurst.wiremock.junit.WireMockRule; -import java.io.IOException; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.NoSuchAlgorithmException; -import java.util.Collections; -import java.util.List; -import javax.net.ssl.SSLContext; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Rule; 
-import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(IsolatedTests.class) -@Ignore("Disabled because it is causing trouble in Jenkins CI") -public class CloudIT { - - private static final String BUNDLE_URL_PATH = "/certs/bundles/creds.zip"; - - @ClassRule public static SniProxyRule proxyRule = new SniProxyRule(); - - // Used only to host the secure connect bundle, for tests that require external URLs - @Rule - public WireMockRule wireMockRule = - new WireMockRule(wireMockConfig().dynamicPort().dynamicHttpsPort()); - - @Test - public void should_connect_to_proxy_using_path() { - ResultSet set; - Path bundle = proxyRule.getProxy().getDefaultBundlePath(); - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withCloudSecureConnectBundle(bundle) - .build()) { - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_and_log_info_that_config_json_with_username_password_was_provided() { - ResultSet set; - Path bundle = proxyRule.getProxy().getDefaultBundlePath(); - LoggerTest.LoggerSetup logger = setupTestLogger(CloudConfigFactory.class, Level.INFO); - - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withCloudSecureConnectBundle(bundle) - .build()) { - set = session.execute("select * from system.local"); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "The bundle contains config.json with username and/or password. 
Providing it in the bundle is deprecated and ignored."); - } - assertThat(set).isNotNull(); - } - - @Test - public void - should_fail_with_auth_error_when_connecting_using_bundle_with_username_password_in_config_json() { - Path bundle = proxyRule.getProxy().getDefaultBundlePath(); - - // fails with auth error because username/password from config.json is ignored - AllNodesFailedException exception = null; - try { - CqlSession.builder().withCloudSecureConnectBundle(bundle).build(); - } catch (AllNodesFailedException ex) { - exception = ex; - } - assertThat(exception).isNotNull(); - List errors = exception.getAllErrors().values().iterator().next(); - Throwable firstError = errors.get(0); - assertThat(firstError).isInstanceOf(AuthenticationException.class); - } - - @Test - public void should_connect_to_proxy_without_credentials() { - ResultSet set; - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - try (CqlSession session = - CqlSession.builder() - .withCloudSecureConnectBundle(bundle) - .withAuthCredentials("cassandra", "cassandra") - .build()) { - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_to_proxy_using_non_normalized_path() { - Path bundle = proxyRule.getProxy().getBundlesRootPath().resolve("../bundles/creds-v1.zip"); - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withCloudSecureConnectBundle(bundle) - .build()) { - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_to_proxy_using_input_stream() throws IOException { - InputStream bundle = Files.newInputStream(proxyRule.getProxy().getDefaultBundlePath()); - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withCloudSecureConnectBundle(bundle) - .build()) { - set = session.execute("select * 
from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_to_proxy_using_URL() throws IOException { - // given - byte[] bundle = Files.readAllBytes(proxyRule.getProxy().getDefaultBundlePath()); - stubFor( - any(urlEqualTo(BUNDLE_URL_PATH)) - .willReturn( - aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/octet-stream") - .withBody(bundle))); - URL bundleUrl = - new URL(String.format("http://localhost:%d%s", wireMockRule.port(), BUNDLE_URL_PATH)); - - // when - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withCloudSecureConnectBundle(bundleUrl) - .build()) { - - // then - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_to_proxy_using_absolute_path_provided_in_the_session_setting() { - // given - String bundle = proxyRule.getProxy().getDefaultBundlePath().toString(); - DriverConfigLoader loader = - DriverConfigLoader.programmaticBuilder() - .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundle) - .build(); - // when - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withConfigLoader(loader) - .build()) { - - // then - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_to_proxy_using_non_normalized_path_provided_in_the_session_setting() { - // given - String bundle = - proxyRule.getProxy().getBundlesRootPath().resolve("../bundles/creds-v1.zip").toString(); - DriverConfigLoader loader = - DriverConfigLoader.programmaticBuilder() - .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundle) - .build(); - // when - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withConfigLoader(loader) - .build()) { - - // then - 
set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void - should_connect_to_proxy_using_url_with_file_protocol_provided_in_the_session_setting() { - // given - String bundle = proxyRule.getProxy().getDefaultBundlePath().toString(); - DriverConfigLoader loader = - DriverConfigLoader.programmaticBuilder() - .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundle) - .build(); - // when - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withConfigLoader(loader) - .build()) { - - // then - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void should_connect_to_proxy_using_url_with_http_protocol_provided_in_the_session_setting() - throws IOException { - // given - byte[] bundle = Files.readAllBytes(proxyRule.getProxy().getDefaultBundlePath()); - stubFor( - any(urlEqualTo(BUNDLE_URL_PATH)) - .willReturn( - aResponse() - .withStatus(200) - .withHeader("Content-Type", "application/octet-stream") - .withBody(bundle))); - String bundleUrl = String.format("http://localhost:%d%s", wireMockRule.port(), BUNDLE_URL_PATH); - DriverConfigLoader loader = - DriverConfigLoader.programmaticBuilder() - .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundleUrl) - .build(); - // when - ResultSet set; - try (CqlSession session = - CqlSession.builder() - .withAuthCredentials("cassandra", "cassandra") - .withConfigLoader(loader) - .build()) { - - // then - set = session.execute("select * from system.local"); - } - assertThat(set).isNotNull(); - } - - @Test - public void - should_connect_and_log_info_when_contact_points_and_secure_bundle_used_programmatic() { - // given - LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); - - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - - try (CqlSession session = - CqlSession.builder() - 
.withCloudSecureConnectBundle(bundle) - .addContactPoint(new InetSocketAddress("127.0.0.1", 9042)) - .withAuthCredentials("cassandra", "cassandra") - .build(); ) { - - // when - ResultSet set = session.execute("select * from system.local"); - // then - assertThat(set).isNotNull(); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "Both a secure connect bundle and contact points were provided. These are mutually exclusive. The contact points from the secure bundle will have priority."); - - } finally { - logger.close(); - } - } - - @Test - public void should_connect_and_log_info_when_contact_points_and_secure_bundle_used_config() { - // given - LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); - - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.CONTACT_POINTS, Collections.singletonList("localhost:9042")) - .build(); - - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - - try (CqlSession session = - CqlSession.builder() - .withConfigLoader(loader) - .withCloudSecureConnectBundle(bundle) - .withAuthCredentials("cassandra", "cassandra") - .build(); ) { - - // when - ResultSet set = session.execute("select * from system.local"); - // then - assertThat(set).isNotNull(); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "Both a secure connect bundle and contact points were provided. These are mutually exclusive. 
The contact points from the secure bundle will have priority."); - - } finally { - logger.close(); - } - } - - @Test - public void should_connect_and_log_info_when_ssl_context_and_secure_bundle_used_programmatic() - throws NoSuchAlgorithmException { - // given - LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); - - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - - try (CqlSession session = - CqlSession.builder() - .withCloudSecureConnectBundle(bundle) - .withAuthCredentials("cassandra", "cassandra") - .withSslContext(SSLContext.getInstance("SSL")) - .build()) { - // when - ResultSet set = session.execute("select * from system.local"); - // then - assertThat(set).isNotNull(); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. 
The SSL options from the secure bundle will have priority."); - } finally { - logger.close(); - } - } - - @Test - public void should_error_when_ssl_context_and_secure_bundle_used_config() - throws NoSuchAlgorithmException { - // given - LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); - - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.RECONNECT_ON_INIT, true) - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .build(); - - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - - try (CqlSession session = - CqlSession.builder() - .withConfigLoader(loader) - .withCloudSecureConnectBundle(bundle) - .withAuthCredentials("cassandra", "cassandra") - .build()) { - // when - ResultSet set = session.execute("select * from system.local"); - // then - assertThat(set).isNotNull(); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. 
The SSL options from the secure bundle will have priority."); - } finally { - logger.close(); - } - } - - @Test - public void - should_connect_and_log_info_when_local_data_center_and_secure_bundle_used_programmatic() { - // given - LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); - - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc-ignore") - .build(); - - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - - try (CqlSession session = - CqlSession.builder() - .withCloudSecureConnectBundle(bundle) - .withConfigLoader(loader) - .withAuthCredentials("cassandra", "cassandra") - .build(); ) { - - // when - ResultSet set = session.execute("select * from system.local"); - // then - assertThat(set).isNotNull(); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. 
The local datacenter from the secure bundle will have priority."); - - } finally { - logger.close(); - } - } - - @Test - public void should_connect_and_log_info_when_local_data_center_and_secure_bundle_used_config() { - // given - LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); - - Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); - - try (CqlSession session = - CqlSession.builder() - .withCloudSecureConnectBundle(bundle) - .withLocalDatacenter("dc-ignored") - .withAuthCredentials("cassandra", "cassandra") - .build(); ) { - - // when - ResultSet set = session.execute("select * from system.local"); - // then - assertThat(set).isNotNull(); - verify(logger.appender, timeout(500).atLeast(1)) - .doAppend(logger.loggingEventCaptor.capture()); - assertThat( - logger.loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage)) - .contains( - "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. The local datacenter from the secure bundle will have priority."); - - } finally { - logger.close(); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyRule.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyRule.java deleted file mode 100644 index fa009de78ae..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyRule.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cloud; - -import org.junit.rules.ExternalResource; - -public class SniProxyRule extends ExternalResource { - - private final SniProxyServer proxy; - - public SniProxyRule() { - proxy = new SniProxyServer(); - } - - @Override - protected void before() { - proxy.startProxy(); - } - - @Override - protected void after() { - proxy.stopProxy(); - } - - public SniProxyServer getProxy() { - return proxy; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyServer.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyServer.java deleted file mode 100644 index 809354a7daf..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyServer.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.cloud; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.concurrent.TimeUnit; -import org.apache.commons.exec.CommandLine; -import org.apache.commons.exec.DefaultExecutor; -import org.apache.commons.exec.ExecuteStreamHandler; -import org.apache.commons.exec.ExecuteWatchdog; -import org.apache.commons.exec.Executor; -import org.apache.commons.exec.LogOutputStream; -import org.apache.commons.exec.PumpStreamHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class SniProxyServer { - - private static final Logger LOG = LoggerFactory.getLogger(SniProxyServer.class); - - private final Path proxyPath; - private final Path bundlesRootPath; - private final Path defaultBundlePath; - private final Path bundleWithoutCredentialsPath; - private final Path bundleWithoutClientCertificatesPath; - private final Path bundleWithInvalidCAPath; - private final Path bundleWithUnreachableMetadataServicePath; - - private volatile boolean running = false; - - public SniProxyServer() { - this(Paths.get(System.getProperty("proxy.path", "./"))); - } - - public SniProxyServer(Path proxyPath) { - this.proxyPath = proxyPath.normalize().toAbsolutePath(); - bundlesRootPath = proxyPath.resolve("certs/bundles/"); - defaultBundlePath = bundlesRootPath.resolve("creds-v1.zip"); - bundleWithoutCredentialsPath = bundlesRootPath.resolve("creds-v1-wo-creds.zip"); - bundleWithoutClientCertificatesPath = bundlesRootPath.resolve("creds-v1-wo-cert.zip"); - bundleWithInvalidCAPath = bundlesRootPath.resolve("creds-v1-invalid-ca.zip"); - bundleWithUnreachableMetadataServicePath = bundlesRootPath.resolve("creds-v1-unreachable.zip"); - } - - public void startProxy() { - CommandLine run = CommandLine.parse(proxyPath + "/run.sh"); - execute(run); - 
running = true; - } - - public void stopProxy() { - if (running) { - CommandLine findImageId = - CommandLine.parse("docker ps -a -q --filter ancestor=single_endpoint"); - String id = execute(findImageId); - CommandLine stop = CommandLine.parse("docker kill " + id); - execute(stop); - running = false; - } - } - - /** @return The root folder of the SNI proxy server docker image. */ - public Path getProxyPath() { - return proxyPath; - } - - /** - * @return The root folder where secure connect bundles exposed by this SNI proxy for testing - * purposes can be found. - */ - public Path getBundlesRootPath() { - return bundlesRootPath; - } - - /** - * @return The default secure connect bundle. It contains credentials and all certificates - * required to connect. - */ - public Path getDefaultBundlePath() { - return defaultBundlePath; - } - - /** @return A secure connect bundle without credentials in config.json. */ - public Path getBundleWithoutCredentialsPath() { - return bundleWithoutCredentialsPath; - } - - /** @return A secure connect bundle without client certificates (no identity.jks). */ - public Path getBundleWithoutClientCertificatesPath() { - return bundleWithoutClientCertificatesPath; - } - - /** @return A secure connect bundle with an invalid Certificate Authority. */ - public Path getBundleWithInvalidCAPath() { - return bundleWithInvalidCAPath; - } - - /** @return A secure connect bundle with an invalid address for the Proxy Metadata Service. 
*/ - public Path getBundleWithUnreachableMetadataServicePath() { - return bundleWithUnreachableMetadataServicePath; - } - - private String execute(CommandLine cli) { - LOG.debug("Executing: " + cli); - ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); - ByteArrayOutputStream outStream = new ByteArrayOutputStream(); - try (LogOutputStream errStream = - new LogOutputStream() { - @Override - protected void processLine(String line, int logLevel) { - LOG.error("sniendpointerr> {}", line); - } - }) { - Executor executor = new DefaultExecutor(); - ExecuteStreamHandler streamHandler = new PumpStreamHandler(outStream, errStream); - executor.setStreamHandler(streamHandler); - executor.setWatchdog(watchDog); - executor.setWorkingDirectory(proxyPath.toFile()); - int retValue = executor.execute(cli); - if (retValue != 0) { - LOG.error("Non-zero exit code ({}) returned from executing ccm command: {}", retValue, cli); - } - return outStream.toString(); - } catch (IOException ex) { - if (watchDog.killedProcess()) { - throw new RuntimeException("The command '" + cli + "' was killed after 10 minutes"); - } else { - throw new RuntimeException("The command '" + cli + "' failed to execute", ex); - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/AllNodesFailedIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/AllNodesFailedIT.java deleted file mode 100644 index ed453681a65..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/AllNodesFailedIT.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core; - -import static com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readTimeout; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class 
AllNodesFailedIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(2)); - - @Test - public void should_report_multiple_errors_per_node() { - SIMULACRON_RULE.cluster().prime(when("SELECT foo").then(readTimeout(ONE, 0, 0, false))); - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, MultipleRetryPolicy.class) - .build(); - - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .withConfigLoader(loader) - .build()) { - // when executing a query. - session.execute("SELECT foo"); - fail("AllNodesFailedException expected"); - } catch (AllNodesFailedException ex) { - assertThat(ex.getAllErrors()).hasSize(2); - Iterator>> iterator = ex.getAllErrors().entrySet().iterator(); - // first node should have been tried twice - Entry> node1Errors = iterator.next(); - assertThat(node1Errors.getValue()).hasSize(2); - // second node should have been tried twice - Entry> node2Errors = iterator.next(); - assertThat(node2Errors.getValue()).hasSize(2); - } - } - - public static class MultipleRetryPolicy extends DefaultRetryPolicy { - - public MultipleRetryPolicy(DriverContext context, String profileName) { - super(context, profileName); - } - - @Override - @Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - // retry each node twice - if (retryCount % 2 == 0) { - return RetryDecision.RETRY_SAME; - } else { - return RetryDecision.RETRY_NEXT; - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectIT.java deleted file mode 100644 index 67585bc691d..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectIT.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.session.Session; -import 
com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.server.BoundCluster; -import com.datastax.oss.simulacron.server.RejectScope; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Duration; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class ConnectIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(2)); - - @Before - public void setup() { - SIMULACRON_RULE.cluster().acceptConnections(); - SIMULACRON_RULE - .cluster() - .prime( - // Absolute minimum for a working schema metadata (we just want to check that it gets - // loaded at startup). 
- when("SELECT * FROM system_schema.keyspaces") - .then(rows().row("keyspace_name", "system").row("keyspace_name", "test"))); - } - - @Test - public void should_fail_fast_if_contact_points_unreachable_and_reconnection_disabled() { - // Given - SIMULACRON_RULE.cluster().rejectConnections(0, RejectScope.STOP); - - // When - Throwable t = catchThrowable(() -> SessionUtils.newSession(SIMULACRON_RULE)); - - // Then - assertThat(t) - .isInstanceOf(AllNodesFailedException.class) - .hasMessageContaining( - "Could not reach any contact point, make sure you've provided valid addresses"); - } - - @Test - public void should_wait_for_contact_points_if_reconnection_enabled() throws Exception { - // Given - SIMULACRON_RULE.cluster().rejectConnections(0, RejectScope.STOP); - - // When - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.RECONNECT_ON_INIT, true) - .withClass( - DefaultDriverOption.RECONNECTION_POLICY_CLASS, InitOnlyReconnectionPolicy.class) - // Use a short delay so we don't have to wait too long: - .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofMillis(500)) - .build(); - CompletableFuture sessionFuture = - newSessionAsync(loader).toCompletableFuture(); - // wait a bit to ensure we have a couple of reconnections, otherwise we might race and allow - // reconnections before the initial attempt - TimeUnit.SECONDS.sleep(2); - - // Then - assertThat(sessionFuture).isNotCompleted(); - - // When - SIMULACRON_RULE.cluster().acceptConnections(); - - // Then this doesn't throw - try (Session session = sessionFuture.get(30, TimeUnit.SECONDS)) { - assertThat(session.getMetadata().getKeyspaces()).containsKey(CqlIdentifier.fromCql("test")); - } - } - - /** - * Test for JAVA-1948. This ensures that when the LBP initialization fails that any connections - * are cleaned up appropriately. 
- */ - @Test - public void should_cleanup_on_lbp_init_failure() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .without(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER) - .build(); - assertThatThrownBy( - () -> - CqlSession.builder() - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .withConfigLoader(loader) - .build()) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining( - "Since you provided explicit contact points, the local DC must be explicitly set"); - // One second should be plenty of time for connections to close server side - await() - .atMost(1, TimeUnit.SECONDS) - .until(() -> SIMULACRON_RULE.cluster().getConnections().getConnections().isEmpty()); - } - - /** - * Test for JAVA-2177. This ensures that even if the first attempted contact point is unreachable, - * its distance is set to LOCAL and reconnections are scheduled. - */ - @Test - public void should_mark_unreachable_contact_points_as_local_and_schedule_reconnections() { - // Reject connections only on one node - BoundCluster boundCluster = SIMULACRON_RULE.cluster(); - boundCluster.node(0).rejectConnections(0, RejectScope.STOP); - - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - Map nodes = session.getMetadata().getNodes(); - // Node states are updated asynchronously, so guard against race conditions - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted( - () -> { - // Before JAVA-2177, this would fail every other time because if the node was tried - // first for the initial connection, it was marked down and not passed to - // LBP.init(), and therefore stayed at distance IGNORED. 
- Node node0 = nodes.get(boundCluster.node(0).getHostId()); - assertThat(node0.getState()).isEqualTo(NodeState.DOWN); - assertThat(node0.getDistance()).isEqualTo(NodeDistance.LOCAL); - assertThat(node0.getOpenConnections()).isEqualTo(0); - assertThat(node0.isReconnecting()).isTrue(); - - Node node1 = nodes.get(boundCluster.node(1).getHostId()); - assertThat(node1.getState()).isEqualTo(NodeState.UP); - assertThat(node1.getDistance()).isEqualTo(NodeDistance.LOCAL); - assertThat(node1.getOpenConnections()).isEqualTo(2); // control + regular - assertThat(node1.isReconnecting()).isFalse(); - }); - } - } - - @SuppressWarnings("unchecked") - private CompletionStage newSessionAsync(DriverConfigLoader loader) { - return SessionUtils.baseBuilder() - .addContactEndPoints(ConnectIT.SIMULACRON_RULE.getContactPoints()) - .withConfigLoader(loader) - .buildAsync(); - } - - /** - * Test policy that fails if a "runtime" control connection schedule is requested. - * - *

    This is just to check that {@link #newControlConnectionSchedule(boolean)} is called with the - * correct boolean parameter. - */ - public static class InitOnlyReconnectionPolicy extends ConstantReconnectionPolicy { - - public InitOnlyReconnectionPolicy(DriverContext context) { - super(context); - } - - @NonNull - @Override - public ReconnectionSchedule newControlConnectionSchedule(boolean isInitialConnection) { - if (isInitialConnection) { - return super.newControlConnectionSchedule(true); - } else { - throw new UnsupportedOperationException( - "should not be called with isInitialConnection==false"); - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectKeyspaceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectKeyspaceIT.java deleted file mode 100644 index af943b00184..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectKeyspaceIT.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ConnectKeyspaceIT { - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Test - public void should_connect_to_existing_keyspace() { - CqlIdentifier keyspace = SESSION_RULE.keyspace(); - try (Session session = SessionUtils.newSession(CCM_RULE, keyspace)) { - assertThat(session.getKeyspace()).hasValue(keyspace); - } - } - - @Test - public void should_connect_with_no_keyspace() { - try (Session session = SessionUtils.newSession(CCM_RULE)) { - assertThat(session.getKeyspace()).isEmpty(); - } - } - - @Test(expected = InvalidKeyspaceException.class) - public void should_fail_to_connect_to_non_existent_keyspace_when_not_reconnecting_on_init() { - should_fail_to_connect_to_non_existent_keyspace(null); - } - - @Test(expected = InvalidKeyspaceException.class) - public void 
should_fail_to_connect_to_non_existent_keyspace_when_reconnecting_on_init() { - // Just checking that we don't trigger retries for this unrecoverable error - should_fail_to_connect_to_non_existent_keyspace( - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.RECONNECT_ON_INIT, true) - .build()); - } - - private void should_fail_to_connect_to_non_existent_keyspace(DriverConfigLoader loader) { - CqlIdentifier keyspace = CqlIdentifier.fromInternal("does not exist"); - SessionUtils.newSession(CCM_RULE, keyspace, loader); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/PeersV2NodeRefreshIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/PeersV2NodeRefreshIT.java deleted file mode 100644 index 47f3e3957af..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/PeersV2NodeRefreshIT.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.datastax.oss.simulacron.server.BoundCluster; -import com.datastax.oss.simulacron.server.Server; -import java.util.concurrent.ExecutionException; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -/** Test for JAVA-2654. */ -public class PeersV2NodeRefreshIT { - - private static Server peersV2Server; - private static BoundCluster cluster; - - @BeforeClass - public static void setup() { - peersV2Server = Server.builder().withMultipleNodesPerIp(true).build(); - cluster = peersV2Server.register(ClusterSpec.builder().withNodes(2)); - } - - @AfterClass - public static void tearDown() { - if (cluster != null) { - cluster.stop(); - } - if (peersV2Server != null) { - peersV2Server.close(); - } - } - - @Test - public void should_successfully_send_peers_v2_node_refresh_query() - throws InterruptedException, ExecutionException { - CqlSession session = - CqlSession.builder().addContactPoint(cluster.node(1).inetSocketAddress()).build(); - Node node = findNonControlNode(session); - ((InternalDriverContext) session.getContext()) - .getMetadataManager() - .refreshNode(node) - .toCompletableFuture() - .get(); - assertThat(hasNodeRefreshQuery()) - .describedAs("Expecting peers_v2 node refresh query to be present but it wasn't") - .isTrue(); - } - - private Node findNonControlNode(CqlSession session) { - EndPoint controlNode = - ((InternalDriverContext) session.getContext()) - .getControlConnection() - 
.channel() - .getEndPoint(); - return session.getMetadata().getNodes().values().stream() - .filter(node -> !node.getEndPoint().equals(controlNode)) - .findAny() - .orElseThrow(() -> new IllegalStateException("Expecting at least one non-control node")); - } - - private boolean hasNodeRefreshQuery() { - for (QueryLog log : cluster.getLogs().getQueryLogs()) { - if (log.getFrame().message instanceof Query) { - if (((Query) log.getFrame().message) - .query.contains( - "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port")) { - return true; - } - } - } - return false; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/PoolBalancingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/PoolBalancingIT.java deleted file mode 100644 index c927976520b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/PoolBalancingIT.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core; - -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class PoolBalancingIT { - - private static final int POOL_SIZE = 2; - private static final int REQUESTS_PER_CONNECTION = 20; - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - DriverConfigLoader.programmaticBuilder() - .withInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS, REQUESTS_PER_CONNECTION) - .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, POOL_SIZE) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private CountDownLatch done; - private AtomicReference unexpectedErrorRef; - - @Before - public void setup() { - done = new CountDownLatch(1); - unexpectedErrorRef = new AtomicReference<>(); - } - - @Test - public void 
should_balance_requests_across_connections() throws InterruptedException { - // Generate just the right load to completely fill the pool. All requests should succeed. - int simultaneousRequests = POOL_SIZE * REQUESTS_PER_CONNECTION; - - for (int i = 0; i < simultaneousRequests; i++) { - reschedule(null, null); - } - SECONDS.sleep(1); - done.countDown(); - - Throwable unexpectedError = unexpectedErrorRef.get(); - if (unexpectedError != null) { - fail("At least one request failed unexpectedly", unexpectedError); - } - } - - private void reschedule(AsyncResultSet asyncResultSet, Throwable throwable) { - if (done.getCount() == 1) { - if (throwable != null - // Actually there is a tiny race condition where pool acquisition may still fail: channel - // sizes can change as the client is iterating through them, so it can look like they're - // all full even if there's always a free slot somewhere at every point in time. This will - // result in NoNodeAvailableException, ignore it. - && !(throwable instanceof NoNodeAvailableException)) { - unexpectedErrorRef.compareAndSet(null, throwable); - // Even a single error is a failure, no need to continue - done.countDown(); - } - SESSION_RULE - .session() - .executeAsync("SELECT release_version FROM system.local") - .whenComplete(this::reschedule); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionInitialNegotiationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionInitialNegotiationIT.java deleted file mode 100644 index 326c05eb15b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionInitialNegotiationIT.java +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.api.core.DseProtocolVersion; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** Covers protocol negotiation for the initial connection to the first contact point. 
*/ -@Category(ParallelizableTests.class) -public class ProtocolVersionInitialNegotiationIT { - - @Rule public CcmRule ccm = CcmRule.getInstance(); - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.1", - maxExclusive = "2.2", - description = "Only C* in [2.1,2.2[ has V3 as its highest version") - @BackendRequirement( - type = BackendType.DSE, - maxExclusive = "5.0", - description = "Only DSE in [*,5.0[ has V3 as its highest version") - @Test - public void should_downgrade_to_v3() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.2", - maxExclusive = "4.0-rc1", - description = "Only C* in [2.2,4.0-rc1[ has V4 as its highest version") - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - maxExclusive = "5.1", - description = "Only DSE in [5.0,5.1[ has V4 as its highest version") - @Test - public void should_downgrade_to_v4() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(4); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "4.0-rc1", - description = "Only C* in [4.0-rc1,*[ has V5 as its highest version") - @Test - public void should_downgrade_to_v5_oss() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(5); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1", - maxExclusive = "6.0", - description = "Only DSE in [5.1,6.0[ has DSE_V1 as its highest version") - @Test - public void should_downgrade_to_dse_v1() { - try (CqlSession session = SessionUtils.newSession(ccm)) 
{ - assertThat(session.getContext().getProtocolVersion()).isEqualTo(DseProtocolVersion.DSE_V1); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - maxExclusive = "2.2", - description = "Only C* in [*,2.2[ has V4 unsupported") - @BackendRequirement( - type = BackendType.DSE, - maxExclusive = "5.0", - description = "Only DSE in [*,5.0[ has V4 unsupported") - @Test - public void should_fail_if_provided_v4_is_not_supported() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") - .build(); - try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException anfe) { - Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); - assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); - UnsupportedProtocolVersionException unsupportedException = - (UnsupportedProtocolVersionException) cause; - assertThat(unsupportedException.getAttemptedVersions()) - .containsOnly(DefaultProtocolVersion.V4); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.1", - maxExclusive = "4.0-rc1", - description = "Only C* in [2.1,4.0-rc1[ has V5 unsupported or supported as beta") - @BackendRequirement( - type = BackendType.DSE, - maxExclusive = "7.0", - description = "Only DSE in [*,7.0[ has V5 unsupported or supported as beta") - @Test - public void should_fail_if_provided_v5_is_not_supported() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V5") - .build(); - try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException anfe) { - Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); - 
assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); - UnsupportedProtocolVersionException unsupportedException = - (UnsupportedProtocolVersionException) cause; - assertThat(unsupportedException.getAttemptedVersions()) - .containsOnly(DefaultProtocolVersion.V5); - } - } - - @BackendRequirement( - type = BackendType.DSE, - maxExclusive = "5.1", - description = "Only DSE in [*,5.1[ has DSE_V1 unsupported") - @Test - public void should_fail_if_provided_dse_v1_is_not_supported() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V1") - .build(); - try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException anfe) { - Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); - assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); - UnsupportedProtocolVersionException unsupportedException = - (UnsupportedProtocolVersionException) cause; - assertThat(unsupportedException.getAttemptedVersions()) - .containsOnly(DseProtocolVersion.DSE_V1); - } - } - - @BackendRequirement( - type = BackendType.DSE, - maxExclusive = "6.0", - description = "Only DSE in [*,6.0[ has DSE_V2 unsupported") - @Test - public void should_fail_if_provided_dse_v2_is_not_supported() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V2") - .build(); - try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException anfe) { - Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); - assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); - UnsupportedProtocolVersionException unsupportedException = - (UnsupportedProtocolVersionException) cause; - 
assertThat(unsupportedException.getAttemptedVersions()) - .containsOnly(DseProtocolVersion.DSE_V2); - } - } - - /** Note that this test will need to be updated as new protocol versions are introduced. */ - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "4.0", - description = "Only C* in [4.0,*[ has V5 supported") - @Test - public void should_not_downgrade_if_server_supports_latest_version() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion()).isEqualTo(ProtocolVersion.V5); - session.execute("select * from system.local"); - } - } - - /** Note that this test will need to be updated as new protocol versions are introduced. */ - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.0", - description = "Only DSE in [6.0,*[ has DSE_V2 supported") - @Test - public void should_not_downgrade_if_server_supports_latest_version_dse() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion()).isEqualTo(ProtocolVersion.DSE_V2); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.1", - description = "Only C* in [2.1,*[ has V3 supported") - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "4.8", - description = "Only DSE in [4.8,*[ has V3 supported") - @Test - public void should_use_explicitly_provided_v3() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.2", - description = "Only C* in [2.2,*[ has V4 supported") - @BackendRequirement( - type = BackendType.DSE, - 
minInclusive = "5.0", - description = "Only DSE in [5.0,*[ has V4 supported") - @Test - public void should_use_explicitly_provided_v4() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(4); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "4.0", - description = "Only C* in [4.0,*[ has V5 supported") - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "7.0", - description = "Only DSE in [7.0,*[ has V5 supported") - @Test - public void should_use_explicitly_provided_v5() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V5") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(5); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1", - description = "Only DSE in [5.1,*[ has DSE_V1 supported") - @Test - public void should_use_explicitly_provided_dse_v1() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V1") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion()).isEqualTo(DseProtocolVersion.DSE_V1); - session.execute("select * from system.local"); - } - } - - @BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.0", - description = "Only DSE in [6.0,*[ has DSE_V2 supported") - @Test - public void should_use_explicitly_provided_dse_v2() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - 
.withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V2") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion()).isEqualTo(DseProtocolVersion.DSE_V2); - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionMixedClusterIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionMixedClusterIT.java deleted file mode 100644 index fae7477063c..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionMixedClusterIT.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.DataCenterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.datastax.oss.simulacron.server.BoundCluster; -import com.datastax.oss.simulacron.server.BoundNode; -import com.datastax.oss.simulacron.server.BoundTopic; -import java.util.stream.Stream; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Covers protocol re-negotiation with a mixed cluster: if, after the initial connection and the - * first node list refresh, we find out that some nodes only support a lower version, reconnect the - * control connection immediately. 
- */ -@Category(ParallelizableTests.class) -public class ProtocolVersionMixedClusterIT { - - @Test - public void should_downgrade_if_peer_does_not_support_negotiated_version() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (BoundCluster simulacron = mixedVersions("3.0.0", "2.2.0", "2.1.0"); - BoundNode contactPoint = simulacron.node(0); - CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactPoint(contactPoint.inetSocketAddress()) - .withConfigLoader(loader) - .build()) { - - InternalDriverContext context = (InternalDriverContext) session.getContext(); - // General version should have been downgraded to V3 - assertThat(context.getProtocolVersion()).isEqualTo(DefaultProtocolVersion.V3); - // But control connection should still be using protocol V4 since node0 supports V4 - assertThat(context.getControlConnection().channel().protocolVersion()) - .isEqualTo(DefaultProtocolVersion.V4); - - assertThat(queries(simulacron)).hasSize(4); - - assertThat(protocolQueries(contactPoint, 4)) - .containsExactly( - // Initial connection with protocol v4 - "SELECT cluster_name FROM system.local", - "SELECT * FROM system.local", - "SELECT * FROM system.peers_v2", - "SELECT * FROM system.peers"); - } - } - - @Test - public void should_keep_current_if_supported_by_all_peers() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (BoundCluster simulacron = mixedVersions("3.0.0", "2.2.0", "3.11"); - BoundNode contactPoint = simulacron.node(0); - CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactPoint(contactPoint.inetSocketAddress()) - .withConfigLoader(loader) - .build()) { - - InternalDriverContext context = (InternalDriverContext) session.getContext(); - 
assertThat(context.getProtocolVersion()).isEqualTo(DefaultProtocolVersion.V4); - assertThat(queries(simulacron)).hasSize(4); - assertThat(protocolQueries(contactPoint, 4)) - .containsExactly( - // Initial connection with protocol v4 - "SELECT cluster_name FROM system.local", - "SELECT * FROM system.local", - "SELECT * FROM system.peers_v2", - "SELECT * FROM system.peers"); - } - } - - @Test - public void should_fail_if_peer_does_not_support_v3() { - - Throwable t = - catchThrowable( - () -> { - try (BoundCluster simulacron = mixedVersions("3.0.0", "2.0.9", "3.11"); - BoundNode contactPoint = simulacron.node(0); - CqlSession ignored = - (CqlSession) - SessionUtils.baseBuilder() - .addContactPoint(contactPoint.inetSocketAddress()) - .build()) { - fail("Cluster init should have failed"); - } - }); - - assertThat(t) - .isInstanceOf(UnsupportedProtocolVersionException.class) - .hasMessageContaining( - "reports Cassandra version 2.0.9, but the driver only supports 2.1.0 and above"); - } - - @Test - public void should_not_downgrade_and_force_down_old_nodes_if_version_forced() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (BoundCluster simulacron = mixedVersions("3.0.0", "2.2.0", "2.0.0"); - BoundNode contactPoint = simulacron.node(0); - CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactPoint(contactPoint.inetSocketAddress()) - .withConfigLoader(loader) - .build()) { - assertThat(session.getContext().getProtocolVersion()).isEqualTo(DefaultProtocolVersion.V4); - - assertThat(queries(simulacron)).hasSize(4); - assertThat(protocolQueries(contactPoint, 4)) - .containsExactly( - // Initial connection with protocol v4 - "SELECT cluster_name FROM system.local", - "SELECT * FROM system.local", - "SELECT * FROM system.peers_v2", - "SELECT * FROM system.peers"); - - // Note: the 2.0.0 would 
be forced down if we try to open a connection to it. We can't check - // that here because Simulacron can't prime STARTUP requests. - } - } - - private BoundCluster mixedVersions(String... versions) { - ClusterSpec clusterSpec = ClusterSpec.builder().withCassandraVersion(versions[0]).build(); - DataCenterSpec dc0 = clusterSpec.addDataCenter().build(); - // inherits versions[0] - dc0.addNode().build(); - for (int i = 1; i < versions.length; i++) { - dc0.addNode().withCassandraVersion(versions[i]).build(); - } - return SimulacronRule.server.register(clusterSpec); - } - - private Stream queries(BoundTopic topic) { - return topic.getLogs().getQueryLogs().stream() - .filter(q -> q.getFrame().message instanceof Query); - } - - private Stream protocolQueries(BoundTopic topic, int protocolVersion) { - return queries(topic) - .filter(q -> q.getFrame().protocolVersion == protocolVersion) - .map(QueryLog::getQuery); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/SerializationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/SerializationIT.java deleted file mode 100644 index b33e5421838..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/SerializationIT.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ColumnDefinition; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SerializationIT { - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - @Before - public void clear() { - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Test - public void should_serialize_node() { - // Given - Node node = 
SESSION_RULE.session().getMetadata().getNodes().values().iterator().next(); - - // When - Node deserializedNode = SerializationHelper.serializeAndDeserialize(node); - - // Then - // verify a few fields, no need to be exhaustive - assertThat(deserializedNode.getHostId()).isEqualTo(node.getHostId()); - assertThat(deserializedNode.getEndPoint()).isEqualTo(node.getEndPoint()); - assertThat(deserializedNode.getCassandraVersion()).isEqualTo(node.getCassandraVersion()); - } - - @Test - public void should_serialize_driver_exception() { - // Given - SIMULACRON_RULE.cluster().prime(when("mock query").then(serverError("mock server error"))); - try { - SESSION_RULE.session().execute("mock query"); - fail("Expected a ServerError"); - } catch (ServerError error) { - assertThat(error.getExecutionInfo()).isNotNull(); - - // When - ServerError deserializedError = SerializationHelper.serializeAndDeserialize(error); - - // Then - assertThat(deserializedError.getMessage()).isEqualTo("mock server error"); - assertThat(deserializedError.getCoordinator().getEndPoint()) - .isEqualTo(error.getCoordinator().getEndPoint()); - assertThat(deserializedError.getExecutionInfo()).isNull(); // transient - } - } - - @Test - public void should_serialize_row() { - // Given - SIMULACRON_RULE - .cluster() - .prime(when("mock query").then(rows().row("t", "mock data").columnTypes("t", "varchar"))); - Row row = SESSION_RULE.session().execute("mock query").one(); - - // When - row = SerializationHelper.serializeAndDeserialize(row); - - // Then - ColumnDefinition columnDefinition = row.getColumnDefinitions().get("t"); - assertThat(columnDefinition.getType()).isEqualTo(DataTypes.TEXT); - assertThat(row.getString("t")).isEqualTo("mock data"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/SessionLeakIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/SessionLeakIT.java deleted file mode 100644 index c0cf0b78e7f..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/SessionLeakIT.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.InvalidKeyspaceException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.IsolatedTests; -import 
com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import java.util.HashSet; -import java.util.Set; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@Category(IsolatedTests.class) -@RunWith(MockitoJUnitRunner.class) -public class SessionLeakIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - - @Before - public void setupLogger() { - Logger logger = (Logger) LoggerFactory.getLogger(DefaultSession.class); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - // no need to clean up after since this is an isolated test - } - - @Test - public void should_warn_when_session_count_exceeds_threshold() { - int threshold = 4; - // Set the config option explicitly, in case it gets overridden in the test application.conf: - DriverConfigLoader configLoader = - DriverConfigLoader.programmaticBuilder() - .withInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD, threshold) - .build(); - - Set sessions = new HashSet<>(); - - // Stay under the threshold, no warnings expected - for (int i = 0; i < threshold; i++) { - sessions.add(SessionUtils.newSession(SIMULACRON_RULE, configLoader)); - } - verify(appender, never()).doAppend(any()); - - // Go over the threshold, 1 warning for every new session - sessions.add(SessionUtils.newSession(SIMULACRON_RULE, configLoader)); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - 
.contains("You have too many session instances: 5 active, expected less than 4"); - - reset(appender); - sessions.add(SessionUtils.newSession(SIMULACRON_RULE, configLoader)); - verify(appender).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .contains("You have too many session instances: 6 active, expected less than 4"); - - // Go back under the threshold, no warnings expected - sessions.forEach(Session::close); - sessions.clear(); - reset(appender); - CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, configLoader); - verify(appender, never()).doAppend(any()); - session.close(); - } - - @Test - public void should_never_warn_when_session_init_fails() { - SIMULACRON_RULE - .cluster() - .prime(PrimeDsl.when("USE \"non_existent_keyspace\"").then(PrimeDsl.invalid("irrelevant"))); - int threshold = 4; - // Set the config option explicitly, in case it gets overridden in the test application.conf: - DriverConfigLoader configLoader = - DriverConfigLoader.programmaticBuilder() - .withInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD, threshold) - .build(); - // Go over the threshold, no warnings expected - for (int i = 0; i < threshold + 1; i++) { - try (Session session = - SessionUtils.newSession( - SIMULACRON_RULE, CqlIdentifier.fromCql("non_existent_keyspace"), configLoader)) { - fail("Session %s should have failed to initialize", session.getName()); - } catch (InvalidKeyspaceException e) { - assertThat(e.getMessage()).isEqualTo("Invalid keyspace non_existent_keyspace"); - } - } - verify(appender, never()).doAppend(any()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/auth/PlainTextAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/auth/PlainTextAuthProviderIT.java deleted file mode 100644 index 86dd6cda2fd..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/auth/PlainTextAuthProviderIT.java +++ /dev/null @@ 
-1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.auth; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.ProgrammaticPlainTextAuthProvider; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; - -public class PlainTextAuthProviderIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withCassandraConfiguration("authenticator", 
"PasswordAuthenticator") - .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") - .build(); - - @BeforeClass - public static void sleepForAuth() { - if (CCM_RULE.getCassandraVersion().compareTo(Version.V2_2_0) < 0) { - // Sleep for 1 second to allow C* auth to do its work. This is only needed for 2.1 - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - } - - @Test - public void should_connect_with_credentials() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra") - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } - - @Test - public void should_connect_with_programmatic_credentials() { - - SessionBuilder builder = - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withAuthCredentials("cassandra", "cassandra"); - - try (CqlSession session = (CqlSession) builder.build()) { - session.execute("select * from system.local"); - } - } - - @Test - public void should_connect_with_programmatic_provider() { - - AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("cassandra", "cassandra"); - SessionBuilder builder = - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - // Open more than one connection in order to validate that the provider is creating - // valid Credentials for every invocation of PlainTextAuthProviderBase.getCredentials. 
- .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 4) - .build()) - .withAuthProvider(authProvider); - - try (CqlSession session = (CqlSession) builder.build()) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_with_invalid_credentials() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "baduser") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "badpass") - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_with_invalid_programmatic_credentials() { - SessionBuilder builder = - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withAuthCredentials("baduser", "badpass"); - - try (CqlSession session = (CqlSession) builder.build()) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_with_invalid_programmatic_provider() { - - AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("baduser", "badpass"); - SessionBuilder builder = - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withAuthProvider(authProvider); - - try (CqlSession session = (CqlSession) builder.build()) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_without_credentials() { - try (CqlSession session = SessionUtils.newSession(CCM_RULE)) { - session.execute("select * from system.local"); - } - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/DirectCompressionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/DirectCompressionIT.java deleted file mode 100644 index 3dad08f4de6..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/DirectCompressionIT.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.compression; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.offset; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.time.Duration; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class DirectCompressionIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SCHEMA_SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SCHEMA_SESSION_RULE); - - @BeforeClass - public static void setup() { - SCHEMA_SESSION_RULE - .session() - .execute("CREATE TABLE test (k text PRIMARY KEY, t text, i int, f float)"); - } - - /** - * Validates that a cluster configured with Snappy compression and can execute queries 
that insert - * and retrieve data. - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test - public void should_execute_queries_with_snappy_compression() throws Exception { - Assume.assumeTrue( - "Snappy is not supported in OSS C* 4.0+ with protocol v5", - !CCM_RULE.isDistributionOf(BackendType.HCD) - && (CCM_RULE.isDistributionOf(BackendType.DSE) - || CCM_RULE.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) < 0)); - createAndCheckCluster("snappy"); - } - - /** - * Validates that a cluster configured with LZ4 compression and can execute queries that insert - * and retrieve data. - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test - public void should_execute_queries_with_lz4_compression() throws Exception { - createAndCheckCluster("lz4"); - } - - private void createAndCheckCluster(String compressorOption) { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compressorOption) - .build(); - try (CqlSession session = - SessionUtils.newSession(CCM_RULE, SCHEMA_SESSION_RULE.keyspace(), loader)) { - // Run a couple of simple test queries - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "INSERT INTO test (k, t, i, f) VALUES (?, ?, ?, ?)", "key", "foo", 42, 24.03f)); - assertThat(rs.iterator().hasNext()).isFalse(); - - ResultSet rs1 = session.execute("SELECT * FROM test WHERE k = 'key'"); - assertThat(rs1.iterator().hasNext()).isTrue(); - Row row = rs1.iterator().next(); - assertThat(rs1.iterator().hasNext()).isFalse(); - assertThat(row.getString("k")).isEqualTo("key"); - assertThat(row.getString("t")).isEqualTo("foo"); - assertThat(row.getInt("i")).isEqualTo(42); - assertThat(row.getFloat("f")).isEqualTo(24.03f, offset(0.1f)); - - ExecutionInfo executionInfo = rs.getExecutionInfo(); - // There's not 
much more we can check without hard-coding sizes. - // We are testing with small responses, so the compressed payload is not even guaranteed to be - // smaller. - assertThat(executionInfo.getResponseSizeInBytes()).isGreaterThan(0); - if (session.getContext().getProtocolVersion().getCode() == 5) { - // in protocol v5, compression is done at segment level - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1); - } else { - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isGreaterThan(0); - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/HeapCompressionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/HeapCompressionIT.java deleted file mode 100644 index a14c3b29b21..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/HeapCompressionIT.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.compression; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.offset; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import java.time.Duration; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(IsolatedTests.class) -public class HeapCompressionIT { - - static { - System.setProperty("io.netty.noPreferDirect", "true"); - System.setProperty("io.netty.noUnsafe", "true"); - } - - private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); - - private static final SessionRule SCHEMA_SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SCHEMA_SESSION_RULE); - - @BeforeClass - public static void setup() { - SCHEMA_SESSION_RULE - .session() - .execute("CREATE TABLE test (k text PRIMARY 
KEY, t text, i int, f float)"); - } - - /** - * Validates that Snappy compression still works when using heap buffers. - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test - public void should_execute_queries_with_snappy_compression() throws Exception { - Assume.assumeTrue( - "Snappy is not supported in OSS C* 4.0+ with protocol v5", - CCM_RULE.isDistributionOf(BackendType.DSE) - || CCM_RULE.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) < 0); - createAndCheckCluster("snappy"); - } - - /** - * Validates that LZ4 compression still works when using heap buffers. - * - * @test_category connection:compression - * @expected_result session established and queries made successfully using it. - */ - @Test - public void should_execute_queries_with_lz4_compression() throws Exception { - createAndCheckCluster("lz4"); - } - - private void createAndCheckCluster(String compressorOption) { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compressorOption) - .build(); - try (CqlSession session = - SessionUtils.newSession(CCM_RULE, SCHEMA_SESSION_RULE.keyspace(), loader)) { - // Run a couple of simple test queries - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "INSERT INTO test (k, t, i, f) VALUES (?, ?, ?, ?)", "key", "foo", 42, 24.03f)); - assertThat(rs.iterator().hasNext()).isFalse(); - - ResultSet rs1 = session.execute("SELECT * FROM test WHERE k = 'key'"); - assertThat(rs1.iterator().hasNext()).isTrue(); - Row row = rs1.iterator().next(); - assertThat(rs1.iterator().hasNext()).isFalse(); - assertThat(row.getString("k")).isEqualTo("key"); - assertThat(row.getString("t")).isEqualTo("foo"); - assertThat(row.getInt("i")).isEqualTo(42); - assertThat(row.getFloat("f")).isEqualTo(24.03f, offset(0.1f)); - - ExecutionInfo executionInfo = rs.getExecutionInfo(); - // There's not much more we 
can check without hard-coding sizes. - // We are testing with small responses, so the compressed payload is not even guaranteed to be - // smaller. - assertThat(executionInfo.getResponseSizeInBytes()).isGreaterThan(0); - if (session.getContext().getProtocolVersion().getCode() == 5) { - // in protocol v5, compression is done at segment level - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1); - } else { - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isGreaterThan(0); - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverConfigValidationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverConfigValidationIT.java deleted file mode 100644 index e5056e05495..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverConfigValidationIT.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.config; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.util.Collections; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class DriverConfigValidationIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @Test - public void should_fail_to_init_with_invalid_policy() { - should_fail_to_init_with_invalid_policy(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.RECONNECTION_POLICY_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.RETRY_POLICY_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.AUTH_PROVIDER_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.REQUEST_THROTTLER_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS); - } - - @Test - public void should_fail_to_init_with_invalid_components() { - should_fail_to_init_with_invalid_components(DefaultDriverOption.REQUEST_TRACKER_CLASSES); - 
should_fail_to_init_with_invalid_components( - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES); - should_fail_to_init_with_invalid_components( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES); - } - - private void should_fail_to_init_with_invalid_policy(DefaultDriverOption option) { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder().withString(option, "AClassThatDoesNotExist").build(); - assertConfigError(option, loader); - } - - private void should_fail_to_init_with_invalid_components(DefaultDriverOption option) { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList(option, Collections.singletonList("AClassThatDoesNotExist")) - .build(); - assertConfigError(option, loader); - } - - private void assertConfigError(DefaultDriverOption option, DriverConfigLoader loader) { - assertThatThrownBy(() -> SessionUtils.newSession(SIMULACRON_RULE, loader)) - .satisfies( - error -> - assertThat(error) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - "Can't find class AClassThatDoesNotExist " - + "(specified by " - + option.getPath() - + ")")); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileCcmIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileCcmIT.java deleted file mode 100644 index 1eee9c304b6..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileCcmIT.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.config; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.concurrent.CompletionStage; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class DriverExecutionProfileCcmIT { - - @ClassRule public static final CcmRule CCM_RULE = CcmRule.getInstance(); - - @Test - public void should_use_profile_page_size() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 100) - 
.startProfile("smallpages") - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 10) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - - CqlIdentifier keyspace = SessionUtils.uniqueKeyspaceId(); - DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); - SessionUtils.createKeyspace(session, keyspace, slowProfile); - - session.execute(String.format("USE %s", keyspace.asCql(false))); - - // load 500 rows (value beyond page size). - session.execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k int, v int, PRIMARY KEY (k,v))") - .setExecutionProfile(slowProfile) - .build()); - PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (0, ?)"); - BatchStatementBuilder bs = - BatchStatement.builder(DefaultBatchType.UNLOGGED).setExecutionProfile(slowProfile); - for (int i = 0; i < 500; i++) { - bs.addStatement(prepared.bind(i)); - } - session.execute(bs.build()); - - String query = "SELECT * FROM test where k=0"; - // Execute query without profile, should use global page size (100) - CompletionStage future = session.executeAsync(query); - AsyncResultSet result = CompletableFutures.getUninterruptibly(future); - assertThat(result.remaining()).isEqualTo(100); - result = CompletableFutures.getUninterruptibly(result.fetchNextPage()); - // next fetch should also be 100 pages. - assertThat(result.remaining()).isEqualTo(100); - - // Execute query with profile, should use profile page size - future = - session.executeAsync( - SimpleStatement.builder(query).setExecutionProfileName("smallpages").build()); - result = CompletableFutures.getUninterruptibly(future); - assertThat(result.remaining()).isEqualTo(10); - // next fetch should also be 10 pages. 
- result = CompletableFutures.getUninterruptibly(result.fetchNextPage()); - assertThat(result.remaining()).isEqualTo(10); - - SessionUtils.dropKeyspace(session, keyspace, slowProfile); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileReloadIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileReloadIT.java deleted file mode 100644 index 02bea70405e..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileReloadIT.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.config; - -import static com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader.DEFAULT_CONFIG_SUPPLIER; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.junit.Assert.fail; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.typesafe.config.ConfigFactory; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; - -public class DriverExecutionProfileReloadIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - @Before - public void clearPrimes() { - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Test - public void should_periodically_reload_configuration() { - String query = "mockquery"; - // Define a loader which configures a reload interval of 2s and current value of configSource. 
- AtomicReference configSource = new AtomicReference<>(""); - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader( - () -> - ConfigFactory.parseString( - "basic.config-reload-interval = 2s\n" - + "basic.request.timeout = 2s\n" - + configSource.get()) - .withFallback(DEFAULT_CONFIG_SUPPLIER.get())); - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .withConfigLoader(loader) - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .build()) { - SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); - - // Expect timeout since default session timeout is 2s - try { - session.execute(query); - fail("DriverTimeoutException expected"); - } catch (DriverTimeoutException e) { - // expected. - } - - // Bump up request timeout to 10 seconds and wait for config to reload. - configSource.set("basic.request.timeout = 10s"); - waitForConfigChange(session, 3, TimeUnit.SECONDS); - - // Execute again, should not timeout. - session.execute(query); - } - } - - @Test - public void should_reload_configuration_when_event_fired() { - String query = "mockquery"; - // Define a loader which configures no automatic reloads and current value of configSource. - AtomicReference configSource = new AtomicReference<>(""); - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader( - () -> - ConfigFactory.parseString( - "basic.config-reload-interval = 0\n" - + "basic.request.timeout = 2s\n" - + configSource.get()) - .withFallback(DEFAULT_CONFIG_SUPPLIER.get())); - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .withConfigLoader(loader) - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .build()) { - SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); - - // Expect timeout since default session timeout is 2s - try { - session.execute(query); - fail("DriverTimeoutException expected"); - } catch (DriverTimeoutException e) { - // expected. 
- } - - // Bump up request timeout to 10 seconds and trigger a manual reload. - configSource.set("basic.request.timeout = 10s"); - session.getContext().getConfigLoader().reload(); - waitForConfigChange(session, 500, TimeUnit.MILLISECONDS); - - // Execute again, should not timeout. - session.execute(query); - } - } - - @Test - public void should_not_allow_dynamically_adding_profile() { - String query = "mockquery"; - // Define a loader which configures a reload interval of 2s and current value of configSource. - AtomicReference configSource = new AtomicReference<>(""); - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader( - () -> - ConfigFactory.parseString( - "basic.config-reload-interval = 2s\n" + configSource.get()) - .withFallback(DEFAULT_CONFIG_SUPPLIER.get())); - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .withConfigLoader(loader) - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .build()) { - SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); - - // Expect failure because profile doesn't exist. - try { - session.execute(SimpleStatement.builder(query).setExecutionProfileName("slow").build()); - fail("Expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - // expected. - } - - // Bump up request timeout to 10 seconds on profile and wait for config to reload. - configSource.set("profiles.slow.basic.request.timeout = 10s"); - waitForConfigChange(session, 3, TimeUnit.SECONDS); - - // Execute again, should expect to fail again because doesn't allow to dynamically define - // profile. 
- Throwable t = - catchThrowable( - () -> - session.execute( - SimpleStatement.builder(query).setExecutionProfileName("slow").build())); - - assertThat(t).isInstanceOf(IllegalArgumentException.class); - } - } - - @Test - public void should_reload_profile_config_when_reloading_config() { - String query = "mockquery"; - // Define a loader which configures a reload interval of 2s and current value of configSource. - // Define initial profile settings so it initially exists. - AtomicReference configSource = new AtomicReference<>(""); - DefaultDriverConfigLoader loader = - new DefaultDriverConfigLoader( - () -> - ConfigFactory.parseString( - "profiles.slow.basic.request.consistency = ONE\n" - + "basic.config-reload-interval = 2s\n" - + "basic.request.timeout = 2s\n" - + configSource.get()) - .withFallback(DEFAULT_CONFIG_SUPPLIER.get())); - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .withConfigLoader(loader) - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .build()) { - SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); - - // Expect failure because profile doesn't exist. - try { - session.execute(SimpleStatement.builder(query).setExecutionProfileName("slow").build()); - fail("Expected DriverTimeoutException"); - } catch (DriverTimeoutException e) { - // expected. - } - - // Bump up request timeout to 10 seconds on profile and wait for config to reload. - configSource.set("profiles.slow.basic.request.timeout = 10s"); - waitForConfigChange(session, 3, TimeUnit.SECONDS); - - // Execute again, should succeed because profile timeout was increased. 
- session.execute(SimpleStatement.builder(query).setExecutionProfileName("slow").build()); - } - } - - private void waitForConfigChange(CqlSession session, long timeout, TimeUnit unit) { - CountDownLatch latch = new CountDownLatch(1); - ((InternalDriverContext) session.getContext()) - .getEventBus() - .register(ConfigChangeEvent.class, (e) -> latch.countDown()); - try { - boolean success = latch.await(timeout, unit); - assertThat(success).isTrue(); - } catch (InterruptedException e) { - throw new AssertionError("Interrupted while waiting for config change event", e); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileSimulacronIT.java deleted file mode 100644 index f5131a2bfa3..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileSimulacronIT.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.config; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.junit.Assert.fail; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import java.time.Duration; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class DriverExecutionProfileSimulacronIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - @Before - public void clearPrimes() { - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Test - public void should_fail_if_config_profile_specified_doesnt_exist() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - SimpleStatement statement = - 
SimpleStatement.builder("select * from system.local") - .setExecutionProfileName("IDONTEXIST") - .build(); - - Throwable t = catchThrowable(() -> session.execute(statement)); - - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Unknown profile 'IDONTEXIST'. Check your configuration."); - } - } - - @Test - public void should_use_profile_request_timeout() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(2)) - .startProfile("olap") - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)) - .build(); - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, loader)) { - String query = "mockquery"; - // configure query with delay of 4 seconds. - SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); - - // Execute query without profile, should timeout with default session timeout (2s). - try { - session.execute(query); - fail("Should have timed out"); - } catch (DriverTimeoutException e) { - // expected. - } - - // Execute query with profile, should not timeout since waits up to 10 seconds. - session.execute(SimpleStatement.builder(query).setExecutionProfileName("olap").build()); - } - } - - @Test - public void should_use_profile_default_idempotence() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .startProfile("idem") - .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) - .build(); - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, loader)) { - String query = "mockquery"; - // configure query with server error which should invoke onRequestError in retry policy. - SIMULACRON_RULE.cluster().prime(when(query).then(serverError("fail"))); - - // Execute query without profile, should fail because couldn't be retried. 
- try { - session.execute(query); - fail("Should have failed with server error"); - } catch (ServerError e) { - // expected. - } - - // Execute query with profile, should retry on all hosts since query is idempotent. - Throwable t = - catchThrowable( - () -> - session.execute( - SimpleStatement.builder(query).setExecutionProfileName("idem").build())); - - assertThat(t).isInstanceOf(AllNodesFailedException.class); - } - } - - @Test - public void should_use_profile_consistency() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .startProfile("cl") - .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM") - .withString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY, "LOCAL_SERIAL") - .build(); - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, loader)) { - String query = "mockquery"; - - // Execute query without profile, should use default CLs (LOCAL_ONE, SERIAL). - session.execute(query); - - Optional log = - SIMULACRON_RULE.cluster().getLogs().getQueryLogs().stream() - .filter(q -> q.getQuery().equals(query)) - .findFirst(); - - assertThat(log) - .isPresent() - .hasValueSatisfying( - (l) -> { - assertThat(l.getConsistency().toString()).isEqualTo("LOCAL_ONE"); - assertThat(l.getSerialConsistency().toString()).isEqualTo("SERIAL"); - }); - - SIMULACRON_RULE.cluster().clearLogs(); - - // Execute query with profile, should use profile CLs - session.execute(SimpleStatement.builder(query).setExecutionProfileName("cl").build()); - - log = - SIMULACRON_RULE.cluster().getLogs().getQueryLogs().stream() - .filter(q -> q.getQuery().equals(query)) - .findFirst(); - - assertThat(log) - .isPresent() - .hasValueSatisfying( - (l) -> { - assertThat(l.getConsistency().toString()).isEqualTo("LOCAL_QUORUM"); - assertThat(l.getSerialConsistency().toString()).isEqualTo("LOCAL_SERIAL"); - }); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/MapBasedConfigLoaderIT.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/MapBasedConfigLoaderIT.java deleted file mode 100644 index b8a6accce69..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/MapBasedConfigLoaderIT.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.config; - -import static com.datastax.oss.simulacron.common.codec.ConsistencyLevel.QUORUM; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.OptionsMap; -import com.datastax.oss.driver.api.core.config.TypedDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - 
-@Category(ParallelizableTests.class) -public class MapBasedConfigLoaderIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @Before - public void setup() { - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - /** - * Checks that runtime changes to the pool size are reflected in the driver. This is a special - * case because unlike other options, the driver does not re-read the option at regular intervals; - * instead, it relies on the {@link ConfigChangeEvent} being fired. - */ - @Test - public void should_resize_pool_when_config_changes() { - OptionsMap optionsMap = OptionsMap.driverDefaults(); - - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .withLocalDatacenter("dc1") - .withConfigLoader(DriverConfigLoader.fromMap(optionsMap)) - .build()) { - - Node node = session.getMetadata().getNodes().values().iterator().next(); - assertThat(node.getOpenConnections()).isEqualTo(2); // control connection + pool (default 1) - - optionsMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> node.getOpenConnections() == 3); - } - } - - /** Checks that profiles that have specific policy options will get their own policy instance. 
*/ - @Test - public void should_create_policies_per_profile() { - // Given - // a query that throws UNAVAILABLE - String mockQuery = "mock query"; - SIMULACRON_RULE.cluster().prime(when(mockQuery).then(unavailable(QUORUM, 3, 2))); - - // a default profile that uses the default retry policy, and an alternate profile that uses a - // policy that ignores all errors - OptionsMap optionsMap = OptionsMap.driverDefaults(); - String alternateProfile = "profile1"; - optionsMap.put( - alternateProfile, TypedDriverOption.RETRY_POLICY_CLASS, IgnoreAllPolicy.class.getName()); - - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .withLocalDatacenter("dc1") - .withConfigLoader(DriverConfigLoader.fromMap(optionsMap)) - .build()) { - - // When - // executing the query for the default profile - SimpleStatement defaultProfileStatement = SimpleStatement.newInstance(mockQuery); - assertThatThrownBy(() -> session.execute(defaultProfileStatement)) - .satisfies( - t -> { - // Then - // the UNAVAILABLE error is surfaced - assertThat(t).isInstanceOf(AllNodesFailedException.class); - AllNodesFailedException anfe = (AllNodesFailedException) t; - assertThat(anfe.getAllErrors()).hasSize(1); - List nodeErrors = anfe.getAllErrors().values().iterator().next(); - assertThat(nodeErrors).hasSize(1); - assertThat(nodeErrors.get(0)).isInstanceOf(UnavailableException.class); - }); - - // When - // executing the query for the alternate profile - SimpleStatement alternateProfileStatement = - SimpleStatement.newInstance(mockQuery).setExecutionProfileName(alternateProfile); - ResultSet rs = session.execute(alternateProfileStatement); - - // Then - // the error is ignored - assertThat(rs.one()).isNull(); - } - } - - public static class IgnoreAllPolicy implements RetryPolicy { - - public IgnoreAllPolicy( - @SuppressWarnings("unused") DriverContext context, - @SuppressWarnings("unused") String profile) { - // nothing to do - } - - @Override - 
@Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - return RetryDecision.IGNORE; - } - - @Override - @Deprecated - public RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - return RetryDecision.IGNORE; - } - - @Override - @Deprecated - public RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - return RetryDecision.IGNORE; - } - - @Override - @Deprecated - public RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - return RetryDecision.IGNORE; - } - - @Override - @Deprecated - public RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - return RetryDecision.IGNORE; - } - - @Override - public void close() { - // nothing to do - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/ChannelSocketOptionsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/ChannelSocketOptionsIT.java deleted file mode 100644 index 177a0cd0a24..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/ChannelSocketOptionsIT.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.connection; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_KEEP_ALIVE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_LINGER_INTERVAL; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_REUSE_ADDRESS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_TCP_NODELAY; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.SessionWrapper; -import 
com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import io.netty.channel.FixedRecvByteBufAllocator; -import io.netty.channel.RecvByteBufAllocator; -import io.netty.channel.socket.SocketChannelConfig; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ChannelSocketOptionsIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.SOCKET_TCP_NODELAY, true) - .withBoolean(DefaultDriverOption.SOCKET_KEEP_ALIVE, false) - .withBoolean(DefaultDriverOption.SOCKET_REUSE_ADDRESS, false) - .withInt(DefaultDriverOption.SOCKET_LINGER_INTERVAL, 10) - .withInt(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE, 123456) - .withInt(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE, 123456) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE).withConfigLoader(loader).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - @Test - public void should_report_socket_options() { - Session session = SESSION_RULE.session(); - DriverExecutionProfile config = session.getContext().getConfig().getDefaultProfile(); - assertThat(config.getBoolean(SOCKET_TCP_NODELAY)).isTrue(); - assertThat(config.getBoolean(SOCKET_KEEP_ALIVE)).isFalse(); - assertThat(config.getBoolean(SOCKET_REUSE_ADDRESS)).isFalse(); - assertThat(config.getInt(SOCKET_LINGER_INTERVAL)).isEqualTo(10); - assertThat(config.getInt(SOCKET_RECEIVE_BUFFER_SIZE)).isEqualTo(123456); - assertThat(config.getInt(SOCKET_SEND_BUFFER_SIZE)).isEqualTo(123456); - Node node = session.getMetadata().getNodes().values().iterator().next(); - if (session instanceof SessionWrapper) { - 
session = ((SessionWrapper) session).getDelegate(); - } - DriverChannel channel = ((DefaultSession) session).getChannel(node, "test"); - assertThat(channel).isNotNull(); - assertThat(channel.config()).isInstanceOf(SocketChannelConfig.class); - SocketChannelConfig socketConfig = (SocketChannelConfig) channel.config(); - assertThat(socketConfig.isTcpNoDelay()).isTrue(); - assertThat(socketConfig.isKeepAlive()).isFalse(); - assertThat(socketConfig.isReuseAddress()).isFalse(); - assertThat(socketConfig.getSoLinger()).isEqualTo(10); - RecvByteBufAllocator allocator = socketConfig.getRecvByteBufAllocator(); - assertThat(allocator).isInstanceOf(FixedRecvByteBufAllocator.class); - assertThat(allocator.newHandle().guess()).isEqualTo(123456); - // cannot assert around SO_RCVBUF and SO_SNDBUF, such values are just hints - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/FrameLengthIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/FrameLengthIT.java deleted file mode 100644 index 887a578f7c4..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/FrameLengthIT.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.connection; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.connection.FrameTooLongException; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.Buffer; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; 
-import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class FrameLengthIT { - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, SortingLoadBalancingPolicy.class) - .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, AlwaysRetryAbortedPolicy.class) - .withBytes(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH, 100 * 1024) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE).withConfigLoader(loader).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static final SimpleStatement LARGE_QUERY = - SimpleStatement.newInstance("select * from foo").setIdempotent(true); - private static final SimpleStatement SLOW_QUERY = - SimpleStatement.newInstance("select * from bar"); - - private static final Buffer ONE_HUNDRED_KB = ByteBuffer.allocate(100 * 1024).limit(100 * 1024); - - @BeforeClass - public static void primeQueries() { - SIMULACRON_RULE - .cluster() - .prime( - when(LARGE_QUERY.getQuery()) - .then(rows().row("result", ONE_HUNDRED_KB).columnTypes("result", "blob").build())); - SIMULACRON_RULE - .cluster() - .prime(when(SLOW_QUERY.getQuery()).then(noRows()).delay(60, TimeUnit.SECONDS)); - } - - @Test(expected = FrameTooLongException.class) - public void should_fail_if_request_exceeds_max_frame_length() { - SESSION_RULE - .session() - .execute(SimpleStatement.newInstance("insert into foo (k) values (?)", ONE_HUNDRED_KB)); - } - - @Test - public void should_fail_if_response_exceeds_max_frame_length() { - CompletionStage slowResultFuture = - SESSION_RULE.session().executeAsync(SLOW_QUERY); - try { - SESSION_RULE.session().execute(LARGE_QUERY); - fail("Expected a " + FrameTooLongException.class.getSimpleName()); - 
} catch (FrameTooLongException e) { - // expected - } - // Check that the error does not abort other requests on the same connection - assertThat(slowResultFuture.toCompletableFuture()).isNotCompleted(); - } - - /** - * A retry policy that always retries aborted requests. - * - *

    We use this to validate that {@link FrameTooLongException} is never passed to the policy (if - * it were, then this policy would retry it, and the exception thrown to the client would be an - * {@link AllNodesFailedException}). - */ - public static class AlwaysRetryAbortedPolicy extends DefaultRetryPolicy { - public AlwaysRetryAbortedPolicy(DriverContext context, String profileName) { - super(context, profileName); - } - - @Override - public RetryVerdict onRequestAbortedVerdict( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - return RetryVerdict.RETRY_NEXT; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/NettyResourceLeakDetectionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/NettyResourceLeakDetectionIT.java deleted file mode 100644 index c605db151df..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/NettyResourceLeakDetectionIT.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.connection; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import com.datastax.oss.protocol.internal.Segment; -import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.util.ResourceLeakDetector; -import io.netty.util.ResourceLeakDetector.Level; -import java.nio.ByteBuffer; -import java.util.List; -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - 
-@Category(IsolatedTests.class) -@RunWith(MockitoJUnitRunner.class) -public class NettyResourceLeakDetectionIT { - - static { - ResourceLeakDetector.setLevel(Level.PARANOID); - } - - private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - // Separately use BackendRequirementRule with @Rule so backend requirements are evaluated for each - // test method. - @Rule public final BackendRequirementRule backendRequirementRule = new BackendRequirementRule(); - - private static final ByteBuffer LARGE_PAYLOAD = - Bytes.fromHexString("0x" + Strings.repeat("ab", Segment.MAX_PAYLOAD_LENGTH + 100)); - - @Mock private Appender appender; - - @BeforeClass - public static void createTables() { - CqlSession session = SESSION_RULE.session(); - DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); - session.execute( - SimpleStatement.newInstance( - "CREATE TABLE IF NOT EXISTS leak_test_small (key int PRIMARY KEY, value int)") - .setExecutionProfile(slowProfile)); - session.execute( - SimpleStatement.newInstance( - "CREATE TABLE IF NOT EXISTS leak_test_large (key int PRIMARY KEY, value blob)") - .setExecutionProfile(slowProfile)); - } - - @Before - public void setupLogger() { - Logger logger = (Logger) LoggerFactory.getLogger(ResourceLeakDetector.class); - logger.setLevel(ch.qos.logback.classic.Level.ERROR); - logger.addAppender(appender); - } - - @After - public void resetLogger() { - Logger logger = (Logger) LoggerFactory.getLogger(ResourceLeakDetector.class); - logger.detachAppender(appender); - } - - @Test - public void should_not_leak_uncompressed() { - doLeakDetectionTest(SESSION_RULE.session()); - } - - @Test - public void should_not_leak_compressed_lz4() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - 
.withString(DefaultDriverOption.PROTOCOL_COMPRESSION, "lz4") - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace(), loader)) { - doLeakDetectionTest(session); - } - } - - @BackendRequirement( - type = BackendType.DSE, - description = "Snappy is not supported in OSS C* 4.0+ with protocol v5") - @BackendRequirement( - type = BackendType.CASSANDRA, - maxExclusive = "4.0.0", - description = "Snappy is not supported in OSS C* 4.0+ with protocol v5") - @Test - public void should_not_leak_compressed_snappy() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, "snappy") - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace(), loader)) { - doLeakDetectionTest(session); - } - } - - private void doLeakDetectionTest(CqlSession session) { - for (int i = 0; i < 10; i++) { - testSmallMessages(session); - verify(appender, never()).doAppend(any()); - System.gc(); - testLargeMessages(session); - verify(appender, never()).doAppend(any()); - System.gc(); - } - } - - private void testSmallMessages(CqlSession session) { - // trigger some activity using small requests and responses; in v5, these messages should fit in - // one single, self-contained segment - for (int i = 0; i < 1000; i++) { - session.execute("INSERT INTO leak_test_small (key, value) VALUES (?,?)", i, i); - } - List rows = session.execute("SELECT value FROM leak_test_small").all(); - assertThat(rows).hasSize(1000); - for (Row row : rows) { - assertThat(row).isNotNull(); - int actual = row.getInt(0); - assertThat(actual).isGreaterThanOrEqualTo(0).isLessThan(1000); - } - } - - private void testLargeMessages(CqlSession session) { - // trigger some activity using large requests and responses; in v5, these messages are likely to - // be split in multiple segments - for (int i = 0; i < 100; i++) { - session.execute( - "INSERT INTO leak_test_large (key, value) VALUES 
(?,?)", i, LARGE_PAYLOAD.duplicate()); - } - List rows = session.execute("SELECT value FROM leak_test_large").all(); - assertThat(rows).hasSize(100); - for (Row row : rows) { - assertThat(row).isNotNull(); - ByteBuffer actual = row.getByteBuffer(0); - assertThat(actual).isEqualTo(LARGE_PAYLOAD.duplicate()); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/context/LifecycleListenerIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/context/LifecycleListenerIT.java deleted file mode 100644 index 3bd4add3003..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/context/LifecycleListenerIT.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.context; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.context.LifecycleListener; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.server.RejectScope; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class LifecycleListenerIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @Test - public void should_notify_listener_of_init_and_shutdown() { - TestLifecycleListener listener = new TestLifecycleListener(); - assertThat(listener.ready).isFalse(); - assertThat(listener.closed).isFalse(); - - try (CqlSession session = newSession(listener)) { - await().atMost(1, TimeUnit.SECONDS).until(() -> listener.ready); - assertThat(listener.closed).isFalse(); - } - assertThat(listener.ready).isTrue(); 
- await().atMost(1, TimeUnit.SECONDS).until(() -> listener.closed); - } - - @Test - public void should_not_notify_listener_when_init_fails() { - TestLifecycleListener listener = new TestLifecycleListener(); - assertThat(listener.ready).isFalse(); - assertThat(listener.closed).isFalse(); - - SIMULACRON_RULE.cluster().rejectConnections(0, RejectScope.STOP); - try (CqlSession session = newSession(listener)) { - fail("Expected AllNodesFailedException"); - } catch (AllNodesFailedException ignored) { - } finally { - SIMULACRON_RULE.cluster().acceptConnections(); - } - assertThat(listener.ready).isFalse(); - await().atMost(1, TimeUnit.SECONDS).until(() -> listener.closed); - } - - private CqlSession newSession(TestLifecycleListener listener) { - TestContext context = new TestContext(new DefaultDriverConfigLoader(), listener); - return CompletableFutures.getUninterruptibly( - DefaultSession.init(context, SIMULACRON_RULE.getContactPoints(), null)); - } - - public static class TestLifecycleListener implements LifecycleListener { - volatile boolean ready; - volatile boolean closed; - - @Override - public void onSessionReady() { - ready = true; - } - - @Override - public void close() throws Exception { - closed = true; - } - } - - public static class TestContext extends DefaultDriverContext { - - private final List listeners; - - TestContext(DriverConfigLoader configLoader, TestLifecycleListener listener) { - super( - configLoader, - Collections.emptyList(), - null, - null, - null, - Collections.emptyMap(), - Collections.emptyMap(), - null); - this.listeners = ImmutableList.of(listener); - } - - @NonNull - @Override - public List getLifecycleListeners() { - return listeners; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/AsyncResultSetIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/AsyncResultSetIT.java deleted file mode 100644 index e109c28525e..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/AsyncResultSetIT.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Iterator; -import 
java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class AsyncResultSetIT { - - private static final int PAGE_SIZE = 100; - private static final int ROWS_PER_PARTITION = 1000; - private static final String PARTITION_KEY1 = "part"; - private static final String PARTITION_KEY2 = "part2"; - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, PAGE_SIZE) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - // create table and load data across two partitions so we can test paging across tokens. 
- SchemaChangeSynchronizer.withLock( - () -> { - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k0 text, k1 int, v int, PRIMARY KEY(k0, k1))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - }); - - PreparedStatement prepared = - SESSION_RULE.session().prepare("INSERT INTO test (k0, k1, v) VALUES (?, ?, ?)"); - - BatchStatementBuilder batchPart1 = BatchStatement.builder(DefaultBatchType.UNLOGGED); - BatchStatementBuilder batchPart2 = BatchStatement.builder(DefaultBatchType.UNLOGGED); - for (int i = 0; i < ROWS_PER_PARTITION; i++) { - batchPart1.addStatement(prepared.bind(PARTITION_KEY1, i, i)); - batchPart2.addStatement( - prepared.bind(PARTITION_KEY2, i + ROWS_PER_PARTITION, i + ROWS_PER_PARTITION)); - } - - SESSION_RULE - .session() - .execute(batchPart1.setExecutionProfile(SESSION_RULE.slowProfile()).build()); - SESSION_RULE - .session() - .execute(batchPart2.setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - @Test - public void should_only_iterate_over_rows_in_current_page() throws Exception { - // very basic test that just ensures that iterating over an AsyncResultSet only visits the first - // page. - CompletionStage result = - SESSION_RULE - .session() - .executeAsync( - SimpleStatement.builder("SELECT * FROM test where k0 = ?") - .addPositionalValue(PARTITION_KEY1) - .build()); - - AsyncResultSet rs = result.toCompletableFuture().get(); - - // Should only receive rows in page. 
- assertThat(rs.remaining()).isEqualTo(PAGE_SIZE); - assertThat(rs.hasMorePages()).isTrue(); - - Iterator rowIt = rs.currentPage().iterator(); - for (int i = 0; i < PAGE_SIZE; i++) { - Row row = rowIt.next(); - assertThat(row.getString("k0")).isEqualTo(PARTITION_KEY1); - assertThat(row.getInt("k1")).isEqualTo(i); - assertThat(row.getInt("v")).isEqualTo(i); - } - } - - @Test - public void should_iterate_over_all_pages_asynchronously_single_partition() throws Exception { - // Validates async paging behavior over single partition. - CompletionStage result = - SESSION_RULE - .session() - .executeAsync( - SimpleStatement.builder("SELECT * FROM test where k0 = ?") - .addPositionalValue(PARTITION_KEY1) - .build()) - .thenCompose(new AsyncResultSetConsumingFunction()); - - PageStatistics stats = result.toCompletableFuture().get(); - - assertThat(stats.rows).isEqualTo(ROWS_PER_PARTITION); - assertThat(stats.pages).isEqualTo((int) Math.ceil(ROWS_PER_PARTITION / (double) PAGE_SIZE)); - } - - @Test - public void should_iterate_over_all_pages_asynchronously_cross_partition() throws Exception { - // Validates async paging behavior over a range query. - CompletionStage result = - SESSION_RULE - .session() - .executeAsync("SELECT * FROM test") - .thenCompose(new AsyncResultSetConsumingFunction()); - - PageStatistics stats = result.toCompletableFuture().get(); - - assertThat(stats.rows).isEqualTo(ROWS_PER_PARTITION * 2); - assertThat(stats.pages).isEqualTo((int) Math.ceil(ROWS_PER_PARTITION * 2 / (double) PAGE_SIZE)); - } - - private static class PageStatistics { - int rows; - int pages; - - PageStatistics(int rows, int pages) { - this.rows = rows; - this.pages = pages; - } - } - - private static class AsyncResultSetConsumingFunction - implements Function> { - - // number of rows paged before exercising this function. - private final int rowsSoFar; - // number of pages encountered before exercising this function. 
- private final int pagesSoFar; - - AsyncResultSetConsumingFunction() { - this(0, 0); - } - - AsyncResultSetConsumingFunction(int rowsSoFar, int pagesSoFar) { - this.rowsSoFar = rowsSoFar; - this.pagesSoFar = pagesSoFar; - } - - @Override - public CompletionStage apply(AsyncResultSet result) { - int consumedRows = rowsSoFar; - - // Only count page if it has rows. - int pages = result.remaining() == 0 ? pagesSoFar : pagesSoFar + 1; - - // iterate over page and ensure data is in order. - for (Row row : result.currentPage()) { - int v = row.getInt("v"); - if (v != consumedRows) { - CompletableFuture next = new CompletableFuture<>(); - next.completeExceptionally( - new Exception(String.format("Expected v == %d, got %d.", consumedRows, v))); - return next; - } - consumedRows++; - } - - if (result.hasMorePages()) { - return result - .fetchNextPage() - .thenComposeAsync(new AsyncResultSetConsumingFunction(consumedRows, pages)); - } else { - CompletableFuture next = new CompletableFuture<>(); - next.complete(new PageStatistics(consumedRows, pages)); - return next; - } - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BatchStatementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BatchStatementIT.java deleted file mode 100644 index 8b652638729..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BatchStatementIT.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Iterator; -import java.util.List; -import org.junit.Before; -import org.junit.Rule; -import 
org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class BatchStatementIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Rule public TestName name = new TestName(); - - private static final int batchCount = 100; - - @Before - public void createTable() { - String[] schemaStatements = - new String[] { - "CREATE TABLE test (k0 text, k1 int, v int, PRIMARY KEY (k0, k1))", - "CREATE TABLE counter1 (k0 text PRIMARY KEY, c counter)", - "CREATE TABLE counter2 (k0 text PRIMARY KEY, c counter)", - "CREATE TABLE counter3 (k0 text PRIMARY KEY, c counter)", - }; - - SchemaChangeSynchronizer.withLock( - () -> { - for (String schemaStatement : schemaStatements) { - sessionRule - .session() - .execute( - SimpleStatement.newInstance(schemaStatement) - .setExecutionProfile(sessionRule.slowProfile())); - } - }); - } - - @Test - public void should_execute_batch_of_simple_statements_with_variables() { - // Build a batch of batchCount simple statements, each with their own positional variables. 
- BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - for (int i = 0; i < batchCount; i++) { - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ?, ?)", name.getMethodName())) - .addPositionalValues(i, i + 1) - .build(); - builder.addStatement(insert); - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - - verifyBatchInsert(); - } - - @Test - public void should_execute_batch_of_bound_statements_with_variables() { - // Build a batch of batchCount statements with bound statements, each with their own positional - // variables. - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ? , ?)", name.getMethodName())) - .build(); - PreparedStatement preparedStatement = sessionRule.session().prepare(insert); - - for (int i = 0; i < batchCount; i++) { - builder.addStatement(preparedStatement.bind(i, i + 1)); - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - - verifyBatchInsert(); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_execute_batch_of_bound_statements_with_unset_values() { - // Build a batch of batchCount statements with bound statements, each with their own positional - // variables. - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ? 
, ?)", name.getMethodName())) - .build(); - PreparedStatement preparedStatement = sessionRule.session().prepare(insert); - - for (int i = 0; i < batchCount; i++) { - builder.addStatement(preparedStatement.bind(i, i + 1)); - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - - verifyBatchInsert(); - - BatchStatementBuilder builder2 = BatchStatement.builder(DefaultBatchType.UNLOGGED); - for (int i = 0; i < batchCount; i++) { - BoundStatement boundStatement = preparedStatement.bind(i, i + 2); - // unset v every 20 statements. - if (i % 20 == 0) { - boundStatement = boundStatement.unset(1); - } - builder.addStatement(boundStatement); - } - - sessionRule.session().execute(builder2.build()); - - Statement select = - SimpleStatement.builder("SELECT * from test where k0 = ?") - .addPositionalValue(name.getMethodName()) - .build(); - - ResultSet result = sessionRule.session().execute(select); - - List rows = result.all(); - assertThat(rows).hasSize(100); - - Iterator iterator = rows.iterator(); - for (int i = 0; i < batchCount; i++) { - Row row = iterator.next(); - assertThat(row.getString("k0")).isEqualTo(name.getMethodName()); - assertThat(row.getInt("k1")).isEqualTo(i); - // value should be from first insert (i + 1) if at row divisble by 20, otherwise second. - int expectedValue = i % 20 == 0 ? i + 1 : i + 2; - if (i % 20 == 0) { - assertThat(row.getInt("v")).isEqualTo(expectedValue); - } - } - } - - @Test - public void should_execute_batch_of_bound_statements_with_named_variables() { - // Build a batch of batchCount statements with bound statements, each with their own named - // variable values. 
- BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - PreparedStatement preparedStatement = - sessionRule.session().prepare("INSERT INTO test (k0, k1, v) values (:k0, :k1, :v)"); - - for (int i = 0; i < batchCount; i++) { - builder.addStatement( - preparedStatement - .boundStatementBuilder() - .setString("k0", name.getMethodName()) - .setInt("k1", i) - .setInt("v", i + 1) - .build()); - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - - verifyBatchInsert(); - } - - @Test - public void should_execute_batch_of_bound_and_simple_statements_with_variables() { - // Build a batch of batchCount statements with simple and bound statements alternating. - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ? , ?)", name.getMethodName())) - .build(); - PreparedStatement preparedStatement = sessionRule.session().prepare(insert); - - for (int i = 0; i < batchCount; i++) { - if (i % 2 == 1) { - SimpleStatement simpleInsert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ?, ?)", name.getMethodName())) - .addPositionalValues(i, i + 1) - .build(); - builder.addStatement(simpleInsert); - } else { - builder.addStatement(preparedStatement.bind(i, i + 1)); - } - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - - verifyBatchInsert(); - } - - @Test - public void should_execute_cas_batch() { - // Build a batch with CAS operations on the same partition. - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ? , ?) 
IF NOT EXISTS", - name.getMethodName())) - .build(); - PreparedStatement preparedStatement = sessionRule.session().prepare(insert); - - for (int i = 0; i < batchCount; i++) { - builder.addStatement(preparedStatement.bind(i, i + 1)); - } - - BatchStatement batchStatement = builder.build(); - ResultSet result = sessionRule.session().execute(batchStatement); - assertThat(result.wasApplied()).isTrue(); - - verifyBatchInsert(); - - // re execute same batch and ensure wasn't applied. - result = sessionRule.session().execute(batchStatement); - assertThat(result.wasApplied()).isFalse(); - } - - @Test - public void should_execute_counter_batch() { - // should be able to do counter increments in a counter batch. - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.COUNTER); - - for (int i = 1; i <= 3; i++) { - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "UPDATE counter%d set c = c + %d where k0 = '%s'", - i, i, name.getMethodName())) - .build(); - builder.addStatement(insert); - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - - for (int i = 1; i <= 3; i++) { - ResultSet result = - sessionRule - .session() - .execute( - String.format( - "SELECT c from counter%d where k0 = '%s'", i, name.getMethodName())); - - List rows = result.all(); - assertThat(rows).hasSize(1); - - Row row = rows.iterator().next(); - assertThat(row.getLong("c")).isEqualTo(i); - } - } - - @Test(expected = InvalidQueryException.class) - public void should_fail_logged_batch_with_counter_increment() { - // should not be able to do counter inserts in a unlogged batch. 
- BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.LOGGED); - - for (int i = 1; i <= 3; i++) { - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "UPDATE counter%d set c = c + %d where k0 = '%s'", - i, i, name.getMethodName())) - .build(); - builder.addStatement(insert); - } - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - } - - @Test(expected = InvalidQueryException.class) - public void should_fail_counter_batch_with_non_counter_increment() { - // should not be able to do a counter batch if it contains a non-counter increment statement. - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.COUNTER); - - for (int i = 1; i <= 3; i++) { - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "UPDATE counter%d set c = c + %d where k0 = '%s'", - i, i, name.getMethodName())) - .build(); - builder.addStatement(insert); - } - // add a non-counter increment statement. 
- SimpleStatement simpleInsert = - SimpleStatement.builder( - String.format( - "INSERT INTO test (k0, k1, v) values ('%s', ?, ?)", name.getMethodName())) - .addPositionalValues(1, 2) - .build(); - builder.addStatement(simpleInsert); - - BatchStatement batchStatement = builder.build(); - sessionRule.session().execute(batchStatement); - } - - @Test - public void should_not_allow_unset_value_when_protocol_less_than_v4() { - // CREATE TABLE test (k0 text, k1 int, v int, PRIMARY KEY (k0, k1)) - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") - .build(); - try (CqlSession v3Session = SessionUtils.newSession(ccmRule, loader)) { - // Intentionally use fully qualified table here to avoid warnings as these are not supported - // by v3 protocol version, see JAVA-3068 - PreparedStatement prepared = - v3Session.prepare( - String.format( - "INSERT INTO %s.test (k0, k1, v) values (?, ?, ?)", sessionRule.keyspace())); - - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.LOGGED); - builder.addStatements( - // All set => OK - prepared.bind(name.getMethodName(), 1, 1), - // One variable unset => should fail - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, 2) - .unset(2) - .build()); - - assertThatThrownBy(() -> v3Session.execute(builder.build())) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Unset value at index"); - } - } - - private void verifyBatchInsert() { - // validate data inserted by the batch. 
- Statement select = - SimpleStatement.builder("SELECT * from test where k0 = ?") - .addPositionalValue(name.getMethodName()) - .build(); - - ResultSet result = sessionRule.session().execute(select); - - List rows = result.all(); - assertThat(rows).hasSize(100); - - Iterator iterator = rows.iterator(); - for (int i = 0; i < batchCount; i++) { - Row row = iterator.next(); - assertThat(row.getString("k0")).isEqualTo(name.getMethodName()); - assertThat(row.getInt("k1")).isEqualTo(i); - assertThat(row.getInt("v")).isEqualTo(i + 1); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementCcmIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementCcmIT.java deleted file mode 100644 index 9e4b62cd230..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementCcmIT.java +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assumptions.assumeThat; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.core.type.codec.CqlIntToStringCodec; -import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; -import 
com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.util.RoutingKey; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class BoundStatementCcmIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private final boolean atLeastV4 = ccmRule.getHighestProtocolVersion().getCode() >= 4; - - private SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 20) - .build()) - .build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Rule public TestName name = new TestName(); - - private static final String KEY = "test"; - - private static final int VALUE = 7; - - @Before - public void setupSchema() { - // table where every column forms the primary key. 
- SchemaChangeSynchronizer.withLock( - () -> { - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k text, v int, PRIMARY KEY(k, v))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - for (int i = 0; i < 100; i++) { - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO test (k, v) VALUES (?, ?)") - .addPositionalValues(KEY, i) - .build()); - } - - // table with simple primary key, single cell. - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test2 (k text primary key, v0 int)") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - - // table with composite partition key - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test3 " - + "(pk1 int, pk2 int, v int, " - + "PRIMARY KEY ((pk1, pk2)))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - }); - } - - @Test - public void should_not_allow_unset_value_when_protocol_less_than_v4() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") - .build(); - try (CqlSession v3Session = SessionUtils.newSession(ccmRule, loader)) { - // Intentionally use fully qualified table here to avoid warnings as these are not supported - // by v3 protocol version, see JAVA-3068 - PreparedStatement prepared = - v3Session.prepare( - String.format("INSERT INTO %s.test2 (k, v0) values (?, ?)", sessionRule.keyspace())); - - BoundStatement boundStatement = - prepared.boundStatementBuilder().setString(0, name.getMethodName()).unset(1).build(); - - assertThatThrownBy(() -> v3Session.execute(boundStatement)) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Unset value at index"); - } - } - - @Test - public void should_not_write_tombstone_if_value_is_implicitly_unset() { - assumeThat(atLeastV4).as("unset values require protocol V4+").isTrue(); - try 
(CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); - - session.execute(prepared.bind(name.getMethodName(), VALUE)); - - BoundStatement boundStatement = - prepared.boundStatementBuilder().setString(0, name.getMethodName()).build(); - - verifyUnset(session, boundStatement, name.getMethodName()); - } - } - - @Test - public void should_write_tombstone_if_value_is_explicitly_unset() { - assumeThat(atLeastV4).as("unset values require protocol V4+").isTrue(); - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); - - session.execute(prepared.bind(name.getMethodName(), VALUE)); - - BoundStatement boundStatement = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, VALUE + 1) // set initially, will be unset later - .build(); - - verifyUnset(session, boundStatement.unset(1), name.getMethodName()); - } - } - - @Test - public void should_write_tombstone_if_value_is_explicitly_unset_on_builder() { - assumeThat(atLeastV4).as("unset values require protocol V4+").isTrue(); - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); - - session.execute(prepared.bind(name.getMethodName(), VALUE)); - - BoundStatement boundStatement = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, VALUE + 1) // set initially, will be unset later - .unset(1) - .build(); - - verifyUnset(session, boundStatement, name.getMethodName()); - } - } - - @Test - public void should_have_empty_result_definitions_for_update_query() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = session.prepare("INSERT INTO 
test2 (k, v0) values (?, ?)"); - - assertThat(prepared.getResultSetDefinitions()).hasSize(0); - - ResultSet rs = session.execute(prepared.bind(name.getMethodName(), VALUE)); - assertThat(rs.getColumnDefinitions()).hasSize(0); - } - } - - @Test - public void should_bind_null_value_when_setting_values_in_bulk() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); - BoundStatement boundStatement = prepared.bind(name.getMethodName(), null); - assertThat(boundStatement.get(1, TypeCodecs.INT)).isNull(); - } - } - - @Test - public void should_allow_custom_codecs_when_setting_values_in_bulk() { - // v0 is an int column, but we'll bind a String to it - CqlIntToStringCodec codec = new CqlIntToStringCodec(); - try (CqlSession session = sessionWithCustomCodec(codec)) { - PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); - for (BoundStatement boundStatement : - ImmutableList.of( - prepared.bind(name.getMethodName(), "42"), - prepared.boundStatementBuilder(name.getMethodName(), "42").build())) { - - session.execute(boundStatement); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT v0 FROM test2 WHERE k = ?", name.getMethodName())); - assertThat(rs.one().getInt(0)).isEqualTo(42); - } - } - } - - @Test - public void should_use_page_size_from_simple_statement() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - SimpleStatement st = SimpleStatement.builder("SELECT v FROM test").setPageSize(10).build(); - PreparedStatement prepared = session.prepare(st); - CompletionStage future = session.executeAsync(prepared.bind()); - AsyncResultSet result = CompletableFutures.getUninterruptibly(future); - - // Should have only fetched 10 (page size) rows. 
- assertThat(result.remaining()).isEqualTo(10); - } - } - - @Test - public void should_use_page_size() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - // set page size on simple statement, but will be unused since - // overridden by bound statement. - SimpleStatement st = SimpleStatement.builder("SELECT v FROM test").setPageSize(10).build(); - PreparedStatement prepared = session.prepare(st); - CompletionStage future = - session.executeAsync(prepared.bind().setPageSize(12)); - AsyncResultSet result = CompletableFutures.getUninterruptibly(future); - - // Should have fetched 12 (page size) rows. - assertThat(result.remaining()).isEqualTo(12); - } - } - - @Test - public void should_propagate_attributes_when_preparing_a_simple_statement() { - CqlSession session = sessionRule.session(); - - DriverExecutionProfile mockProfile = - session - .getContext() - .getConfig() - .getDefaultProfile() - // Value doesn't matter, we just want a distinct profile - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)); - ByteBuffer mockPagingState = Bytes.fromHexString("0xaaaa"); - CqlIdentifier mockKeyspace = - supportsPerRequestKeyspace(session) ? 
CqlIdentifier.fromCql("system") : null; - CqlIdentifier mockRoutingKeyspace = CqlIdentifier.fromCql("mockRoutingKeyspace"); - ByteBuffer mockRoutingKey = Bytes.fromHexString("0xbbbb"); - Token mockRoutingToken = session.getMetadata().getTokenMap().get().newToken(mockRoutingKey); - Map mockCustomPayload = - NullAllowingImmutableMap.of("key1", Bytes.fromHexString("0xcccc")); - Duration mockTimeout = Duration.ofSeconds(1); - ConsistencyLevel mockCl = DefaultConsistencyLevel.LOCAL_QUORUM; - ConsistencyLevel mockSerialCl = DefaultConsistencyLevel.LOCAL_SERIAL; - int mockPageSize = 2000; - - SimpleStatementBuilder simpleStatementBuilder = - SimpleStatement.builder("SELECT release_version FROM system.local") - .setExecutionProfile(mockProfile) - .setPagingState(mockPagingState) - .setKeyspace(mockKeyspace) - .setRoutingKeyspace(mockRoutingKeyspace) - .setRoutingKey(mockRoutingKey) - .setRoutingToken(mockRoutingToken) - .setQueryTimestamp(42) - .setIdempotence(true) - .setTracing() - .setTimeout(mockTimeout) - .setConsistencyLevel(mockCl) - .setSerialConsistencyLevel(mockSerialCl) - .setPageSize(mockPageSize); - - if (atLeastV4) { - simpleStatementBuilder = - simpleStatementBuilder.addCustomPayload("key1", mockCustomPayload.get("key1")); - } - - PreparedStatement preparedStatement = session.prepare(simpleStatementBuilder.build()); - - // Cover all the ways to create bound statements: - ImmutableList> createMethods = - ImmutableList.of(PreparedStatement::bind, p -> p.boundStatementBuilder().build()); - - for (Function createMethod : createMethods) { - BoundStatement boundStatement = createMethod.apply(preparedStatement); - - assertThat(boundStatement.getExecutionProfile()).isEqualTo(mockProfile); - assertThat(boundStatement.getPagingState()).isEqualTo(mockPagingState); - assertThat(boundStatement.getRoutingKeyspace()) - .isEqualTo(mockKeyspace != null ? 
mockKeyspace : mockRoutingKeyspace); - assertThat(boundStatement.getRoutingKey()).isEqualTo(mockRoutingKey); - assertThat(boundStatement.getRoutingToken()).isEqualTo(mockRoutingToken); - if (atLeastV4) { - assertThat(boundStatement.getCustomPayload()).isEqualTo(mockCustomPayload); - } - assertThat(boundStatement.isIdempotent()).isTrue(); - assertThat(boundStatement.isTracing()).isTrue(); - assertThat(boundStatement.getTimeout()).isEqualTo(mockTimeout); - assertThat(boundStatement.getConsistencyLevel()).isEqualTo(mockCl); - assertThat(boundStatement.getSerialConsistencyLevel()).isEqualTo(mockSerialCl); - assertThat(boundStatement.getPageSize()).isEqualTo(mockPageSize); - - // Bound statements do not support per-query keyspaces, so this is not set - assertThat(boundStatement.getKeyspace()).isNull(); - // Should not be propagated - assertThat(boundStatement.getQueryTimestamp()).isEqualTo(Statement.NO_DEFAULT_TIMESTAMP); - } - } - - // Test for JAVA-2066 - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_compute_routing_key_when_indices_randomly_distributed() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - - PreparedStatement ps = session.prepare("INSERT INTO test3 (v, pk2, pk1) VALUES (?,?,?)"); - - List indices = ps.getPartitionKeyIndices(); - assertThat(indices).containsExactly(2, 1); - - BoundStatement bs = ps.bind(1, 2, 3); - ByteBuffer routingKey = bs.getRoutingKey(); - - assertThat(routingKey) - .isEqualTo(RoutingKey.compose(bs.getBytesUnsafe(2), bs.getBytesUnsafe(1))); - } - } - - @Test - public void should_set_all_occurrences_of_variable() { - CqlSession session = sessionRule.session(); - PreparedStatement ps = session.prepare("INSERT INTO test3 (pk1, pk2, v) VALUES (:i, :i, :i)"); - - CqlIdentifier id = CqlIdentifier.fromCql("i"); - ColumnDefinitions variableDefinitions = ps.getVariableDefinitions(); - 
assertThat(variableDefinitions.allIndicesOf(id)).containsExactly(0, 1, 2); - - should_set_all_occurrences_of_variable(ps.bind().setInt(id, 12)); - should_set_all_occurrences_of_variable(ps.boundStatementBuilder().setInt(id, 12).build()); - } - - private void should_set_all_occurrences_of_variable(BoundStatement bs) { - assertThat(bs.getInt(0)).isEqualTo(12); - assertThat(bs.getInt(1)).isEqualTo(12); - assertThat(bs.getInt(2)).isEqualTo(12); - - // Nothing should be shared internally (this would be a bug if the client later retrieves a - // buffer with getBytesUnsafe and modifies it) - ByteBuffer bytes0 = bs.getBytesUnsafe(0); - ByteBuffer bytes1 = bs.getBytesUnsafe(1); - assertThat(bytes0).isNotNull(); - assertThat(bytes1).isNotNull(); - // Not the same instance - assertThat(bytes0).isNotSameAs(bytes1); - // Contents are not shared - bytes0.putInt(0, 11); - assertThat(bytes1.getInt(0)).isEqualTo(12); - bytes0.putInt(0, 12); - - CqlSession session = sessionRule.session(); - session.execute(bs); - Row row = session.execute("SELECT * FROM test3 WHERE pk1 = 12 AND pk2 = 12").one(); - assertThat(row).isNotNull(); - assertThat(row.getInt("v")).isEqualTo(12); - } - - private static void verifyUnset( - CqlSession session, BoundStatement boundStatement, String valueName) { - session.execute(boundStatement.unset(1)); - - // Verify that no tombstone was written by reading data back and ensuring initial value is - // retained. 
- ResultSet result = - session.execute( - SimpleStatement.builder("SELECT v0 from test2 where k = ?") - .addPositionalValue(valueName) - .build()); - - Row row = result.iterator().next(); - assertThat(row.getInt(0)).isEqualTo(VALUE); - } - - @SuppressWarnings("unchecked") - private CqlSession sessionWithCustomCodec(CqlIntToStringCodec codec) { - return (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(ccmRule.getContactPoints()) - .withKeyspace(sessionRule.keyspace()) - .addTypeCodecs(codec) - .build(); - } - - private boolean supportsPerRequestKeyspace(CqlSession session) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); - return protocolVersionRegistry.supports( - context.getProtocolVersion(), DefaultProtocolFeature.PER_REQUEST_KEYSPACE); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementSimulacronIT.java deleted file mode 100644 index cb81874d47a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementSimulacronIT.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.query; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import java.time.Duration; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class BoundStatementSimulacronIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @Before - public void clearPrimes() { - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Test - public void 
should_use_consistencies_from_simple_statement() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - SimpleStatement st = - SimpleStatement.builder("SELECT * FROM test where k = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO) - .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - PreparedStatement prepared = session.prepare(st); - SIMULACRON_RULE.cluster().clearLogs(); - // since query is unprimed, we use a text value for bind parameter as this is - // what simulacron expects for unprimed statements. - session.execute(prepared.bind("0")); - - List logs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs(); - assertThat(logs).hasSize(1); - - QueryLog log = logs.get(0); - - Message message = log.getFrame().message; - assertThat(message).isInstanceOf(Execute.class); - Execute execute = (Execute) message; - assertThat(execute.options.consistency) - .isEqualTo(DefaultConsistencyLevel.TWO.getProtocolCode()); - assertThat(execute.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL.getProtocolCode()); - } - } - - @Test - public void should_use_consistencies() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - // set consistencies on simple statement, but they will be unused since - // overridden by bound statement. - SimpleStatement st = - SimpleStatement.builder("SELECT * FROM test where k = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO) - .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - PreparedStatement prepared = session.prepare(st); - SIMULACRON_RULE.cluster().clearLogs(); - // since query is unprimed, we use a text value for bind parameter as this is - // what simulacron expects for unprimed statements. 
- session.execute( - prepared - .boundStatementBuilder("0") - .setConsistencyLevel(DefaultConsistencyLevel.THREE) - .setSerialConsistencyLevel(DefaultConsistencyLevel.SERIAL) - .build()); - - List logs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs(); - assertThat(logs).hasSize(1); - - QueryLog log = logs.get(0); - - Message message = log.getFrame().message; - assertThat(message).isInstanceOf(Execute.class); - Execute execute = (Execute) message; - assertThat(execute.options.consistency) - .isEqualTo(DefaultConsistencyLevel.THREE.getProtocolCode()); - assertThat(execute.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.SERIAL.getProtocolCode()); - } - } - - @Test - public void should_use_timeout_from_simple_statement() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - LinkedHashMap params = new LinkedHashMap<>(ImmutableMap.of("k", 0)); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("k", "int")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "mock query", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE), - params, - paramTypes)) - .then(noRows()) - .delay(1500, TimeUnit.MILLISECONDS)); - SimpleStatement st = - SimpleStatement.builder("mock query") - .setTimeout(Duration.ofSeconds(1)) - .setConsistencyLevel(DefaultConsistencyLevel.ONE) - .build(); - PreparedStatement prepared = session.prepare(st); - - Throwable t = catchThrowable(() -> session.execute(prepared.bind(0))); - - assertThat(t) - .isInstanceOf(DriverTimeoutException.class) - .hasMessage("Query timed out after PT1S"); - } - } - - @Test - public void should_use_timeout() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - LinkedHashMap params = new LinkedHashMap<>(ImmutableMap.of("k", 0)); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("k", "int")); - // set timeout on simple statement, but will be unused since overridden by bound statement. 
- SIMULACRON_RULE - .cluster() - .prime( - when(query( - "mock query", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE), - params, - paramTypes)) - .then(noRows()) - .delay(1500, TimeUnit.MILLISECONDS)); - SimpleStatement st = - SimpleStatement.builder("mock query") - .setTimeout(Duration.ofSeconds(1)) - .setConsistencyLevel(DefaultConsistencyLevel.ONE) - .build(); - PreparedStatement prepared = session.prepare(st); - - Throwable t = - catchThrowable( - () -> session.execute(prepared.bind(0).setTimeout(Duration.ofMillis(150)))); - - assertThat(t) - .isInstanceOf(DriverTimeoutException.class) - .hasMessage("Query timed out after PT0.15S"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/ExecutionInfoWarningsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/ExecutionInfoWarningsIT.java deleted file mode 100644 index edee9723a38..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/ExecutionInfoWarningsIT.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.cql.CqlRequestHandler; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import com.google.common.base.Strings; -import java.util.List; -import java.util.stream.Collectors; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -public class ExecutionInfoWarningsIT { - - private static final String KEY = "test"; - - private CustomCcmRule ccmRule = - new CustomCcmRule.Builder() - // set the warn threshold to 5Kb (default is 64Kb 
in newer versions) - .withCassandraConfiguration("batch_size_warn_threshold_in_kb", "5") - .build(); - private SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 20) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH) - .startProfile("log-disabled") - .withString(DefaultDriverOption.REQUEST_LOG_WARNINGS, "false") - .build()) - .build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Mock private Appender appender; - @Captor private ArgumentCaptor loggingEventCaptor; - private Logger logger; - private Level originalLoggerLevel; - - @Before - public void createSchema() { - // table with simple primary key, single cell. - SchemaChangeSynchronizer.withLock( - () -> { - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k int primary key, v text)") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - }); - for (int i = 0; i < 100; i++) { - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO test (k, v) VALUES (?, ?)") - .addPositionalValues(KEY, i) - .build()); - } - } - - @Before - public void setupLogger() { - logger = (Logger) LoggerFactory.getLogger(CqlRequestHandler.class); - originalLoggerLevel = logger.getLevel(); - logger.setLevel(Level.WARN); - logger.addAppender(appender); - } - - @After - public void cleanupLogger() { - logger.setLevel(originalLoggerLevel); - logger.detachAppender(appender); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "3.0") - public void should_execute_query_and_log_server_side_warnings() { - final String query = "SELECT count(*) FROM test;"; - Statement st = SimpleStatement.builder(query).build(); - ResultSet result = sessionRule.session().execute(st); - - ExecutionInfo executionInfo = 
result.getExecutionInfo(); - assertThat(executionInfo).isNotNull(); - List warnings = executionInfo.getWarnings(); - assertThat(warnings).isNotEmpty(); - String warning = warnings.get(0); - assertThat(warning).isEqualTo("Aggregation query used without partition key"); - // verify the log was generated - verify(appender, timeout(500).times(1)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getMessage()).isNotNull(); - String logMessage = loggingEventCaptor.getValue().getFormattedMessage(); - assertThat(logMessage) - .startsWith( - "Query '[0 values] " - + query - + "' generated server side warning(s): Aggregation query used without partition key"); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "3.0") - public void should_execute_query_and_not_log_server_side_warnings() { - final String query = "SELECT count(*) FROM test;"; - Statement st = - SimpleStatement.builder(query).setExecutionProfileName("log-disabled").build(); - ResultSet result = sessionRule.session().execute(st); - - ExecutionInfo executionInfo = result.getExecutionInfo(); - assertThat(executionInfo).isNotNull(); - List warnings = executionInfo.getWarnings(); - assertThat(warnings).isNotEmpty(); - String warning = warnings.get(0); - assertThat(warning).isEqualTo("Aggregation query used without partition key"); - // verify the log was NOT generated - verify(appender, timeout(500).times(0)).doAppend(loggingEventCaptor.capture()); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_expose_warnings_on_execution_info() { - // the default batch size warn threshold is 5 * 1024 bytes, but after CASSANDRA-10876 there must - // be multiple mutations in a batch to trigger this warning so the batch includes 2 different - // inserts. 
- final String query = - String.format( - "BEGIN UNLOGGED BATCH\n" - + "INSERT INTO test (k, v) VALUES (1, '%s')\n" - + "INSERT INTO test (k, v) VALUES (2, '%s')\n" - + "APPLY BATCH", - Strings.repeat("1", 2 * 1024), Strings.repeat("1", 3 * 1024)); - Statement st = SimpleStatement.builder(query).build(); - ResultSet result = sessionRule.session().execute(st); - ExecutionInfo executionInfo = result.getExecutionInfo(); - assertThat(executionInfo).isNotNull(); - List warnings = executionInfo.getWarnings(); - assertThat(warnings).isNotEmpty(); - // verify the log was generated - verify(appender, timeout(500).atLeast(1)).doAppend(loggingEventCaptor.capture()); - List logMessages = - loggingEventCaptor.getAllValues().stream() - .map(ILoggingEvent::getFormattedMessage) - .collect(Collectors.toList()); - assertThat(logMessages) - .anySatisfy( - logMessage -> - assertThat(logMessage) - .startsWith("Query '") - // different versiosns of Cassandra produce slightly different formated logs - // the .contains() below verify the common bits - .contains( - query.substring(0, RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH)) - .contains("' generated server side warning(s): ") - .contains("Batch") - .contains("for") - .contains(String.format("%s.test", sessionRule.keyspace().asCql(true))) - .contains("is of size") - .containsPattern("exceeding specified .*threshold")); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/NowInSecondsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/NowInSecondsIT.java deleted file mode 100644 index 191dc040ffd..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/NowInSecondsIT.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.function.Function; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") -// Use next version -- not sure if it will be in by then, but as a reminder to check -@BackendRequirement( - type = 
BackendType.DSE, - minInclusive = "7.0", - description = "Feature not available in DSE yet") -public class NowInSecondsIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Before - public void setup() { - for (String statement : - ImmutableList.of( - "DROP TABLE IF EXISTS test", "CREATE TABLE test(k int PRIMARY KEY, v int)")) { - SESSION_RULE - .session() - .execute( - SimpleStatement.newInstance(statement) - .setExecutionProfile(SESSION_RULE.slowProfile())); - } - } - - @Test - public void should_use_now_in_seconds_with_simple_statement() { - should_use_now_in_seconds(SimpleStatement::newInstance); - } - - @Test - public void should_use_now_in_seconds_with_bound_statement() { - should_use_now_in_seconds( - queryString -> { - PreparedStatement preparedStatement = SESSION_RULE.session().prepare(queryString); - return preparedStatement.bind(); - }); - } - - @Test - public void should_use_now_in_seconds_with_batch_statement() { - should_use_now_in_seconds( - queryString -> - BatchStatement.newInstance(BatchType.LOGGED, SimpleStatement.newInstance(queryString))); - } - - private > void should_use_now_in_seconds( - Function buildWriteStatement) { - CqlSession session = SESSION_RULE.session(); - - // Given - StatementT writeStatement = - buildWriteStatement.apply("INSERT INTO test (k,v) VALUES (1,1) USING TTL 20"); - SimpleStatement readStatement = - SimpleStatement.newInstance("SELECT TTL(v) FROM test WHERE k = 1"); - - // When - // insert at t = 0 with TTL 20 - session.execute(writeStatement.setNowInSeconds(0)); - // read TTL at t = 10 - ResultSet rs = session.execute(readStatement.setNowInSeconds(10)); - int remainingTtl = rs.one().getInt(0); - - // Then - assertThat(remainingTtl).isEqualTo(10); - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingIterableSpliteratorIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingIterableSpliteratorIT.java deleted file mode 100644 index 02078b683db..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingIterableSpliteratorIT.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.ArrayList; -import java.util.List; -import java.util.Spliterator; -import java.util.stream.StreamSupport; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -@Category(ParallelizableTests.class) -public class PagingIterableSpliteratorIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setupSchema() { - SESSION_RULE - .session() - 
.execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k0 int, k1 int, v int, PRIMARY KEY(k0, k1))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - PreparedStatement prepared = - SESSION_RULE.session().prepare("INSERT INTO test (k0, k1, v) VALUES (?, ?, ?)"); - for (int i = 0; i < 20_000; i += 1_000) { - BatchStatementBuilder batch = BatchStatement.builder(DefaultBatchType.UNLOGGED); - for (int j = 0; j < 1_000; j++) { - int n = i + j; - batch.addStatement(prepared.bind(0, n, n)); - } - SESSION_RULE.session().execute(batch.setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - } - - @Test - @UseDataProvider("pageSizes") - public void should_consume_spliterator(int pageSize, boolean parallel) throws Exception { - CqlSession session = SESSION_RULE.session(); - DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, pageSize); - ResultSet result = - session.execute( - SimpleStatement.newInstance("SELECT v FROM test where k0 = 0") - .setExecutionProfile(profile)); - Spliterator spliterator = result.spliterator(); - if (pageSize > 20_000) { - // if the page size is greater than the result set size, - // we create a SinglePageResultSet with known spliterator size - assertThat(spliterator.estimateSize()).isEqualTo(20_000); - assertThat(spliterator.getExactSizeIfKnown()).isEqualTo(20_000); - assertThat(spliterator.characteristics()) - .isEqualTo( - Spliterator.ORDERED - | Spliterator.IMMUTABLE - | Spliterator.NONNULL - | Spliterator.SIZED - | Spliterator.SUBSIZED); - } else { - assertThat(spliterator.estimateSize()).isEqualTo(Long.MAX_VALUE); - assertThat(spliterator.getExactSizeIfKnown()).isEqualTo(-1); - assertThat(spliterator.characteristics()) - .isEqualTo(Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL); - } - long count = StreamSupport.stream(spliterator, parallel).count(); - assertThat(count).isEqualTo(20_000L); - 
} - - @DataProvider - public static Iterable pageSizes() { - List> arguments = new ArrayList<>(); - arguments.add(Lists.newArrayList(30_000, false)); - arguments.add(Lists.newArrayList(20_000, false)); - arguments.add(Lists.newArrayList(10_000, false)); - arguments.add(Lists.newArrayList(5_000, false)); - arguments.add(Lists.newArrayList(500, false)); - arguments.add(Lists.newArrayList(9_999, false)); - arguments.add(Lists.newArrayList(10_001, false)); - arguments.add(Lists.newArrayList(5, false)); - arguments.add(Lists.newArrayList(19_995, false)); - arguments.add(Lists.newArrayList(30_000, true)); - arguments.add(Lists.newArrayList(20_000, true)); - arguments.add(Lists.newArrayList(10_000, true)); - arguments.add(Lists.newArrayList(5_000, true)); - arguments.add(Lists.newArrayList(500, true)); - arguments.add(Lists.newArrayList(9_999, true)); - arguments.add(Lists.newArrayList(10_001, true)); - arguments.add(Lists.newArrayList(5, true)); - arguments.add(Lists.newArrayList(19_995, true)); - return arguments; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingStateIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingStateIT.java deleted file mode 100644 index 6d33f35238a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingStateIT.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PagingState; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.type.codec.IntCodec; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.function.UnaryOperator; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class PagingStateIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static 
final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule public static TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Before - public void setupSchema() { - CqlSession session = SESSION_RULE.session(); - SchemaChangeSynchronizer.withLock( - () -> { - session.execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS foo (k int, cc int, v int, PRIMARY KEY(k, cc))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - }); - for (int i = 0; i < 20; i++) { - session.execute( - SimpleStatement.newInstance("INSERT INTO foo (k, cc, v) VALUES (1, ?, ?)", i, i)); - } - } - - @Test - public void should_extract_and_reuse() { - should_extract_and_reuse(UnaryOperator.identity()); - } - - @Test - public void should_convert_to_bytes() { - should_extract_and_reuse(pagingState -> PagingState.fromBytes(pagingState.toBytes())); - } - - @Test - public void should_convert_to_string() { - should_extract_and_reuse(pagingState -> PagingState.fromString(pagingState.toString())); - } - - private void should_extract_and_reuse(UnaryOperator transformation) { - CqlSession session = SESSION_RULE.session(); - - BoundStatement boundStatement = - session - .prepare(SimpleStatement.newInstance("SELECT * FROM foo WHERE k = ?").setPageSize(15)) - .bind(1); - - ResultSet resultSet = session.execute(boundStatement); - assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(15); - assertThat(resultSet.isFullyFetched()).isFalse(); - - PagingState pagingState = - transformation.apply(resultSet.getExecutionInfo().getSafePagingState()); - - assertThat(pagingState.matches(boundStatement)).isTrue(); - resultSet = session.execute(boundStatement.setPagingState(pagingState)); - assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(5); - assertThat(resultSet.isFullyFetched()).isTrue(); - } - - @Test - public void should_inject_in_simple_statement_with_custom_codecs() { - try (CqlSession session = - (CqlSession) - 
SessionUtils.baseBuilder() - .addTypeCodecs(new IntWrapperCodec()) - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(SESSION_RULE.keyspace()) - .build()) { - - SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM foo WHERE k = ?", new IntWrapper(1)) - .setPageSize(15); - - ResultSet resultSet = session.execute(statement); - assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(15); - assertThat(resultSet.isFullyFetched()).isFalse(); - - PagingState pagingState = resultSet.getExecutionInfo().getSafePagingState(); - - // This is the case where we need the session: simple statements are not attached, so - // setPagingState() cannot find the custom codec. - try { - @SuppressWarnings("unused") - SimpleStatement ignored = statement.setPagingState(pagingState); - fail("Expected a CodecNotFoundException"); - } catch (CodecNotFoundException e) { - // expected - } - - resultSet = session.execute(statement.setPagingState(pagingState, session)); - assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(5); - assertThat(resultSet.isFullyFetched()).isTrue(); - } - } - - @Test - public void should_fail_if_query_does_not_match() { - should_fail("SELECT * FROM foo WHERE k = ?", 1, "SELECT v FROM FOO WHERE k = ?", 1); - } - - @Test - public void should_fail_if_values_do_not_match() { - should_fail("SELECT * FROM foo WHERE k = ?", 1, "SELECT * FROM foo WHERE k = ?", 2); - } - - private void should_fail(String query1, int value1, String query2, int value2) { - CqlSession session = SESSION_RULE.session(); - - BoundStatement boundStatement1 = - session.prepare(SimpleStatement.newInstance(query1).setPageSize(15)).bind(value1); - - ResultSet resultSet = session.execute(boundStatement1); - PagingState pagingState = resultSet.getExecutionInfo().getSafePagingState(); - - @SuppressWarnings("ResultOfMethodCallIgnored") - Throwable t = - catchThrowable( - () -> - session - .prepare(SimpleStatement.newInstance(query2).setPageSize(15)) - 
.bind(value2) - .setPagingState(pagingState)); - - assertThat(t).isInstanceOf(IllegalArgumentException.class); - } - - static class IntWrapper { - final int value; - - public IntWrapper(int value) { - this.value = value; - } - } - - static class IntWrapperCodec extends MappingCodec { - - protected IntWrapperCodec() { - super(new IntCodec(), GenericType.of(IntWrapper.class)); - } - - @Nullable - @Override - protected IntWrapper innerToOuter(@Nullable Integer value) { - return value == null ? null : new IntWrapper(value); - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable IntWrapper wrapper) { - return wrapper == null ? null : wrapper.value; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PerRequestKeyspaceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PerRequestKeyspaceIT.java deleted file mode 100644 index 9eb883144db..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PerRequestKeyspaceIT.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.nio.ByteBuffer; -import java.time.Duration; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -/** - * Note: at the time of writing, this test exercises features of an unreleased Cassandra version. To - * test against a local build, run with - * - *

    - *   -Dccm.version=4.0.0 -Dccm.directory=/path/to/cassandra -Ddatastax-java-driver.advanced.protocol.version=V5
    - * 
    - */ -@Category(ParallelizableTests.class) -public class PerRequestKeyspaceIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Rule public TestName nameRule = new TestName(); - - @Before - public void setupSchema() { - SchemaChangeSynchronizer.withLock( - () -> { - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS foo (k text, cc int, v int, PRIMARY KEY(k, cc))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - }); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_reject_simple_statement_with_keyspace_in_protocol_v4() { - should_reject_statement_with_keyspace_in_protocol_v4( - SimpleStatement.newInstance("SELECT * FROM foo").setKeyspace(sessionRule.keyspace())); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_reject_batch_statement_with_explicit_keyspace_in_protocol_v4() { - SimpleStatement statementWithoutKeyspace = - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", nameRule.getMethodName(), 1, 1); - should_reject_statement_with_keyspace_in_protocol_v4( - BatchStatement.builder(DefaultBatchType.LOGGED) - .setKeyspace(sessionRule.keyspace()) - .addStatement(statementWithoutKeyspace) - .build()); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_reject_batch_statement_with_inferred_keyspace_in_protocol_v4() { - SimpleStatement statementWithKeyspace = - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", nameRule.getMethodName(), 1, 1) - .setKeyspace(sessionRule.keyspace()); - should_reject_statement_with_keyspace_in_protocol_v4( - BatchStatement.builder(DefaultBatchType.LOGGED) - 
.addStatement(statementWithKeyspace) - .build()); - } - - private void should_reject_statement_with_keyspace_in_protocol_v4(Statement statement) { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - Throwable t = catchThrowable(() -> session.execute(statement)); - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("Can't use per-request keyspace with protocol V4"); - } - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_execute_simple_statement_with_keyspace() { - CqlSession session = sessionRule.session(); - session.execute( - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", nameRule.getMethodName(), 1, 1) - .setKeyspace(sessionRule.keyspace())); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT v FROM foo WHERE k = ? AND cc = 1", nameRule.getMethodName()) - .setKeyspace(sessionRule.keyspace())) - .one(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_execute_batch_with_explicit_keyspace() { - CqlSession session = sessionRule.session(); - session.execute( - BatchStatement.builder(DefaultBatchType.LOGGED) - .setKeyspace(sessionRule.keyspace()) - .addStatements( - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", nameRule.getMethodName(), 1, 1), - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", nameRule.getMethodName(), 2, 2)) - .build()); - - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT v FROM foo WHERE k = ? 
AND cc = 1", nameRule.getMethodName()) - .setKeyspace(sessionRule.keyspace())) - .one(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_execute_batch_with_inferred_keyspace() { - CqlSession session = sessionRule.session(); - session.execute( - BatchStatement.builder(DefaultBatchType.LOGGED) - .setKeyspace(sessionRule.keyspace()) - .addStatements( - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", - nameRule.getMethodName(), - 1, - 1) - .setKeyspace(sessionRule.keyspace()), - SimpleStatement.newInstance( - "INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)", - nameRule.getMethodName(), - 2, - 2) - .setKeyspace(sessionRule.keyspace())) - .build()); - - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT v FROM foo WHERE k = ? AND cc = 1", nameRule.getMethodName()) - .setKeyspace(sessionRule.keyspace())) - .one(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_prepare_statement_with_keyspace() { - CqlSession session = sessionRule.session(); - PreparedStatement prepared = - session.prepare( - SimpleStatement.newInstance("INSERT INTO foo (k, cc, v) VALUES (?, ?, ?)") - .setKeyspace(sessionRule.keyspace())); - session.execute(prepared.bind(nameRule.getMethodName(), 1, 1)); - - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT v FROM foo WHERE k = ? 
AND cc = 1", nameRule.getMethodName()) - .setKeyspace(sessionRule.keyspace())) - .one(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_reprepare_statement_with_keyspace_on_the_fly() { - // Create a separate session because we don't want it to have a default keyspace - SchemaChangeSynchronizer.withLock( - () -> { - try (CqlSession session = SessionUtils.newSession(ccmRule)) { - executeDdl( - session, - String.format( - "CREATE TABLE IF NOT EXISTS %s.bar (k int primary key)", - sessionRule.keyspace())); - PreparedStatement pst = - session.prepare( - SimpleStatement.newInstance("SELECT * FROM bar WHERE k=?") - .setKeyspace(sessionRule.keyspace())); - - // Drop and re-create the table to invalidate the prepared statement server side - executeDdl(session, String.format("DROP TABLE %s.bar", sessionRule.keyspace())); - executeDdl( - session, - String.format("CREATE TABLE %s.bar (k int primary key)", sessionRule.keyspace())); - assertThat(preparedStatementExistsOnServer(session, pst.getId())).isFalse(); - - // This will re-prepare on the fly - session.execute(pst.bind(0)); - assertThat(preparedStatementExistsOnServer(session, pst.getId())).isTrue(); - } - }); - } - - private void executeDdl(CqlSession session, String query) { - session.execute(SimpleStatement.builder(query).setTimeout(Duration.ofSeconds(30)).build()); - } - - private boolean preparedStatementExistsOnServer(CqlSession session, ByteBuffer id) { - ResultSet resultSet = - session.execute( - SimpleStatement.newInstance( - "SELECT * FROM system.prepared_statements WHERE prepared_id = ?", id)); - return resultSet.iterator().hasNext(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCachingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCachingIT.java deleted file mode 100644 index 617d489fb95..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCachingIT.java +++ /dev/null @@ -1,429 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.codahale.metrics.Gauge; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import 
com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; -import com.datastax.oss.driver.internal.core.session.BuiltInRequestProcessors; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import com.datastax.oss.driver.shaded.guava.common.cache.RemovalListener; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -// These tests must be isolated because setup modifies SessionUtils.SESSION_BUILDER_CLASS_PROPERTY -@Category(IsolatedTests.class) -public class PreparedStatementCachingIT { - - private CustomCcmRule ccmRule = CustomCcmRule.builder().build(); - - private SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - 
.withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - private static class PreparedStatementRemovalEvent { - - private final ByteBuffer queryId; - - public PreparedStatementRemovalEvent(ByteBuffer queryId) { - this.queryId = queryId; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || !(o instanceof PreparedStatementRemovalEvent)) return false; - PreparedStatementRemovalEvent that = (PreparedStatementRemovalEvent) o; - return Objects.equals(queryId, that.queryId); - } - - @Override - public int hashCode() { - return Objects.hash(queryId); - } - - @Override - public String toString() { - return "PreparedStatementRemovalEvent{" + "queryId=" + queryId + '}'; - } - } - - private static class TestCqlPrepareAsyncProcessor extends CqlPrepareAsyncProcessor { - - private static final Logger LOG = - LoggerFactory.getLogger(PreparedStatementCachingIT.TestCqlPrepareAsyncProcessor.class); - - private static RemovalListener buildCacheRemoveCallback( - @NonNull Optional context) { - return (evt) -> { - try { - CompletableFuture future = - (CompletableFuture) evt.getValue(); - ByteBuffer queryId = Uninterruptibles.getUninterruptibly(future).getId(); - context.ifPresent( - ctx -> ctx.getEventBus().fire(new PreparedStatementRemovalEvent(queryId))); - } catch (Exception e) { - LOG.error("Unable to register removal handler", e); - } - }; - } - - public TestCqlPrepareAsyncProcessor(@NonNull Optional context) { - // Default CqlPrepareAsyncProcessor uses weak values here as well. We avoid doing so - // to prevent cache entries from unexpectedly disappearing mid-test. 
- super(context, builder -> builder.removalListener(buildCacheRemoveCallback(context))); - } - } - - private static class TestDefaultDriverContext extends DefaultDriverContext { - public TestDefaultDriverContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - super(configLoader, programmaticArguments); - } - - @Override - protected RequestProcessorRegistry buildRequestProcessorRegistry() { - // Re-create the processor cache to insert the TestCqlPrepareAsyncProcessor with it's strong - // prepared statement cache, see JAVA-3062 - List> processors = - BuiltInRequestProcessors.createDefaultProcessors(this); - processors.removeIf((processor) -> processor instanceof CqlPrepareAsyncProcessor); - processors.removeIf((processor) -> processor instanceof CqlPrepareSyncProcessor); - CqlPrepareAsyncProcessor asyncProcessor = new TestCqlPrepareAsyncProcessor(Optional.of(this)); - processors.add(2, asyncProcessor); - processors.add(3, new CqlPrepareSyncProcessor(asyncProcessor)); - return new RequestProcessorRegistry( - getSessionName(), processors.toArray(new RequestProcessor[0])); - } - } - - private static class TestSessionBuilder extends SessionBuilder { - - @Override - protected Object wrap(@NonNull CqlSession defaultSession) { - return defaultSession; - } - - @Override - protected DriverContext buildContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - return new TestDefaultDriverContext(configLoader, programmaticArguments); - } - } - - @BeforeClass - public static void setup() { - System.setProperty( - SessionUtils.SESSION_BUILDER_CLASS_PROPERTY, PreparedStatementCachingIT.class.getName()); - } - - @AfterClass - public static void teardown() { - System.clearProperty(SessionUtils.SESSION_BUILDER_CLASS_PROPERTY); - } - - public static SessionBuilder builder() { - return new TestSessionBuilder(); - } - - private void invalidationResultSetTest( - Consumer setupTestSchema, Set expectedChangedTypes) { - 
invalidationTestInner( - setupTestSchema, - "select f from test_table_1 where e = ?", - "select h from test_table_2 where g = ?", - expectedChangedTypes); - } - - private void invalidationVariableDefsTest( - Consumer setupTestSchema, - boolean isCollection, - Set expectedChangedTypes) { - String condition = isCollection ? "contains ?" : "= ?"; - invalidationTestInner( - setupTestSchema, - String.format("select e from test_table_1 where f %s allow filtering", condition), - String.format("select g from test_table_2 where h %s allow filtering", condition), - expectedChangedTypes); - } - - private void invalidationTestInner( - Consumer setupTestSchema, - String preparedStmtQueryType1, - String preparedStmtQueryType2, - Set expectedChangedTypes) { - - try (CqlSession session = sessionWithCacheSizeMetric()) { - - assertThat(getPreparedCacheSize(session)).isEqualTo(0); - setupTestSchema.accept(session); - - session.prepare(preparedStmtQueryType1); - ByteBuffer queryId2 = session.prepare(preparedStmtQueryType2).getId(); - assertThat(getPreparedCacheSize(session)).isEqualTo(2); - - CountDownLatch preparedStmtCacheRemoveLatch = new CountDownLatch(1); - CountDownLatch typeChangeEventLatch = new CountDownLatch(expectedChangedTypes.size()); - - DefaultDriverContext ctx = (DefaultDriverContext) session.getContext(); - Map changedTypes = new ConcurrentHashMap<>(); - AtomicReference> removedQueryIds = - new AtomicReference<>(Optional.empty()); - AtomicReference> typeChangeEventError = - new AtomicReference<>(Optional.empty()); - AtomicReference> removedQueryEventError = - new AtomicReference<>(Optional.empty()); - ctx.getEventBus() - .register( - TypeChangeEvent.class, - (e) -> { - // expect one event per type changed and for every parent type that nests it - if (Boolean.TRUE.equals( - changedTypes.putIfAbsent(e.oldType.getName().toString(), true))) { - // store an error if we see duplicate change event - // any non-empty error will fail the test so it's OK to do this multiple 
times - typeChangeEventError.set(Optional.of("Duplicate type change event " + e)); - } - typeChangeEventLatch.countDown(); - }); - ctx.getEventBus() - .register( - PreparedStatementRemovalEvent.class, - (e) -> { - if (!removedQueryIds.compareAndSet(Optional.empty(), Optional.of(e.queryId))) { - // store an error if we see multiple cache invalidation events - // any non-empty error will fail the test so it's OK to do this multiple times - removedQueryEventError.set( - Optional.of("Unable to set reference for PS removal event")); - } - preparedStmtCacheRemoveLatch.countDown(); - }); - - // alter test_type_2 to trigger cache invalidation and above events - session.execute("ALTER TYPE test_type_2 add i blob"); - - // wait for latches and fail if they don't reach zero before timeout - assertThat( - Uninterruptibles.awaitUninterruptibly( - preparedStmtCacheRemoveLatch, 10, TimeUnit.SECONDS)) - .withFailMessage("preparedStmtCacheRemoveLatch did not trigger before timeout") - .isTrue(); - assertThat(Uninterruptibles.awaitUninterruptibly(typeChangeEventLatch, 10, TimeUnit.SECONDS)) - .withFailMessage("typeChangeEventLatch did not trigger before timeout") - .isTrue(); - - /* Okay, the latch triggered so cache processing should now be done. 
Let's validate :allthethings: */ - assertThat(changedTypes.keySet()).isEqualTo(expectedChangedTypes); - assertThat(removedQueryIds.get()).isNotEmpty().get().isEqualTo(queryId2); - assertThat(getPreparedCacheSize(session)).isEqualTo(1); - - // check no errors were seen in callback (and report those as fail msgs) - // if something is broken these may still succeed due to timing - // but shouldn't intermittently fail if the code is working properly - assertThat(typeChangeEventError.get()) - .withFailMessage(() -> typeChangeEventError.get().get()) - .isEmpty(); - assertThat(removedQueryEventError.get()) - .withFailMessage(() -> removedQueryEventError.get().get()) - .isEmpty(); - } - } - - Consumer setupCacheEntryTestBasic = - (session) -> { - session.execute("CREATE TYPE test_type_1 (a text, b int)"); - session.execute("CREATE TYPE test_type_2 (c int, d text)"); - session.execute("CREATE TABLE test_table_1 (e int primary key, f frozen)"); - session.execute("CREATE TABLE test_table_2 (g int primary key, h frozen)"); - }; - - @Test - public void should_invalidate_cache_entry_on_basic_udt_change_result_set() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationResultSetTest(setupCacheEntryTestBasic, ImmutableSet.of("test_type_2")); - }); - } - - @Test - public void should_invalidate_cache_entry_on_basic_udt_change_variable_defs() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationVariableDefsTest( - setupCacheEntryTestBasic, false, ImmutableSet.of("test_type_2")); - }); - } - - Consumer setupCacheEntryTestCollection = - (session) -> { - session.execute("CREATE TYPE test_type_1 (a text, b int)"); - session.execute("CREATE TYPE test_type_2 (c int, d text)"); - session.execute( - "CREATE TABLE test_table_1 (e int primary key, f list>)"); - session.execute( - "CREATE TABLE test_table_2 (g int primary key, h list>)"); - }; - - @Test - public void should_invalidate_cache_entry_on_collection_udt_change_result_set() { - SchemaChangeSynchronizer.withLock( - 
() -> { - invalidationResultSetTest(setupCacheEntryTestCollection, ImmutableSet.of("test_type_2")); - }); - } - - @Test - public void should_invalidate_cache_entry_on_collection_udt_change_variable_defs() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationVariableDefsTest( - setupCacheEntryTestCollection, true, ImmutableSet.of("test_type_2")); - }); - } - - Consumer setupCacheEntryTestTuple = - (session) -> { - session.execute("CREATE TYPE test_type_1 (a text, b int)"); - session.execute("CREATE TYPE test_type_2 (c int, d text)"); - session.execute( - "CREATE TABLE test_table_1 (e int primary key, f tuple)"); - session.execute( - "CREATE TABLE test_table_2 (g int primary key, h tuple)"); - }; - - @Test - public void should_invalidate_cache_entry_on_tuple_udt_change_result_set() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationResultSetTest(setupCacheEntryTestTuple, ImmutableSet.of("test_type_2")); - }); - } - - @Test - public void should_invalidate_cache_entry_on_tuple_udt_change_variable_defs() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationVariableDefsTest( - setupCacheEntryTestTuple, false, ImmutableSet.of("test_type_2")); - }); - } - - Consumer setupCacheEntryTestNested = - (session) -> { - session.execute("CREATE TYPE test_type_1 (a text, b int)"); - session.execute("CREATE TYPE test_type_2 (c int, d text)"); - session.execute("CREATE TYPE test_type_3 (e frozen, f int)"); - session.execute("CREATE TYPE test_type_4 (g int, h frozen)"); - session.execute("CREATE TABLE test_table_1 (e int primary key, f frozen)"); - session.execute("CREATE TABLE test_table_2 (g int primary key, h frozen)"); - }; - - @Test - public void should_invalidate_cache_entry_on_nested_udt_change_result_set() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationResultSetTest( - setupCacheEntryTestNested, ImmutableSet.of("test_type_2", "test_type_4")); - }); - } - - @Test - public void 
should_invalidate_cache_entry_on_nested_udt_change_variable_defs() { - SchemaChangeSynchronizer.withLock( - () -> { - invalidationVariableDefsTest( - setupCacheEntryTestNested, false, ImmutableSet.of("test_type_2", "test_type_4")); - }); - } - - /* ========================= Infrastructure copied from PreparedStatementIT ========================= */ - private CqlSession sessionWithCacheSizeMetric() { - return SessionUtils.newSession( - ccmRule, - sessionRule.keyspace(), - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - ImmutableList.of(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())) - .build()); - } - - @SuppressWarnings("unchecked") - private static long getPreparedCacheSize(CqlSession session) { - return session - .getMetrics() - .flatMap(metrics -> metrics.getSessionMetric(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE)) - .map(metric -> ((Gauge) metric).getValue()) - .orElseThrow( - () -> - new AssertionError( - "Could not access metric " - + DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCancellationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCancellationIT.java deleted file mode 100644 index d7e581e4606..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCancellationIT.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.fail; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.shaded.guava.common.base.Predicates; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import java.util.concurrent.CompletableFuture; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(IsolatedTests.class) -public class PreparedStatementCancellationIT { - - private CustomCcmRule ccmRule = CustomCcmRule.builder().build(); - - private SessionRule sessionRule = 
SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Before - public void setup() { - - CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace()); - session.execute("DROP TABLE IF EXISTS test_table_1"); - session.execute("CREATE TABLE test_table_1 (k int primary key, v int)"); - session.execute("INSERT INTO test_table_1 (k,v) VALUES (1, 100)"); - session.execute("INSERT INTO test_table_1 (k,v) VALUES (2, 200)"); - session.execute("INSERT INTO test_table_1 (k,v) VALUES (3, 300)"); - session.close(); - } - - @After - public void teardown() { - - CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace()); - session.execute("DROP TABLE test_table_1"); - session.close(); - } - - private CompletableFuture toCompletableFuture(CqlSession session, String cql) { - - return session.prepareAsync(cql).toCompletableFuture(); - } - - private CqlPrepareAsyncProcessor findProcessor(CqlSession session) { - - DefaultDriverContext context = (DefaultDriverContext) session.getContext(); - return (CqlPrepareAsyncProcessor) - Iterables.find( - context.getRequestProcessorRegistry().getProcessors(), - Predicates.instanceOf(CqlPrepareAsyncProcessor.class)); - } - - @Test - public void should_cache_valid_cql() throws Exception { - - CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace()); - CqlPrepareAsyncProcessor processor = findProcessor(session); - Cache> cache = processor.getCache(); - assertThat(cache.size()).isEqualTo(0); - - // Make multiple CompletableFuture requests for the specified CQL, then wait until - // the cached request finishes and confirm that all futures got the same values - String cql = "select v from test_table_1 where k = ?"; - CompletableFuture cf1 = toCompletableFuture(session, cql); - CompletableFuture cf2 = toCompletableFuture(session, cql); - assertThat(cache.size()).isEqualTo(1); - - CompletableFuture future = 
Iterables.get(cache.asMap().values(), 0); - PreparedStatement stmt = future.get(); - - assertThat(cf1.isDone()).isTrue(); - assertThat(cf2.isDone()).isTrue(); - - assertThat(cf1.join()).isEqualTo(stmt); - assertThat(cf2.join()).isEqualTo(stmt); - } - - // A holdover from work done on JAVA-3055. This probably isn't _desired_ behaviour but this test - // documents the fact that the current driver impl will behave in this way. We should probably - // consider changing this in a future release, although it's worthwhile fully considering the - // implications of such a change. - @Test - public void will_cache_invalid_cql() throws Exception { - - CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace()); - CqlPrepareAsyncProcessor processor = findProcessor(session); - Cache> cache = processor.getCache(); - assertThat(cache.size()).isEqualTo(0); - - // Verify that we get the CompletableFuture even if the CQL is invalid but that nothing is - // cached - String cql = "select v fromfrom test_table_1 where k = ?"; - CompletableFuture cf = toCompletableFuture(session, cql); - - // join() here should throw exceptions due to the invalid syntax... 
for purposes of this test we - // can ignore this - try { - cf.join(); - fail(); - } catch (Exception e) { - } - - assertThat(cache.size()).isEqualTo(1); - } - - @Test - public void should_not_affect_cache_if_returned_futures_are_cancelled() throws Exception { - - CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace()); - CqlPrepareAsyncProcessor processor = findProcessor(session); - Cache> cache = processor.getCache(); - assertThat(cache.size()).isEqualTo(0); - - String cql = "select v from test_table_1 where k = ?"; - CompletableFuture cf = toCompletableFuture(session, cql); - - assertThat(cf.isCancelled()).isFalse(); - assertThat(cf.cancel(false)).isTrue(); - assertThat(cf.isCancelled()).isTrue(); - assertThat(cf.isCompletedExceptionally()).isTrue(); - - // Confirm that cancelling the CompletableFuture returned to the user does _not_ cancel the - // future used within the cache. CacheEntry very deliberately doesn't maintain a reference - // to it's contained CompletableFuture so we have to get at this by secondary effects. - assertThat(cache.size()).isEqualTo(1); - CompletableFuture future = Iterables.get(cache.asMap().values(), 0); - PreparedStatement rv = future.get(); - assertThat(rv).isNotNull(); - assertThat(rv.getQuery()).isEqualTo(cql); - assertThat(cache.size()).isEqualTo(1); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementIT.java deleted file mode 100644 index 5671a7684e5..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementIT.java +++ /dev/null @@ -1,578 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatCode; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.codahale.metrics.Gauge; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import 
com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.google.common.collect.ImmutableList; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import junit.framework.TestCase; -import org.assertj.core.api.AbstractThrowableAssert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * Note: at the time of writing, some of these tests exercises features of an unreleased Cassandra - * version. To test against a local build, run with - * - *
    - *   -Dccm.version=4.0.0 -Dccm.directory=/path/to/cassandra -Ddatastax-java-driver.advanced.protocol.version=V5
    - * 
    - */ -@Category(ParallelizableTests.class) -public class PreparedStatementIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Before - public void setupSchema() { - for (String query : - ImmutableList.of( - "DROP TABLE IF EXISTS prepared_statement_test", - "CREATE TABLE prepared_statement_test (a int PRIMARY KEY, b int, c int)", - "INSERT INTO prepared_statement_test (a, b, c) VALUES (1, 1, 1)", - "INSERT INTO prepared_statement_test (a, b, c) VALUES (2, 2, 2)", - "INSERT INTO prepared_statement_test (a, b, c) VALUES (3, 3, 3)", - "INSERT INTO prepared_statement_test (a, b, c) VALUES (4, 4, 4)")) { - executeDdl(query); - } - } - - private void executeDdl(String query) { - sessionRule - .session() - .execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } - - @Test - public void should_have_empty_result_definitions_for_insert_query_without_bound_variable() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = - session.prepare("INSERT INTO prepared_statement_test (a, b, c) VALUES (1, 1, 1)"); - assertThat(prepared.getVariableDefinitions()).isEmpty(); - assertThat(prepared.getPartitionKeyIndices()).isEmpty(); - assertThat(prepared.getResultSetDefinitions()).isEmpty(); - } - } - - @Test - public void should_have_non_empty_result_definitions_for_insert_query_with_bound_variable() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = - session.prepare("INSERT INTO prepared_statement_test (a, b, c) VALUES (?, ?, ?)"); - 
assertThat(prepared.getVariableDefinitions()).hasSize(3); - assertThat(prepared.getPartitionKeyIndices()).hasSize(1); - assertThat(prepared.getResultSetDefinitions()).isEmpty(); - } - } - - @Test - public void should_have_empty_variable_definitions_for_select_query_without_bound_variable() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = - session.prepare("SELECT a,b,c FROM prepared_statement_test WHERE a = 1"); - assertThat(prepared.getVariableDefinitions()).isEmpty(); - assertThat(prepared.getPartitionKeyIndices()).isEmpty(); - assertThat(prepared.getResultSetDefinitions()).hasSize(3); - } - } - - @Test - public void should_have_non_empty_variable_definitions_for_select_query_with_bound_variable() { - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { - PreparedStatement prepared = - session.prepare("SELECT a,b,c FROM prepared_statement_test WHERE a = ?"); - assertThat(prepared.getVariableDefinitions()).hasSize(1); - assertThat(prepared.getPartitionKeyIndices()).hasSize(1); - assertThat(prepared.getResultSetDefinitions()).hasSize(3); - } - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_update_metadata_when_schema_changed_across_executions() { - // Given - CqlSession session = sessionRule.session(); - PreparedStatement ps = session.prepare("SELECT * FROM prepared_statement_test WHERE a = ?"); - ByteBuffer idBefore = ps.getResultMetadataId(); - - // When - session.execute( - SimpleStatement.builder("ALTER TABLE prepared_statement_test ADD d int") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - BoundStatement bs = ps.bind(1); - ResultSet rows = session.execute(bs); - - // Then - ByteBuffer idAfter = ps.getResultMetadataId(); - assertThat(Bytes.toHexString(idAfter)).isNotEqualTo(Bytes.toHexString(idBefore)); - for (ColumnDefinitions columnDefinitions : - ImmutableList.of( - 
ps.getResultSetDefinitions(), - bs.getPreparedStatement().getResultSetDefinitions(), - rows.getColumnDefinitions())) { - assertThat(columnDefinitions).hasSize(4); - assertThat(columnDefinitions.get("d").getType()).isEqualTo(DataTypes.INT); - } - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_update_metadata_when_schema_changed_across_pages() { - // Given - CqlSession session = sessionRule.session(); - PreparedStatement ps = session.prepare("SELECT * FROM prepared_statement_test"); - ByteBuffer idBefore = ps.getResultMetadataId(); - assertThat(ps.getResultSetDefinitions()).hasSize(3); - - CompletionStage future = session.executeAsync(ps.bind()); - AsyncResultSet rows = CompletableFutures.getUninterruptibly(future); - assertThat(rows.getColumnDefinitions()).hasSize(3); - assertThat(rows.getColumnDefinitions().contains("d")).isFalse(); - // Consume the first page - for (Row row : rows.currentPage()) { - try { - row.getInt("d"); - TestCase.fail("expected an error"); - } catch (IllegalArgumentException e) { - /*expected*/ - } - } - - // When - session.execute( - SimpleStatement.builder("ALTER TABLE prepared_statement_test ADD d int") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - - // Then - // this should trigger a background fetch of the second page, and therefore update the - // definitions - rows = CompletableFutures.getUninterruptibly(rows.fetchNextPage()); - for (Row row : rows.currentPage()) { - assertThat(row.isNull("d")).isTrue(); - } - assertThat(rows.getColumnDefinitions()).hasSize(4); - assertThat(rows.getColumnDefinitions().get("d").getType()).isEqualTo(DataTypes.INT); - // Should have updated the prepared statement too - ByteBuffer idAfter = ps.getResultMetadataId(); - assertThat(Bytes.toHexString(idAfter)).isNotEqualTo(Bytes.toHexString(idBefore)); - assertThat(ps.getResultSetDefinitions()).hasSize(4); - 
assertThat(ps.getResultSetDefinitions().get("d").getType()).isEqualTo(DataTypes.INT); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_update_metadata_when_schema_changed_across_sessions() { - // Given - CqlSession session1 = sessionRule.session(); - CqlSession session2 = SessionUtils.newSession(ccmRule, sessionRule.keyspace()); - - PreparedStatement ps1 = session1.prepare("SELECT * FROM prepared_statement_test WHERE a = ?"); - PreparedStatement ps2 = session2.prepare("SELECT * FROM prepared_statement_test WHERE a = ?"); - - ByteBuffer id1a = ps1.getResultMetadataId(); - ByteBuffer id2a = ps2.getResultMetadataId(); - - ResultSet rows1 = session1.execute(ps1.bind(1)); - ResultSet rows2 = session2.execute(ps2.bind(1)); - - assertThat(rows1.getColumnDefinitions()).hasSize(3); - assertThat(rows1.getColumnDefinitions().contains("d")).isFalse(); - assertThat(rows2.getColumnDefinitions()).hasSize(3); - assertThat(rows2.getColumnDefinitions().contains("d")).isFalse(); - - // When - session1.execute("ALTER TABLE prepared_statement_test ADD d int"); - - rows1 = session1.execute(ps1.bind(1)); - rows2 = session2.execute(ps2.bind(1)); - - ByteBuffer id1b = ps1.getResultMetadataId(); - ByteBuffer id2b = ps2.getResultMetadataId(); - - // Then - assertThat(Bytes.toHexString(id1b)).isNotEqualTo(Bytes.toHexString(id1a)); - assertThat(Bytes.toHexString(id2b)).isNotEqualTo(Bytes.toHexString(id2a)); - - assertThat(ps1.getResultSetDefinitions()).hasSize(4); - assertThat(ps1.getResultSetDefinitions().contains("d")).isTrue(); - assertThat(ps2.getResultSetDefinitions()).hasSize(4); - assertThat(ps2.getResultSetDefinitions().contains("d")).isTrue(); - - assertThat(rows1.getColumnDefinitions()).hasSize(4); - assertThat(rows1.getColumnDefinitions().contains("d")).isTrue(); - assertThat(rows2.getColumnDefinitions()).hasSize(4); - assertThat(rows2.getColumnDefinitions().contains("d")).isTrue(); - - session2.close(); - } - - @Test - 
@BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_fail_to_reprepare_if_query_becomes_invalid() { - // Given - CqlSession session = sessionRule.session(); - session.execute("ALTER TABLE prepared_statement_test ADD d int"); - PreparedStatement ps = - session.prepare("SELECT a, b, c, d FROM prepared_statement_test WHERE a = ?"); - session.execute("ALTER TABLE prepared_statement_test DROP d"); - - // When - Throwable t = catchThrowable(() -> session.execute(ps.bind())); - - // Then - assertThat(t) - .isInstanceOf(InvalidQueryException.class) - .hasMessageContaining("Undefined column name d"); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") - public void should_not_store_metadata_for_conditional_updates() { - should_not_store_metadata_for_conditional_updates(sessionRule.session()); - } - - @Test - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") - public void should_not_store_metadata_for_conditional_updates_in_legacy_protocol() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace(), loader)) { - should_not_store_metadata_for_conditional_updates(session); - } - } - - private void should_not_store_metadata_for_conditional_updates(CqlSession session) { - // Given - PreparedStatement ps = - session.prepare( - "INSERT INTO prepared_statement_test (a, b, c) VALUES (?, ?, ?) IF NOT EXISTS"); - - // Never store metadata in the prepared statement for conditional updates, since the result set - // can change - // depending on the outcome. 
- assertThat(ps.getResultSetDefinitions()).hasSize(0); - ByteBuffer idBefore = ps.getResultMetadataId(); - - // When - ResultSet rs = session.execute(ps.bind(5, 5, 5)); - - // Then - // Successful conditional update => only contains the [applied] column - assertThat(rs.wasApplied()).isTrue(); - assertThat(rs.getColumnDefinitions()).hasSize(1); - assertThat(rs.getColumnDefinitions().get("[applied]").getType()).isEqualTo(DataTypes.BOOLEAN); - // However the prepared statement shouldn't have changed - assertThat(ps.getResultSetDefinitions()).hasSize(0); - assertThat(Bytes.toHexString(ps.getResultMetadataId())).isEqualTo(Bytes.toHexString(idBefore)); - - // When - rs = session.execute(ps.bind(5, 5, 5)); - - // Then - // Failed conditional update => regular metadata - assertThat(rs.wasApplied()).isFalse(); - assertThat(rs.getColumnDefinitions()).hasSize(4); - Row row = rs.one(); - assertThat(row.getBoolean("[applied]")).isFalse(); - assertThat(row.getInt("a")).isEqualTo(5); - assertThat(row.getInt("b")).isEqualTo(5); - assertThat(row.getInt("c")).isEqualTo(5); - // The prepared statement still shouldn't have changed - assertThat(ps.getResultSetDefinitions()).hasSize(0); - assertThat(Bytes.toHexString(ps.getResultMetadataId())).isEqualTo(Bytes.toHexString(idBefore)); - - // When - session.execute("ALTER TABLE prepared_statement_test ADD d int"); - rs = session.execute(ps.bind(5, 5, 5)); - - // Then - // Failed conditional update => regular metadata that should also contain the new column - assertThat(rs.wasApplied()).isFalse(); - assertThat(rs.getColumnDefinitions()).hasSize(5); - row = rs.one(); - assertThat(row.getBoolean("[applied]")).isFalse(); - assertThat(row.getInt("a")).isEqualTo(5); - assertThat(row.getInt("b")).isEqualTo(5); - assertThat(row.getInt("c")).isEqualTo(5); - assertThat(row.isNull("d")).isTrue(); - assertThat(ps.getResultSetDefinitions()).hasSize(0); - assertThat(Bytes.toHexString(ps.getResultMetadataId())).isEqualTo(Bytes.toHexString(idBefore)); - } 
- - @Test - public void should_return_same_instance_when_repreparing_query() { - try (CqlSession session = sessionWithCacheSizeMetric()) { - // Given - assertThat(getPreparedCacheSize(session)).isEqualTo(0); - String query = "SELECT * FROM prepared_statement_test WHERE a = ?"; - - // Send prepare requests, keep hold of CompletionStage objects to prevent them being removed - // from CqlPrepareAsyncProcessor#cache, see JAVA-3062 - CompletionStage preparedStatement1Future = session.prepareAsync(query); - CompletionStage preparedStatement2Future = session.prepareAsync(query); - - PreparedStatement preparedStatement1 = - CompletableFutures.getUninterruptibly(preparedStatement1Future); - PreparedStatement preparedStatement2 = - CompletableFutures.getUninterruptibly(preparedStatement2Future); - - // Then - assertThat(preparedStatement1).isSameAs(preparedStatement2); - assertThat(getPreparedCacheSize(session)).isEqualTo(1); - } - } - - /** Just to illustrate that the driver does not sanitize query strings. 
*/ - @Test - public void should_create_separate_instances_for_differently_formatted_queries() { - try (CqlSession session = sessionWithCacheSizeMetric()) { - // Given - assertThat(getPreparedCacheSize(session)).isEqualTo(0); - - // Send prepare requests, keep hold of CompletionStage objects to prevent them being removed - // from CqlPrepareAsyncProcessor#cache, see JAVA-3062 - CompletionStage preparedStatement1Future = - session.prepareAsync("SELECT * FROM prepared_statement_test WHERE a = ?"); - CompletionStage preparedStatement2Future = - session.prepareAsync("select * from prepared_statement_test where a = ?"); - - PreparedStatement preparedStatement1 = - CompletableFutures.getUninterruptibly(preparedStatement1Future); - PreparedStatement preparedStatement2 = - CompletableFutures.getUninterruptibly(preparedStatement2Future); - - // Then - assertThat(preparedStatement1).isNotSameAs(preparedStatement2); - assertThat(getPreparedCacheSize(session)).isEqualTo(2); - } - } - - @Test - public void should_create_separate_instances_for_different_statement_parameters() { - try (CqlSession session = sessionWithCacheSizeMetric()) { - // Given - assertThat(getPreparedCacheSize(session)).isEqualTo(0); - SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM prepared_statement_test"); - - // Send prepare requests, keep hold of CompletionStage objects to prevent them being removed - // from CqlPrepareAsyncProcessor#cache, see JAVA-3062 - CompletionStage preparedStatement1Future = - session.prepareAsync(statement.setPageSize(1)); - CompletionStage preparedStatement2Future = - session.prepareAsync(statement.setPageSize(4)); - - PreparedStatement preparedStatement1 = - CompletableFutures.getUninterruptibly(preparedStatement1Future); - PreparedStatement preparedStatement2 = - CompletableFutures.getUninterruptibly(preparedStatement2Future); - - // Then - assertThat(preparedStatement1).isNotSameAs(preparedStatement2); - 
assertThat(getPreparedCacheSize(session)).isEqualTo(2); - // Each bound statement uses the page size it was prepared with - assertThat(firstPageOf(session.executeAsync(preparedStatement1.bind()))).hasSize(1); - assertThat(firstPageOf(session.executeAsync(preparedStatement2.bind()))).hasSize(4); - } - } - - /** - * This method reproduces CASSANDRA-15252 which is fixed in 3.0.26/3.11.12/4.0.2. - * - * @see CASSANDRA-15252 - */ - private AbstractThrowableAssert assertableReprepareAfterIdChange() { - try (CqlSession session = SessionUtils.newSession(ccmRule)) { - PreparedStatement preparedStatement = - session.prepare( - String.format( - "SELECT * FROM %s.prepared_statement_test WHERE a = ?", sessionRule.keyspace())); - - session.execute("USE " + sessionRule.keyspace().asCql(false)); - - // Drop and recreate the table to invalidate the prepared statement server-side - executeDdl("DROP TABLE prepared_statement_test"); - executeDdl("CREATE TABLE prepared_statement_test (a int PRIMARY KEY, b int, c int)"); - - return assertThatCode(() -> session.execute(preparedStatement.bind(1))); - } - } - - // Add version bounds to the DSE requirement if there is a version containing fix for - // CASSANDRA-15252 - @BackendRequirement( - type = BackendType.DSE, - description = "No DSE version contains fix for CASSANDRA-15252") - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "3.0.0", maxExclusive = "3.0.26") - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.11.0", - maxExclusive = "3.11.12") - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0.0", maxExclusive = "4.0.2") - @Test - public void should_fail_fast_if_id_changes_on_reprepare() { - assertableReprepareAfterIdChange() - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("ID mismatch while trying to reprepare"); - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.0.26", - maxExclusive = "3.11.0") - @BackendRequirement( - 
type = BackendType.CASSANDRA, - minInclusive = "3.11.12", - maxExclusive = "4.0.0") - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0.2") - @Test - public void handle_id_changes_on_reprepare() { - assertableReprepareAfterIdChange().doesNotThrowAnyException(); - } - - @Test - public void should_infer_routing_information_when_partition_key_is_bound() { - should_infer_routing_information_when_partition_key_is_bound( - "SELECT a FROM prepared_statement_test WHERE a = ?"); - should_infer_routing_information_when_partition_key_is_bound( - "INSERT INTO prepared_statement_test (a) VALUES (?)"); - should_infer_routing_information_when_partition_key_is_bound( - "UPDATE prepared_statement_test SET b = 1 WHERE a = ?"); - should_infer_routing_information_when_partition_key_is_bound( - "DELETE FROM prepared_statement_test WHERE a = ?"); - } - - private void should_infer_routing_information_when_partition_key_is_bound(String queryString) { - CqlSession session = sessionRule.session(); - TokenFactory tokenFactory = - ((DefaultTokenMap) session.getMetadata().getTokenMap().orElseThrow(AssertionError::new)) - .getTokenFactory(); - - // We'll bind a=1 in the query, check what token this is supposed to produce - Token expectedToken = - session - .execute("SELECT token(a) FROM prepared_statement_test WHERE a = 1") - .one() - .getToken(0); - - BoundStatement boundStatement = session.prepare(queryString).bind().setInt("a", 1); - - assertThat(boundStatement.getRoutingKeyspace()).isEqualTo(sessionRule.keyspace()); - assertThat(tokenFactory.hash(boundStatement.getRoutingKey())).isEqualTo(expectedToken); - } - - @Test - public void should_return_null_routing_information_when_single_partition_key_is_unbound() { - should_return_null_routing_information_when_single_partition_key_is_unbound( - "SELECT a FROM prepared_statement_test WHERE a = ?"); - should_return_null_routing_information_when_single_partition_key_is_unbound( - "INSERT INTO prepared_statement_test (a) VALUES 
(?)"); - should_return_null_routing_information_when_single_partition_key_is_unbound( - "UPDATE prepared_statement_test SET b = 1 WHERE a = ?"); - should_return_null_routing_information_when_single_partition_key_is_unbound( - "DELETE FROM prepared_statement_test WHERE a = ?"); - } - - private void should_return_null_routing_information_when_single_partition_key_is_unbound( - String queryString) { - CqlSession session = sessionRule.session(); - BoundStatement boundStatement = session.prepare(queryString).bind(); - assertThat(boundStatement.getRoutingKey()).isNull(); - } - - private static Iterable firstPageOf(CompletionStage stage) { - return CompletableFutures.getUninterruptibly(stage).currentPage(); - } - - private CqlSession sessionWithCacheSizeMetric() { - return SessionUtils.newSession( - ccmRule, - sessionRule.keyspace(), - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - ImmutableList.of(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())) - .build()); - } - - @SuppressWarnings("unchecked") - private static long getPreparedCacheSize(CqlSession session) { - return session - .getMetrics() - .flatMap(metrics -> metrics.getSessionMetric(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE)) - .map(metric -> ((Gauge) metric).getValue()) - .orElseThrow( - () -> - new AssertionError( - "Could not access metric " - + DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/QueryTraceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/QueryTraceIT.java deleted file mode 100644 index 37a600efbc4..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/QueryTraceIT.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.QueryTrace; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class QueryTraceIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule 
SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Test - public void should_not_have_tracing_id_when_tracing_disabled() { - ExecutionInfo executionInfo = - SESSION_RULE - .session() - .execute("SELECT release_version FROM system.local") - .getExecutionInfo(); - - assertThat(executionInfo.getTracingId()).isNull(); - - Throwable t = catchThrowable(executionInfo::getQueryTrace); - - assertThat(t) - .isInstanceOf(IllegalStateException.class) - .hasMessage("Tracing was disabled for this request"); - } - - @Test - public void should_fetch_trace_when_tracing_enabled() { - ExecutionInfo executionInfo = - SESSION_RULE - .session() - .execute( - SimpleStatement.builder("SELECT release_version FROM system.local") - .setTracing() - .build()) - .getExecutionInfo(); - - assertThat(executionInfo.getTracingId()).isNotNull(); - - EndPoint contactPoint = CCM_RULE.getContactPoints().iterator().next(); - InetAddress nodeAddress = ((InetSocketAddress) contactPoint.resolve()).getAddress(); - boolean expectPorts = - CCM_RULE.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) >= 0 - && !CCM_RULE.isDistributionOf(BackendType.DSE); - - QueryTrace queryTrace = executionInfo.getQueryTrace(); - assertThat(queryTrace.getTracingId()).isEqualTo(executionInfo.getTracingId()); - assertThat(queryTrace.getRequestType()).isEqualTo("Execute CQL3 query"); - assertThat(queryTrace.getDurationMicros()).isPositive(); - assertThat(queryTrace.getCoordinatorAddress().getAddress()).isEqualTo(nodeAddress); - if (expectPorts) { - Row row = - SESSION_RULE - .session() - .execute( - "SELECT coordinator_port FROM system_traces.sessions WHERE session_id = " - + queryTrace.getTracingId()) - .one(); - assertThat(row).isNotNull(); - int expectedPort = row.getInt(0); - assertThat(queryTrace.getCoordinatorAddress().getPort()).isEqualTo(expectedPort); - } else { - 
assertThat(queryTrace.getCoordinatorAddress().getPort()).isEqualTo(0); - } - assertThat(queryTrace.getParameters()) - .containsEntry("consistency_level", "LOCAL_ONE") - .containsEntry("page_size", "5000") - .containsEntry("query", "SELECT release_version FROM system.local") - .containsEntry("serial_consistency_level", "SERIAL"); - assertThat(queryTrace.getStartedAt()).isPositive(); - // Don't want to get too deep into event testing because that could change across versions - assertThat(queryTrace.getEvents()).isNotEmpty(); - InetSocketAddress sourceAddress0 = queryTrace.getEvents().get(0).getSourceAddress(); - assertThat(sourceAddress0).isNotNull(); - assertThat(sourceAddress0.getAddress()).isEqualTo(nodeAddress); - if (expectPorts) { - Row row = - SESSION_RULE - .session() - .execute( - "SELECT source_port FROM system_traces.events WHERE session_id = " - + queryTrace.getTracingId() - + " LIMIT 1") - .one(); - assertThat(row).isNotNull(); - int expectedPort = row.getInt(0); - assertThat(sourceAddress0.getPort()).isEqualTo(expectedPort); - } else { - assertThat(sourceAddress0.getPort()).isEqualTo(0); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementCcmIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementCcmIT.java deleted file mode 100644 index b903f59efcc..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementCcmIT.java +++ /dev/null @@ -1,372 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SimpleStatementCcmIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule 
SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 20) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Rule public TestName name = new TestName(); - - private static final String KEY = "test"; - - @BeforeClass - public static void setupSchema() { - // table where every column forms the primary key. - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k text, v int, PRIMARY KEY(k, v))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - for (int i = 0; i < 100; i++) { - SESSION_RULE - .session() - .execute( - SimpleStatement.builder("INSERT INTO test (k, v) VALUES (?, ?)") - .addPositionalValues(KEY, i) - .build()); - } - - // table with simple primary key, single cell. - SESSION_RULE - .session() - .execute( - SimpleStatement.builder("CREATE TABLE IF NOT EXISTS test2 (k text primary key, v int)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - @Test - public void should_use_paging_state_when_copied() { - Statement st = - SimpleStatement.builder(String.format("SELECT v FROM test WHERE k='%s'", KEY)).build(); - ResultSet result = SESSION_RULE.session().execute(st); - - // given a query created from a copy of a previous query with paging state from previous queries - // response. - st = st.copy(result.getExecutionInfo().getPagingState()); - - // when executing that query. - result = SESSION_RULE.session().execute(st); - - // then the response should start on the page boundary. 
- assertThat(result.iterator().next().getInt("v")).isEqualTo(20); - } - - @Test - public void should_use_paging_state_when_provided_to_new_statement() { - Statement st = - SimpleStatement.builder(String.format("SELECT v FROM test WHERE k='%s'", KEY)).build(); - ResultSet result = SESSION_RULE.session().execute(st); - - // given a query created from a copy of a previous query with paging state from previous queries - // response. - st = - SimpleStatement.builder(String.format("SELECT v FROM test where k='%s'", KEY)) - .setPagingState(result.getExecutionInfo().getPagingState()) - .build(); - - // when executing that query. - result = SESSION_RULE.session().execute(st); - - // then the response should start on the page boundary. - assertThat(result.iterator().next().getInt("v")).isEqualTo(20); - } - - @Test - @Ignore - public void should_fail_if_using_paging_state_from_different_query() { - Statement st = - SimpleStatement.builder("SELECT v FROM test WHERE k=:k").addNamedValue("k", KEY).build(); - ResultSet result = SESSION_RULE.session().execute(st); - - // TODO Expect PagingStateException - - // given a new different query and providing the paging state from the previous query - // then an exception should be thrown indicating incompatible paging state - SimpleStatement.builder("SELECT v FROM test") - .setPagingState(result.getExecutionInfo().getPagingState()) - .build(); - } - - @Test - public void should_use_timestamp_when_set() { - // given inserting data with a timestamp 40 days in the past. - long timestamp = System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(40, TimeUnit.DAYS); - SimpleStatement insert = - SimpleStatement.builder("INSERT INTO test2 (k, v) values (?, ?)") - .addPositionalValues(name.getMethodName(), 0) - .setQueryTimestamp(timestamp) - .build(); - - SESSION_RULE.session().execute(insert); - - // when retrieving writetime of cell from that insert. 
- SimpleStatement select = - SimpleStatement.builder("SELECT writetime(v) as wv from test2 where k = ?") - .addPositionalValue(name.getMethodName()) - .build(); - - ResultSet result = SESSION_RULE.session().execute(select); - List rows = result.all(); - assertThat(rows).hasSize(1); - - // then the writetime should equal the timestamp provided. - Row row = rows.iterator().next(); - assertThat(row.getLong("wv")).isEqualTo(timestamp); - } - - @Test - @Ignore - public void should_use_tracing_when_set() { - // TODO currently there's no way to validate tracing was set since trace id is not set - // also write test to verify it is not set. - SESSION_RULE - .session() - .execute(SimpleStatement.builder("select * from test").setTracing().build()); - } - - @Test - public void should_use_positional_values() { - // given a statement with positional values - SimpleStatement insert = - SimpleStatement.builder("INSERT into test2 (k, v) values (?, ?)") - .addPositionalValue(name.getMethodName()) - .addPositionalValue(4) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then we should be able to retrieve the data as inserted. 
- SimpleStatement select = - SimpleStatement.builder("select k,v from test2 where k=?") - .addPositionalValue(name.getMethodName()) - .build(); - - ResultSet result = SESSION_RULE.session().execute(select); - List rows = result.all(); - assertThat(rows).hasSize(1); - - Row row = rows.iterator().next(); - assertThat(row.getString("k")).isEqualTo(name.getMethodName()); - assertThat(row.getInt("v")).isEqualTo(4); - } - - @Test - public void should_allow_nulls_in_positional_values() { - // given a statement with positional values - SimpleStatement insert = - SimpleStatement.builder("INSERT into test2 (k, v) values (?, ?)") - .addPositionalValue(name.getMethodName()) - .addPositionalValue(null) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then we should be able to retrieve the data as inserted. - SimpleStatement select = - SimpleStatement.builder("select k,v from test2 where k=?") - .addPositionalValue(name.getMethodName()) - .build(); - - ResultSet result = SESSION_RULE.session().execute(select); - List rows = result.all(); - assertThat(rows).hasSize(1); - - Row row = rows.iterator().next(); - assertThat(row.getString("k")).isEqualTo(name.getMethodName()); - assertThat(row.getObject("v")).isNull(); - } - - @Test(expected = InvalidQueryException.class) - public void should_fail_when_too_many_positional_values_provided() { - // given a statement with more bound values than anticipated (3 given vs. 2 expected) - SimpleStatement insert = - SimpleStatement.builder("INSERT into test (k, v) values (?, ?)") - .addPositionalValues(KEY, 0, 7) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then the server will throw an InvalidQueryException which is thrown up to the client. - } - - @Test(expected = InvalidQueryException.class) - public void should_fail_when_not_enough_positional_values_provided() { - // given a statement with not enough bound values (1 given vs. 
2 expected) - SimpleStatement insert = - SimpleStatement.builder("SELECT * from test where k = ? and v = ?") - .addPositionalValue(KEY) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then the server will throw an InvalidQueryException which is thrown up to the client. - } - - @Test - public void should_use_named_values() { - // given a statement with named values - SimpleStatement insert = - SimpleStatement.builder("INSERT into test2 (k, v) values (:k, :v)") - .addNamedValue("k", name.getMethodName()) - .addNamedValue("v", 7) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then we should be able to retrieve the data as inserted. - SimpleStatement select = - SimpleStatement.builder("select k,v from test2 where k=:k") - .addNamedValue("k", name.getMethodName()) - .build(); - - ResultSet result = SESSION_RULE.session().execute(select); - List rows = result.all(); - assertThat(rows).hasSize(1); - - Row row = rows.iterator().next(); - assertThat(row.getString("k")).isEqualTo(name.getMethodName()); - assertThat(row.getInt("v")).isEqualTo(7); - } - - @Test - public void should_allow_nulls_in_named_values() { - // given a statement with named values - SimpleStatement insert = - SimpleStatement.builder("INSERT into test2 (k, v) values (:k, :v)") - .addNamedValue("k", name.getMethodName()) - .addNamedValue("v", null) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then we should be able to retrieve the data as inserted. 
- SimpleStatement select = - SimpleStatement.builder("select k,v from test2 where k=:k") - .addNamedValue("k", name.getMethodName()) - .build(); - - ResultSet result = SESSION_RULE.session().execute(select); - List rows = result.all(); - assertThat(rows).hasSize(1); - - Row row = rows.iterator().next(); - assertThat(row.getString("k")).isEqualTo(name.getMethodName()); - assertThat(row.getObject("v")).isNull(); - } - - @Test(expected = InvalidQueryException.class) - public void should_fail_when_named_value_missing() { - // given a statement with a missing named value (:k) - SimpleStatement insert = - SimpleStatement.builder("SELECT * from test where k = :k and v = :v") - .addNamedValue("v", 0) - .build(); - - // when executing that statement - SESSION_RULE.session().execute(insert); - - // then the server will throw an InvalidQueryException which is thrown up to the client. - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_when_mixing_named_and_positional_values() { - SimpleStatement.builder("SELECT * from test where k = :k and v = :v") - .addNamedValue("k", KEY) - .addPositionalValue(0) - .build(); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_when_mixing_positional_and_named_values() { - SimpleStatement.builder("SELECT * from test where k = :k and v = :v") - .addPositionalValue(0) - .addNamedValue("k", KEY) - .build(); - } - - @Test - public void should_use_positional_value_with_case_sensitive_id() { - SimpleStatement statement = - SimpleStatement.builder("SELECT count(*) FROM test2 WHERE k=:\"theKey\"") - .addNamedValue(CqlIdentifier.fromCql("\"theKey\""), 0) - .build(); - Row row = SESSION_RULE.session().execute(statement).one(); - assertThat(row.getLong(0)).isEqualTo(0); - } - - @Test - public void should_use_page_size() { - Statement st = SimpleStatement.builder("SELECT v FROM test").setPageSize(10).build(); - CompletionStage future = SESSION_RULE.session().executeAsync(st); - AsyncResultSet 
result = CompletableFutures.getUninterruptibly(future); - - // Should have only fetched 10 (page size) rows. - assertThat(result.remaining()).isEqualTo(10); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementSimulacronIT.java deleted file mode 100644 index bb8b4f6b731..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementSimulacronIT.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.cql; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import java.time.Duration; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SimpleStatementSimulacronIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - @Before - public void clearPrimes() { - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Test - public void should_use_consistencies() { - SimpleStatement st = - SimpleStatement.builder("SELECT * FROM test where k = ?") - 
.setConsistencyLevel(DefaultConsistencyLevel.TWO) - .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - SESSION_RULE.session().execute(st); - - List logs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs(); - assertThat(logs).hasSize(1); - - QueryLog log = logs.get(0); - - Message message = log.getFrame().message; - assertThat(message).isInstanceOf(Query.class); - Query query = (Query) message; - assertThat(query.options.consistency).isEqualTo(DefaultConsistencyLevel.TWO.getProtocolCode()); - assertThat(query.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL.getProtocolCode()); - } - - @Test - public void should_use_timeout() { - SIMULACRON_RULE - .cluster() - .prime(when("mock query").then(noRows()).delay(1500, TimeUnit.MILLISECONDS)); - SimpleStatement st = - SimpleStatement.builder("mock query") - .setTimeout(Duration.ofSeconds(1)) - .setConsistencyLevel(DefaultConsistencyLevel.ONE) - .build(); - - Throwable t = catchThrowable(() -> SESSION_RULE.session().execute(st)); - - assertThat(t) - .isInstanceOf(DriverTimeoutException.class) - .hasMessage("Query timed out after PT1S"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/DefaultReactiveResultSetIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/DefaultReactiveResultSetIT.java deleted file mode 100644 index c00cf064e51..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/DefaultReactiveResultSetIT.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.cql.reactive; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import edu.umd.cs.findbugs.annotations.NonNull; -import io.reactivex.Flowable; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import org.junit.Before; -import org.junit.BeforeClass; 
-import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -@Category(ParallelizableTests.class) -public class DefaultReactiveResultSetIT { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @BeforeClass - public static void initialize() { - CqlSession session = sessionRule.session(); - SchemaChangeSynchronizer.withLock( - () -> { - session.execute("DROP TABLE IF EXISTS test_reactive_read"); - session.execute("DROP TABLE IF EXISTS test_reactive_write"); - session.checkSchemaAgreement(); - session.execute( - SimpleStatement.builder( - "CREATE TABLE test_reactive_read (pk int, cc int, v int, PRIMARY KEY ((pk), cc))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder( - "CREATE TABLE test_reactive_write (pk int, cc int, v int, PRIMARY KEY ((pk), cc))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.checkSchemaAgreement(); - }); - for (int i = 0; i < 1000; i++) { - session.execute( - SimpleStatement.builder("INSERT INTO test_reactive_read (pk, cc, v) VALUES (0, ?, ?)") - .addPositionalValue(i) - .addPositionalValue(i) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - } - - @Before - public void truncateTables() throws Exception { - CqlSession session = sessionRule.session(); - session.execute( - SimpleStatement.builder("TRUNCATE test_reactive_write") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - - @Test - @DataProvider( - value = {"1", "10", "100", "999", "1000", "1001", "2000"}, - format = "%m [page size %p[0]]") - public void should_retrieve_all_rows(int pageSize) { - 
DriverExecutionProfile profile = - sessionRule - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, pageSize); - SimpleStatement statement = - SimpleStatement.builder("SELECT cc, v FROM test_reactive_read WHERE pk = 0") - .setExecutionProfile(profile) - .build(); - ReactiveResultSet rs = sessionRule.session().executeReactive(statement); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results.size()).isEqualTo(1000); - Set expectedExecInfos = new LinkedHashSet<>(); - for (int i = 0; i < results.size(); i++) { - ReactiveRow row = results.get(i); - assertThat(row.getColumnDefinitions()).isNotNull(); - assertThat(row.getExecutionInfo()).isNotNull(); - assertThat(row.wasApplied()).isTrue(); - assertThat(row.getInt("cc")).isEqualTo(i); - assertThat(row.getInt("v")).isEqualTo(i); - expectedExecInfos.add(row.getExecutionInfo()); - } - - List execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - // DSE may send an empty page as it can't always know if it's done paging or not yet. - // See: CASSANDRA-8871. In this case, this page's execution info appears in - // rs.getExecutionInfos(), but is not present in expectedExecInfos since the page did not - // contain any rows. 
- assertThat(execInfos).containsAll(expectedExecInfos); - - List colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - ReactiveRow first = results.get(0); - assertThat(colDefs).hasSize(1).containsExactly(first.getColumnDefinitions()); - - List wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(first.wasApplied()); - } - - @Test - public void should_write() { - SimpleStatement statement = - SimpleStatement.builder("INSERT INTO test_reactive_write (pk, cc, v) VALUES (?, ?, ?)") - .addPositionalValue(0) - .addPositionalValue(1) - .addPositionalValue(2) - .setExecutionProfile(sessionRule.slowProfile()) - .build(); - ReactiveResultSet rs = sessionRule.session().executeReactive(statement); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).isEmpty(); - - List execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - assertThat(execInfos).hasSize(1); - - List colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - assertThat(colDefs).hasSize(1).containsExactly(EmptyColumnDefinitions.INSTANCE); - - List wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(true); - } - - @Test - public void should_write_cas() { - SimpleStatement statement = - SimpleStatement.builder( - "INSERT INTO test_reactive_write (pk, cc, v) VALUES (?, ?, ?) 
IF NOT EXISTS") - .addPositionalValue(0) - .addPositionalValue(1) - .addPositionalValue(2) - .setExecutionProfile(sessionRule.slowProfile()) - .build(); - // execute statement for the first time: the insert should succeed and the server should return - // only one acknowledgement row with just the [applied] column = true - ReactiveResultSet rs = sessionRule.session().executeReactive(statement); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).hasSize(1); - ReactiveRow row = results.get(0); - assertThat(row.getExecutionInfo()).isNotNull(); - assertThat(row.getColumnDefinitions()).hasSize(1); - assertThat(row.wasApplied()).isTrue(); - assertThat(row.getBoolean("[applied]")).isTrue(); - - List execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); - - List colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); - - List wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); - - // re-execute same statement: server should return one row with data that failed to be inserted, - // with [applied] = false - rs = sessionRule.session().executeReactive(statement); - results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).hasSize(1); - row = results.get(0); - assertThat(row.getExecutionInfo()).isNotNull(); - assertThat(row.getColumnDefinitions()).hasSize(4); - assertThat(row.wasApplied()).isFalse(); - assertThat(row.getBoolean("[applied]")).isFalse(); - assertThat(row.getInt("pk")).isEqualTo(0); - assertThat(row.getInt("cc")).isEqualTo(1); - assertThat(row.getInt("v")).isEqualTo(2); - - execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - 
assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); - - colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); - - wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); - } - - @Test - public void should_write_batch_cas() { - BatchStatement batch = createCASBatch(); - CqlSession session = sessionRule.session(); - // execute batch for the first time: all inserts should succeed and the server should return - // only one acknowledgement row with just the [applied] column = true - ReactiveResultSet rs = session.executeReactive(batch); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).hasSize(1); - ReactiveRow row = results.get(0); - assertThat(row.getExecutionInfo()).isNotNull(); - assertThat(row.getColumnDefinitions()).hasSize(1); - assertThat(row.wasApplied()).isTrue(); - assertThat(row.getBoolean("[applied]")).isTrue(); - - List execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); - - List colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); - - List wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); - - // delete 5 out of 10 rows - partiallyDeleteInsertedRows(); - - // re-execute same statement: server should return 5 rows for the 5 failed inserts, each one - // with [applied] = false - rs = session.executeReactive(batch); - results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).hasSize(5); - for (int i = 0; i < 5; i++) { - row = results.get(i); - 
assertThat(row.getExecutionInfo()).isNotNull(); - assertThat(row.getColumnDefinitions()).hasSize(4); - assertThat(row.wasApplied()).isFalse(); - assertThat(row.getBoolean("[applied]")).isFalse(); - assertThat(row.getInt("pk")).isEqualTo(0); - assertThat(row.getInt("cc")).isEqualTo(i); - assertThat(row.getInt("v")).isEqualTo(i + 1); - } - - execInfos = - Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); - assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); - - colDefs = - Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); - assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); - - wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); - assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); - } - - @NonNull - private static BatchStatement createCASBatch() { - // Build a batch with CAS operations on the same partition (conditional batch updates cannot - // span multiple partitions). - BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); - SimpleStatement insert = - SimpleStatement.builder( - "INSERT INTO test_reactive_write (pk, cc, v) VALUES (0, ?, ?) 
IF NOT EXISTS") - .setExecutionProfile(sessionRule.slowProfile()) - .build(); - PreparedStatement preparedStatement = sessionRule.session().prepare(insert); - for (int i = 0; i < 10; i++) { - builder.addStatement(preparedStatement.bind(i, i + 1)); - } - return builder.build(); - } - - private static void partiallyDeleteInsertedRows() { - CqlSession session = sessionRule.session(); - session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 5"); - session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 6"); - session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 7"); - session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 8"); - session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 9"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/ReactiveRetryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/ReactiveRetryIT.java deleted file mode 100644 index e59c29f4262..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/ReactiveRetryIT.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.cql.reactive; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.internal.verification.VerificationModeFactory.times; - -import com.codahale.metrics.Metric; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.loadbalancing.NodeComparator; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.core.retry.PerProfileRetryPolicyIT.NoRetryPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; -import com.datastax.oss.simulacron.server.BoundCluster; -import 
com.google.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.reactivex.Flowable; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Queue; -import java.util.TreeSet; -import java.util.UUID; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** Small test to validate the application-level retry behavior explained in the manual. */ -@Category(ParallelizableTests.class) -public class ReactiveRetryIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - CyclingLoadBalancingPolicy.class) - .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, NoRetryPolicy.class) - .withStringList( - DefaultDriverOption.METRICS_NODE_ENABLED, - Collections.singletonList("errors.request.unavailables")) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static final String QUERY_STRING = "select * from foo"; - - private List nodes; - - @Before - public void clearPrimes() { - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Before - public void createNodesList() { - nodes = new ArrayList<>(SESSION_RULE.session().getMetadata().getNodes().values()); - nodes.sort(NodeComparator.INSTANCE); - } - - @Test - public void 
should_retry_at_application_level() { - // Given - CqlSession session = spy(SESSION_RULE.session()); - BoundCluster cluster = SIMULACRON_RULE.cluster(); - cluster.node(0).prime(when(QUERY_STRING).then(unavailable(ConsistencyLevel.ONE, 1, 0))); - cluster.node(1).prime(when(QUERY_STRING).then(unavailable(ConsistencyLevel.ONE, 1, 0))); - cluster.node(2).prime(when(QUERY_STRING).then(rows().row("col1", "Yay!"))); - - // When - ReactiveRow row = - Flowable.defer(() -> session.executeReactive(QUERY_STRING)) - .retry( - (retry, error) -> { - assertThat(error).isInstanceOf(UnavailableException.class); - UnavailableException ue = (UnavailableException) error; - Node coordinator = ue.getCoordinator(); - if (retry == 1) { - assertCoordinator(0, coordinator); - return true; - } else if (retry == 2) { - assertCoordinator(1, coordinator); - return true; - } else { - fail("Unexpected retry attempt"); - return false; - } - }) - .blockingLast(); - - // Then - assertThat(row.getString(0)).isEqualTo("Yay!"); - verify(session, times(3)).executeReactive(QUERY_STRING); - assertUnavailableMetric(0, 1L); - assertUnavailableMetric(1, 1L); - assertUnavailableMetric(2, 0L); - } - - private void assertCoordinator(int expectedNodeIndex, Node actual) { - Node expected = nodes.get(expectedNodeIndex); - assertThat(actual).isSameAs(expected); - } - - private void assertUnavailableMetric(int nodeIndex, long expectedUnavailableCount) { - Metrics metrics = SESSION_RULE.session().getMetrics().orElseThrow(AssertionError::new); - Node node = nodes.get(nodeIndex); - Optional expectedMetric = metrics.getNodeMetric(node, DefaultNodeMetric.UNAVAILABLES); - assertThat(expectedMetric) - .isPresent() - .hasValueSatisfying( - metric -> assertThat(metric).extracting("count").isEqualTo(expectedUnavailableCount)); - } - - public static class CyclingLoadBalancingPolicy implements LoadBalancingPolicy { - - private final TreeSet nodes = new TreeSet<>(NodeComparator.INSTANCE); - private volatile Iterator iterator = 
Iterables.cycle(nodes).iterator(); - - @SuppressWarnings("unused") - public CyclingLoadBalancingPolicy(DriverContext context, String profileName) { - // constructor needed for loading via config. - } - - @Override - public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { - this.nodes.addAll(nodes.values()); - this.nodes.forEach(n -> distanceReporter.setDistance(n, NodeDistance.LOCAL)); - iterator = Iterables.cycle(this.nodes).iterator(); - } - - @NonNull - @Override - public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { - return new ArrayDeque<>(Collections.singleton(iterator.next())); - } - - @Override - public void onAdd(@NonNull Node node) {} - - @Override - public void onUp(@NonNull Node node) {} - - @Override - public void onDown(@NonNull Node node) {} - - @Override - public void onRemove(@NonNull Node node) {} - - @Override - public void close() {} - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/data/DataTypeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/data/DataTypeIT.java deleted file mode 100644 index e3d891454de..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/data/DataTypeIT.java +++ /dev/null @@ -1,828 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.data; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.fail; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.data.CqlDuration; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.data.SettableByIndex; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.data.TupleValue; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.CustomType; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import 
com.datastax.oss.driver.internal.core.type.DefaultListType; -import com.datastax.oss.driver.internal.core.type.DefaultMapType; -import com.datastax.oss.driver.internal.core.type.DefaultSetType; -import com.datastax.oss.driver.internal.core.type.DefaultTupleType; -import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@Category(ParallelizableTests.class) -@RunWith(DataProviderRunner.class) -public class DataTypeIT { - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Rule public TestName name = new TestName(); - - private static final Map typeToColumnName = new HashMap<>(); - - private static final AtomicInteger 
typeCounter = new AtomicInteger(); - - private static final Map, String> userTypeToTypeName = new HashMap<>(); - - private static final AtomicInteger keyCounter = new AtomicInteger(); - - private static final String tableName = "data_types_it"; - - @DataProvider - public static Object[][] primitiveTypeSamples() { - InetAddress address; - try { - address = InetAddress.getByAddress(new byte[] {127, 0, 0, 1}); - } catch (UnknownHostException uhae) { - throw new AssertionError("Could not get address from 127.0.0.1", uhae); - } - - Object[][] samples = - new Object[][] { - new Object[] {DataTypes.ASCII, "ascii"}, - new Object[] {DataTypes.BIGINT, Long.MAX_VALUE}, - new Object[] {DataTypes.BIGINT, null, 0L}, - new Object[] {DataTypes.BLOB, Bytes.fromHexString("0xCAFE")}, - new Object[] {DataTypes.BOOLEAN, Boolean.TRUE}, - new Object[] {DataTypes.BOOLEAN, null, false}, - new Object[] {DataTypes.DECIMAL, new BigDecimal("12.3E+7")}, - new Object[] {DataTypes.DOUBLE, Double.MAX_VALUE}, - new Object[] {DataTypes.DOUBLE, null, 0.0}, - new Object[] {DataTypes.FLOAT, Float.MAX_VALUE}, - new Object[] {DataTypes.FLOAT, null, 0.0f}, - new Object[] {DataTypes.INET, address}, - new Object[] {DataTypes.TINYINT, Byte.MAX_VALUE}, - new Object[] {DataTypes.TINYINT, null, (byte) 0}, - new Object[] {DataTypes.SMALLINT, Short.MAX_VALUE}, - new Object[] {DataTypes.SMALLINT, null, (short) 0}, - new Object[] {DataTypes.INT, Integer.MAX_VALUE}, - new Object[] {DataTypes.INT, null, 0}, - new Object[] {DataTypes.DURATION, CqlDuration.from("PT30H20M")}, - new Object[] {DataTypes.TEXT, "text"}, - new Object[] {DataTypes.TIMESTAMP, Instant.ofEpochMilli(872835240000L)}, - new Object[] {DataTypes.DATE, LocalDate.ofEpochDay(16071)}, - new Object[] {DataTypes.TIME, LocalTime.ofNanoOfDay(54012123450000L)}, - new Object[] { - DataTypes.TIMEUUID, UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66") - }, - new Object[] {DataTypes.UUID, UUID.fromString("067e6162-3b6f-4ae2-a171-2470b63dff00")}, - new 
Object[] { - DataTypes.VARINT, new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000") - } - }; - - Version version = CCM_RULE.getCassandraVersion(); - // Filter types if they aren't supported by cassandra version in use. - return Arrays.stream(samples) - .filter( - o -> { - DataType dataType = (DataType) o[0]; - if (dataType == DataTypes.DURATION) { - return version.compareTo(Version.parse("3.10")) >= 0; - } else if (dataType == DataTypes.TINYINT - || dataType == DataTypes.SMALLINT - || dataType == DataTypes.DATE - || dataType == DataTypes.TIME) { - return version.compareTo(Version.V2_2_0) >= 0; - } - return true; - }) - .toArray(Object[][]::new); - } - - @DataProvider - @SuppressWarnings("unchecked") - public static Object[][] typeSamples() { - Object[][] primitiveSamples = primitiveTypeSamples(); - - // Build additional data samples from primitive type samples. For each sample: - // 1) include the sample itself. - // 2) include list. - // 3) include set. - // 4) include map - // 5) include map - // 6) include tuple - // 7) include udt - // 8) include vector - return Arrays.stream(primitiveSamples) - .flatMap( - o -> { - List samples = new ArrayList<>(); - samples.add(o); - - if (o[1] == null) { - // Don't use null values in collections. - return samples.stream(); - } - - DataType dataType = (DataType) o[0]; - - // list of type. - ListType listType = new DefaultListType((DataType) o[0], false); - List data = Collections.singletonList(o[1]); - samples.add(new Object[] {listType, data}); - - // set of type. - if (dataType != DataTypes.DURATION) { - // durations can't be in sets. - SetType setType = new DefaultSetType((DataType) o[0], false); - Set s = Collections.singleton(o[1]); - samples.add(new Object[] {setType, s}); - } - - // map of int, type. 
- MapType mapOfTypeElement = new DefaultMapType(DataTypes.INT, (DataType) o[0], false); - Map mElement = new HashMap(); - mElement.put(0, o[1]); - samples.add(new Object[] {mapOfTypeElement, mElement}); - - // map of type, int. - if (dataType != DataTypes.DURATION) { - // durations can't be map keys. - MapType mapOfTypeKey = new DefaultMapType((DataType) o[0], DataTypes.INT, false); - Map mKey = new HashMap(); - mKey.put(o[1], 0); - samples.add(new Object[] {mapOfTypeKey, mKey}); - } - - // tuple of int, type. - List types = new ArrayList<>(); - types.add(DataTypes.INT); - types.add(dataType); - TupleType tupleType = new DefaultTupleType(types); - TupleValue tupleValue = tupleType.newValue(); - tupleValue = tupleValue.setInt(0, 0); - setValue(1, tupleValue, dataType, o[1]); - samples.add(new Object[] {tupleType, tupleValue}); - - // tuple of int, type, created using newValue - TupleValue tupleValue2 = tupleType.newValue(1, o[1]); - samples.add(new Object[] {tupleType, tupleValue2}); - - // udt of int, type. 
- final AtomicInteger fieldNameCounter = new AtomicInteger(); - List typeNames = - types.stream() - .map( - n -> CqlIdentifier.fromCql("field_" + fieldNameCounter.incrementAndGet())) - .collect(Collectors.toList()); - - UserDefinedType udt = - new DefaultUserDefinedType( - SESSION_RULE.keyspace(), - CqlIdentifier.fromCql(userTypeFor(types)), - false, - typeNames, - types); - - UdtValue udtValue = udt.newValue(); - udtValue = udtValue.setInt(0, 0); - setValue(1, udtValue, dataType, o[1]); - samples.add(new Object[] {udt, udtValue}); - - // udt of int, type, created using newValue - UdtValue udtValue2 = udt.newValue(1, o[1]); - samples.add(new Object[] {udt, udtValue2}); - - if (CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) >= 0) { - // vector of type - CqlVector vector = CqlVector.newInstance(o[1]); - samples.add(new Object[] {DataTypes.vectorOf(dataType, 1), vector}); - } - - return samples.stream(); - }) - .toArray(Object[][]::new); - } - - @DataProvider - public static Object[][] addVectors() { - Object[][] previousSamples = typeSamples(); - if (CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) < 0) return previousSamples; - return Arrays.stream(previousSamples) - .flatMap( - o -> { - List samples = new ArrayList<>(); - samples.add(o); - if (o[1] == null) return samples.stream(); - DataType dataType = (DataType) o[0]; - CqlVector vector = CqlVector.newInstance(o[1]); - samples.add(new Object[] {DataTypes.vectorOf(dataType, 1), vector}); - return samples.stream(); - }) - .toArray(Object[][]::new); - } - - @BeforeClass - public static void createTable() { - // Create a table with all types being tested with. - // This is a bit more lenient than creating a table for each sample, which would put a lot of - // burden on C* and - // the filesystem. 
- int counter = 0; - - List columnData = new ArrayList<>(); - - for (Object[] sample : addVectors()) { - DataType dataType = (DataType) sample[0]; - - if (!typeToColumnName.containsKey(dataType)) { - int columnIndex = ++counter; - String columnName = "column_" + columnIndex; - typeToColumnName.put(dataType, columnName); - columnData.add(String.format("%s %s", columnName, typeFor(dataType))); - } - } - - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - String.format( - "CREATE TABLE IF NOT EXISTS %s (k int primary key, %s)", - tableName, String.join(",", columnData))) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - private static String columnNameFor(DataType dataType) { - return typeToColumnName.get(dataType); - } - - private static int nextKey() { - return keyCounter.incrementAndGet(); - } - - @UseDataProvider("addVectors") - @Test - public void should_insert_non_primary_key_column_simple_statement_using_format( - DataType dataType, K value, K expectedPrimitiveValue) { - TypeCodec codec = SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType); - - int key = nextKey(); - String columnName = columnNameFor(dataType); - - SimpleStatement insert = - SimpleStatement.builder( - String.format( - "INSERT INTO %s (k, %s) values (?, %s)", - tableName, columnName, codec.format(value))) - .addPositionalValue(key) - .build(); - - SESSION_RULE.session().execute(insert); - - SimpleStatement select = - SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) - .addPositionalValue(key) - .build(); - - readValue(select, dataType, value, expectedPrimitiveValue); - } - - @UseDataProvider("addVectors") - @Test - public void should_insert_non_primary_key_column_simple_statement_positional_value( - DataType dataType, K value, K expectedPrimitiveValue) { - int key = nextKey(); - String columnName = columnNameFor(dataType); - - SimpleStatement insert = - SimpleStatement.builder( - 
String.format("INSERT INTO %s (k, %s) values (?, ?)", tableName, columnName)) - .addPositionalValues(key, value) - .build(); - - SESSION_RULE.session().execute(insert); - - SimpleStatement select = - SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) - .addPositionalValue(key) - .build(); - - readValue(select, dataType, value, expectedPrimitiveValue); - } - - @UseDataProvider("addVectors") - @Test - public void should_insert_non_primary_key_column_simple_statement_named_value( - DataType dataType, K value, K expectedPrimitiveValue) { - int key = nextKey(); - String columnName = columnNameFor(dataType); - - SimpleStatement insert = - SimpleStatement.builder( - String.format("INSERT INTO %s (k, %s) values (:k, :v)", tableName, columnName)) - .addNamedValue("k", key) - .addNamedValue("v", value) - .build(); - - SESSION_RULE.session().execute(insert); - - SimpleStatement select = - SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) - .addNamedValue("k", key) - .build(); - - readValue(select, dataType, value, expectedPrimitiveValue); - } - - @UseDataProvider("addVectors") - @Test - public void should_insert_non_primary_key_column_bound_statement_positional_value( - DataType dataType, K value, K expectedPrimitiveValue) { - int key = nextKey(); - String columnName = columnNameFor(dataType); - - SimpleStatement insert = - SimpleStatement.builder( - String.format("INSERT INTO %s (k, %s) values (?, ?)", tableName, columnName)) - .build(); - - PreparedStatement preparedInsert = SESSION_RULE.session().prepare(insert); - BoundStatementBuilder boundBuilder = preparedInsert.boundStatementBuilder(); - boundBuilder = setValue(0, boundBuilder, DataTypes.INT, key); - boundBuilder = setValue(1, boundBuilder, dataType, value); - BoundStatement boundInsert = boundBuilder.build(); - SESSION_RULE.session().execute(boundInsert); - - SimpleStatement select = - SimpleStatement.builder(String.format("SELECT %s 
FROM %s where k=?", columnName, tableName)) - .build(); - - PreparedStatement preparedSelect = SESSION_RULE.session().prepare(select); - BoundStatement boundSelect = setValue(0, preparedSelect.bind(), DataTypes.INT, key); - - readValue(boundSelect, dataType, value, expectedPrimitiveValue); - } - - @UseDataProvider("addVectors") - @Test - public void should_insert_non_primary_key_column_bound_statement_named_value( - DataType dataType, K value, K expectedPrimitiveValue) { - int key = nextKey(); - String columnName = columnNameFor(dataType); - - SimpleStatement insert = - SimpleStatement.builder( - String.format("INSERT INTO %s (k, %s) values (:k, :v)", tableName, columnName)) - .build(); - - PreparedStatement preparedInsert = SESSION_RULE.session().prepare(insert); - BoundStatementBuilder boundBuilder = preparedInsert.boundStatementBuilder(); - boundBuilder = setValue("k", boundBuilder, DataTypes.INT, key); - boundBuilder = setValue("v", boundBuilder, dataType, value); - BoundStatement boundInsert = boundBuilder.build(); - SESSION_RULE.session().execute(boundInsert); - - SimpleStatement select = - SimpleStatement.builder( - String.format("SELECT %s FROM %s where k=:k", columnName, tableName)) - .build(); - - PreparedStatement preparedSelect = SESSION_RULE.session().prepare(select); - BoundStatement boundSelect = setValue("k", preparedSelect.bind(), DataTypes.INT, key); - boundSelect = boundSelect.setInt("k", key); - - readValue(boundSelect, dataType, value, expectedPrimitiveValue); - } - - private static > S setValue( - int index, S bs, DataType dataType, Object value) { - TypeCodec codec = - SESSION_RULE.session() != null - ? SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType) - : null; - - // set to null if value is null instead of getting possible NPE when casting from null to - // primitive. 
- if (value == null) { - return bs.setToNull(index); - } - - switch (dataType.getProtocolCode()) { - case ProtocolConstants.DataType.ASCII: - case ProtocolConstants.DataType.VARCHAR: - bs = bs.setString(index, (String) value); - break; - case ProtocolConstants.DataType.BIGINT: - bs = bs.setLong(index, (long) value); - break; - case ProtocolConstants.DataType.BLOB: - bs = bs.setByteBuffer(index, (ByteBuffer) value); - break; - case ProtocolConstants.DataType.BOOLEAN: - bs = bs.setBoolean(index, (boolean) value); - break; - case ProtocolConstants.DataType.DECIMAL: - bs = bs.setBigDecimal(index, (BigDecimal) value); - break; - case ProtocolConstants.DataType.DOUBLE: - bs = bs.setDouble(index, (double) value); - break; - case ProtocolConstants.DataType.FLOAT: - bs = bs.setFloat(index, (float) value); - break; - case ProtocolConstants.DataType.INET: - bs = bs.setInetAddress(index, (InetAddress) value); - break; - case ProtocolConstants.DataType.TINYINT: - bs = bs.setByte(index, (byte) value); - break; - case ProtocolConstants.DataType.SMALLINT: - bs = bs.setShort(index, (short) value); - break; - case ProtocolConstants.DataType.INT: - bs = bs.setInt(index, (int) value); - break; - case ProtocolConstants.DataType.DURATION: - bs = bs.setCqlDuration(index, (CqlDuration) value); - break; - case ProtocolConstants.DataType.TIMESTAMP: - bs = bs.setInstant(index, (Instant) value); - break; - case ProtocolConstants.DataType.DATE: - bs = bs.setLocalDate(index, (LocalDate) value); - break; - case ProtocolConstants.DataType.TIME: - bs = bs.setLocalTime(index, (LocalTime) value); - break; - case ProtocolConstants.DataType.TIMEUUID: - case ProtocolConstants.DataType.UUID: - bs = bs.setUuid(index, (UUID) value); - break; - case ProtocolConstants.DataType.VARINT: - bs = bs.setBigInteger(index, (BigInteger) value); - break; - case ProtocolConstants.DataType.CUSTOM: - if (((CustomType) dataType) - .getClassName() - .equals("org.apache.cassandra.db.marshal.DurationType")) { - bs = 
bs.setCqlDuration(index, (CqlDuration) value); - break; - } - // fall through - case ProtocolConstants.DataType.LIST: - case ProtocolConstants.DataType.SET: - case ProtocolConstants.DataType.MAP: - bs = bs.set(index, value, codec); - break; - case ProtocolConstants.DataType.TUPLE: - bs = bs.setTupleValue(index, (TupleValue) value); - break; - case ProtocolConstants.DataType.UDT: - bs = bs.setUdtValue(index, (UdtValue) value); - break; - default: - fail("Unhandled DataType " + dataType); - } - return bs; - } - - private static > S setValue( - String name, S bs, DataType dataType, Object value) { - TypeCodec codec = - SESSION_RULE.session() != null - ? SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType) - : null; - - // set to null if value is null instead of getting possible NPE when casting from null to - // primitive. - if (value == null) { - return bs.setToNull(name); - } - - switch (dataType.getProtocolCode()) { - case ProtocolConstants.DataType.ASCII: - case ProtocolConstants.DataType.VARCHAR: - bs = bs.setString(name, (String) value); - break; - case ProtocolConstants.DataType.BIGINT: - bs = bs.setLong(name, (long) value); - break; - case ProtocolConstants.DataType.BLOB: - bs = bs.setByteBuffer(name, (ByteBuffer) value); - break; - case ProtocolConstants.DataType.BOOLEAN: - bs = bs.setBoolean(name, (boolean) value); - break; - case ProtocolConstants.DataType.DECIMAL: - bs = bs.setBigDecimal(name, (BigDecimal) value); - break; - case ProtocolConstants.DataType.DOUBLE: - bs = bs.setDouble(name, (double) value); - break; - case ProtocolConstants.DataType.FLOAT: - bs = bs.setFloat(name, (float) value); - break; - case ProtocolConstants.DataType.INET: - bs = bs.setInetAddress(name, (InetAddress) value); - break; - case ProtocolConstants.DataType.TINYINT: - bs = bs.setByte(name, (byte) value); - break; - case ProtocolConstants.DataType.SMALLINT: - bs = bs.setShort(name, (short) value); - break; - case ProtocolConstants.DataType.INT: - bs = 
bs.setInt(name, (int) value); - break; - case ProtocolConstants.DataType.DURATION: - bs = bs.setCqlDuration(name, (CqlDuration) value); - break; - case ProtocolConstants.DataType.TIMESTAMP: - bs = bs.setInstant(name, (Instant) value); - break; - case ProtocolConstants.DataType.DATE: - bs = bs.setLocalDate(name, (LocalDate) value); - break; - case ProtocolConstants.DataType.TIME: - bs = bs.setLocalTime(name, (LocalTime) value); - break; - case ProtocolConstants.DataType.TIMEUUID: - case ProtocolConstants.DataType.UUID: - bs = bs.setUuid(name, (UUID) value); - break; - case ProtocolConstants.DataType.VARINT: - bs = bs.setBigInteger(name, (BigInteger) value); - break; - case ProtocolConstants.DataType.CUSTOM: - if (((CustomType) dataType) - .getClassName() - .equals("org.apache.cassandra.db.marshal.DurationType")) { - bs = bs.setCqlDuration(name, (CqlDuration) value); - break; - } - // fall through - case ProtocolConstants.DataType.LIST: - case ProtocolConstants.DataType.SET: - case ProtocolConstants.DataType.MAP: - bs = bs.set(name, value, codec); - break; - case ProtocolConstants.DataType.TUPLE: - bs = bs.setTupleValue(name, (TupleValue) value); - break; - case ProtocolConstants.DataType.UDT: - bs = bs.setUdtValue(name, (UdtValue) value); - break; - default: - fail("Unhandled DataType " + dataType); - } - return bs; - } - - private void readValue( - Statement select, DataType dataType, K value, K expectedPrimitiveValue) { - TypeCodec codec = - SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType); - ResultSet result = SESSION_RULE.session().execute(select); - - String columnName = columnNameFor(dataType); - - List rows = result.all(); - assertThat(rows).hasSize(1); - - Row row = rows.iterator().next(); - - K expectedValue = expectedPrimitiveValue != null ? 
expectedPrimitiveValue : value; - - switch (dataType.getProtocolCode()) { - case ProtocolConstants.DataType.ASCII: - case ProtocolConstants.DataType.VARCHAR: - assertThat(row.getString(columnName)).isEqualTo(expectedValue); - assertThat(row.getString(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.BIGINT: - assertThat(row.getLong(columnName)).isEqualTo(expectedValue); - assertThat(row.getLong(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.BLOB: - assertThat(row.getByteBuffer(columnName)).isEqualTo(expectedValue); - assertThat(row.getByteBuffer(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.BOOLEAN: - assertThat(row.getBoolean(columnName)).isEqualTo(expectedValue); - assertThat(row.getBoolean(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.DECIMAL: - assertThat(row.getBigDecimal(columnName)).isEqualTo(expectedValue); - assertThat(row.getBigDecimal(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.DOUBLE: - assertThat(row.getDouble(columnName)).isEqualTo(expectedValue); - assertThat(row.getDouble(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.FLOAT: - assertThat(row.getFloat(columnName)).isEqualTo(expectedValue); - assertThat(row.getFloat(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.INET: - assertThat(row.getInetAddress(columnName)).isEqualTo(expectedValue); - assertThat(row.getInetAddress(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.TINYINT: - assertThat(row.getByte(columnName)).isEqualTo(expectedValue); - assertThat(row.getByte(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.SMALLINT: - assertThat(row.getShort(columnName)).isEqualTo(expectedValue); - assertThat(row.getShort(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.INT: - assertThat(row.getInt(columnName)).isEqualTo(expectedValue); - 
assertThat(row.getInt(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.DURATION: - assertThat(row.getCqlDuration(columnName)).isEqualTo(expectedValue); - assertThat(row.getCqlDuration(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.TIMESTAMP: - assertThat(row.getInstant(columnName)).isEqualTo(expectedValue); - assertThat(row.getInstant(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.DATE: - assertThat(row.getLocalDate(columnName)).isEqualTo(expectedValue); - assertThat(row.getLocalDate(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.TIME: - assertThat(row.getLocalTime(columnName)).isEqualTo(expectedValue); - assertThat(row.getLocalTime(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.TIMEUUID: - case ProtocolConstants.DataType.UUID: - assertThat(row.getUuid(columnName)).isEqualTo(expectedValue); - assertThat(row.getUuid(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.VARINT: - assertThat(row.getBigInteger(columnName)).isEqualTo(expectedValue); - assertThat(row.getBigInteger(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.CUSTOM: - if (((CustomType) dataType) - .getClassName() - .equals("org.apache.cassandra.db.marshal.DurationType")) { - assertThat(row.getCqlDuration(columnName)).isEqualTo(expectedValue); - assertThat(row.getCqlDuration(0)).isEqualTo(expectedValue); - break; - } - // fall through - case ProtocolConstants.DataType.LIST: - case ProtocolConstants.DataType.MAP: - case ProtocolConstants.DataType.SET: - assertThat(row.get(columnName, codec)).isEqualTo(expectedValue); - assertThat(row.get(0, codec)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.TUPLE: - TupleValue returnedValue = row.getTupleValue(columnName); - TupleValue exValue = (TupleValue) expectedValue; - - assertThat(returnedValue.getType()).isEqualTo(exValue.getType()); - 
assertThat(row.getTupleValue(columnName)).isEqualTo(expectedValue); - assertThat(row.getTupleValue(0)).isEqualTo(expectedValue); - break; - case ProtocolConstants.DataType.UDT: - UdtValue returnedUdtValue = row.getUdtValue(columnName); - UdtValue exUdtValue = (UdtValue) expectedValue; - - assertThat(returnedUdtValue.getType()).isEqualTo(exUdtValue.getType()); - assertThat(row.getUdtValue(columnName)).isEqualTo(expectedValue); - assertThat(row.getUdtValue(0)).isEqualTo(expectedValue); - break; - default: - fail("Unhandled DataType " + dataType); - } - - if (value == null) { - assertThat(row.isNull(columnName)).isTrue(); - assertThat(row.isNull(0)).isTrue(); - } - - // Decode directly using the codec - ProtocolVersion protocolVersion = SESSION_RULE.session().getContext().getProtocolVersion(); - assertThat(codec.decode(row.getBytesUnsafe(columnName), protocolVersion)).isEqualTo(value); - assertThat(codec.decode(row.getBytesUnsafe(0), protocolVersion)).isEqualTo(value); - } - - private static String typeFor(DataType dataType) { - String typeName = dataType.asCql(true, true); - if (dataType instanceof UserDefinedType) { - UserDefinedType udt = (UserDefinedType) dataType; - - // Create type if it doesn't already exist. - List fieldParts = new ArrayList<>(); - for (int i = 0; i < udt.getFieldNames().size(); i++) { - String fieldName = udt.getFieldNames().get(i).asCql(false); - String fieldType = typeFor(udt.getFieldTypes().get(i)); - fieldParts.add(fieldName + " " + fieldType); - } - - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - String.format( - "CREATE TYPE IF NOT EXISTS %s (%s)", - udt.getName().asCql(false), String.join(",", fieldParts))) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - // Chances are the UDT isn't labeled as frozen in the context we're given, so we add it as - // older versions of C* don't support non-frozen UDTs. 
- if (!udt.isFrozen()) { - typeName = "frozen<" + typeName + ">"; - } - } - return typeName; - } - - private static String userTypeFor(List dataTypes) { - if (userTypeToTypeName.containsKey(dataTypes)) { - return userTypeToTypeName.get(dataTypes); - } else { - String typeName = "udt_" + typeCounter.incrementAndGet(); - userTypeToTypeName.put(dataTypes, typeName); - return typeName; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatDisabledIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatDisabledIT.java deleted file mode 100644 index 7d90f124fb3..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatDisabledIT.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.heartbeat; - -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.time.Duration; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.ClassRule; -import org.junit.Test; - -/** This test is separate from {@link HeartbeatIT} because it can't be parallelized. */ -public class HeartbeatDisabledIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(2)); - - @Test - public void should_not_send_heartbeat_when_disabled() throws InterruptedException { - // Disable heartbeats entirely, wait longer than the default timeout and make sure we didn't - // receive any - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.HEARTBEAT_INTERVAL, Duration.ofSeconds(0)) - .build(); - try (CqlSession ignored = SessionUtils.newSession(SIMULACRON_RULE, loader)) { - AtomicInteger heartbeats = registerHeartbeatListener(); - SECONDS.sleep(35); - - assertThat(heartbeats.get()).isZero(); - } - } - - private AtomicInteger registerHeartbeatListener() { - AtomicInteger nonControlHeartbeats = new AtomicInteger(); - SIMULACRON_RULE - .cluster() - .registerQueryListener( - (n, l) -> nonControlHeartbeats.incrementAndGet(), - false, - (l) -> l.getQuery().equals("OPTIONS")); - return nonControlHeartbeats; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatIT.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatIT.java deleted file mode 100644 index 26658bd76d1..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatIT.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.heartbeat; - -import static com.datastax.oss.driver.api.testinfra.utils.NodeUtils.waitForDown; -import static com.datastax.oss.driver.api.testinfra.utils.NodeUtils.waitForUp; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.closeConnection; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noResult; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.request.Register; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.datastax.oss.simulacron.common.request.Options; -import com.datastax.oss.simulacron.common.stubbing.CloseType; -import com.datastax.oss.simulacron.common.stubbing.DisconnectAction; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import com.datastax.oss.simulacron.server.BoundNode; -import com.datastax.oss.simulacron.server.RejectScope; -import java.net.SocketAddress; -import java.time.Duration; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; 
-import java.util.stream.Collectors; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class HeartbeatIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static final String QUERY = "select * from foo"; - private BoundNode simulacronNode; - - @Before - public void setUp() { - SIMULACRON_RULE.cluster().acceptConnections(); - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - simulacronNode = SIMULACRON_RULE.cluster().getNodes().iterator().next(); - } - - @Test - public void node_should_go_down_gracefully_when_connection_closed_during_heartbeat() { - try (CqlSession session = newSession()) { - - Node node = session.getMetadata().getNodes().values().iterator().next(); - assertThat(node.getState()).isEqualTo(NodeState.UP); - - // Stop listening for new connections (so it can't reconnect) - simulacronNode.rejectConnections(0, RejectScope.UNBIND); - - int heartbeatCount = getHeartbeatsForNode().size(); - // When node receives a heartbeat, close the connection. - simulacronNode.prime( - when(Options.INSTANCE) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - // Wait for heartbeat and for node to subsequently close its connection. - waitForDown(node); - - // Should have been a heartbeat received since that's what caused the disconnect. - assertThat(getHeartbeatsForNode().size()).isGreaterThan(heartbeatCount); - } - } - - @Test - public void should_not_send_heartbeat_during_protocol_initialization() { - // Configure node to reject startup. - simulacronNode.rejectConnections(0, RejectScope.REJECT_STARTUP); - - // Try to create a session. 
Note that the init query timeout is twice the heartbeat interval, so - // we're sure that at least one heartbeat would be sent if it was not properly disabled during - // init. - try (CqlSession ignored = newSession()) { - fail("Expected session creation to fail"); - } catch (Exception expected) { - // no heartbeats should have been sent while protocol was initializing, but one OPTIONS - // message is expected to be sent as part of the initialization process. - assertThat(getHeartbeatsForNode()).hasSize(1); - } - } - - @Test - public void should_send_heartbeat_on_control_connection() { - // Ensure we only have the control connection) - ProgrammaticDriverConfigLoaderBuilder loader = - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 0); - try (CqlSession ignored = newSession(loader)) { - AtomicInteger heartbeats = countHeartbeatsOnControlConnection(); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> heartbeats.get() > 0); - } - } - - @Test - public void should_send_heartbeat_on_regular_connection() throws InterruptedException { - // Prime a simple query so we get at least some results - simulacronNode.prime(when(QUERY).then(PrimeDsl.rows().row("column1", "1", "column2", "2"))); - - try (CqlSession session = newSession()) { - // Make a bunch of queries over two seconds. This should preempt any heartbeats. - assertThat(session.execute(QUERY)).hasSize(1); - final AtomicInteger nonControlHeartbeats = countHeartbeatsOnRegularConnection(); - for (int i = 0; i < 20; i++) { - assertThat(session.execute(QUERY)).hasSize(1); - MILLISECONDS.sleep(100); - } - - // No heartbeats should be sent, except those on the control connection. 
- assertThat(nonControlHeartbeats.get()).isZero(); - - // Stop querying, heartbeats should be sent again - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> nonControlHeartbeats.get() >= 1); - } - } - - @Test - public void should_send_heartbeat_when_requests_being_written_but_nothing_received() - throws InterruptedException { - // Prime a query that will never return a response. - String noResponseQueryStr = "delay"; - SIMULACRON_RULE.cluster().prime(when(noResponseQueryStr).then(noResult())); - - try (CqlSession session = newSession()) { - AtomicInteger heartbeats = countHeartbeatsOnRegularConnection(); - - for (int i = 0; i < 25; i++) { - session.executeAsync(noResponseQueryStr); - session.executeAsync(noResponseQueryStr); - MILLISECONDS.sleep(100); - } - - // We should expect at least 2 heartbeats - assertThat(heartbeats.get()).isGreaterThanOrEqualTo(2); - } - } - - @Test - public void should_close_connection_when_heartbeat_times_out() { - try (CqlSession session = newSession()) { - Node node = session.getMetadata().getNodes().values().iterator().next(); - assertThat(node.getState()).isEqualTo(NodeState.UP); - - // Ensure we get some heartbeats and the node remains up. - AtomicInteger heartbeats = new AtomicInteger(); - simulacronNode.registerQueryListener( - (n, l) -> heartbeats.incrementAndGet(), true, this::isOptionRequest); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> heartbeats.get() >= 2); - assertThat(node.getState()).isEqualTo(NodeState.UP); - - // configure node to not respond to options request, which should cause a timeout. - simulacronNode.prime(when(Options.INSTANCE).then(noResult())); - heartbeats.set(0); - - // wait for heartbeat to be sent. 
- await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> heartbeats.get() >= 1); - heartbeats.set(0); - - // node should go down because heartbeat was unanswered. - waitForDown(node); - - // clear prime so now responds to options request again. - simulacronNode.clearPrimes(); - - // wait for node to come up again and ensure heartbeats are successful and node remains up. - waitForUp(node); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> heartbeats.get() >= 2); - assertThat(node.getState()).isEqualTo(NodeState.UP); - } - } - - private CqlSession newSession() { - return newSession(null); - } - - private CqlSession newSession(ProgrammaticDriverConfigLoaderBuilder loaderBuilder) { - if (loaderBuilder == null) { - loaderBuilder = SessionUtils.configLoaderBuilder(); - } - DriverConfigLoader loader = - loaderBuilder - .withDuration(DefaultDriverOption.HEARTBEAT_INTERVAL, Duration.ofSeconds(1)) - .withDuration(DefaultDriverOption.HEARTBEAT_TIMEOUT, Duration.ofMillis(500)) - .withDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, Duration.ofSeconds(2)) - .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofSeconds(1)) - .build(); - return SessionUtils.newSession(SIMULACRON_RULE, loader); - } - - private AtomicInteger countHeartbeatsOnRegularConnection() { - return countHeartbeats(true); - } - - private AtomicInteger countHeartbeatsOnControlConnection() { - return countHeartbeats(false); - } - - private AtomicInteger countHeartbeats(boolean regularConnection) { - SocketAddress controlConnectionAddress = findControlConnectionAddress(); - AtomicInteger count = new AtomicInteger(); - SIMULACRON_RULE - .cluster() - .registerQueryListener( - (n, l) -> count.incrementAndGet(), - false, - (l) -> - isOptionRequest(l) - && (regularConnection ^ l.getConnection().equals(controlConnectionAddress))); - return count; - } - - private SocketAddress 
findControlConnectionAddress() { - List logs = simulacronNode.getLogs().getQueryLogs(); - for (QueryLog log : logs) { - if (log.getFrame().message instanceof Register) { - return log.getConnection(); - } - } - throw new AssertionError("Could not find address of control connection"); - } - - private List getHeartbeatsForNode() { - return simulacronNode.getLogs().getQueryLogs().stream() - .filter(l -> l.getQuery().equals("OPTIONS")) - .collect(Collectors.toList()); - } - - private boolean isOptionRequest(QueryLog l) { - return l.getQuery().equals("OPTIONS"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/AllLoadBalancingPoliciesSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/AllLoadBalancingPoliciesSimulacronIT.java deleted file mode 100644 index 855cd6bb6a2..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/AllLoadBalancingPoliciesSimulacronIT.java +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl.RowBuilder; -import com.datastax.oss.simulacron.server.BoundCluster; -import com.datastax.oss.simulacron.server.BoundNode; -import com.datastax.oss.simulacron.server.BoundTopic; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.net.SocketAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.function.Predicate; -import java.util.stream.Collectors; -import 
java.util.stream.Stream; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; - -@Category(ParallelizableTests.class) -@RunWith(DataProviderRunner.class) -public class AllLoadBalancingPoliciesSimulacronIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(5, 5, 5)); - - @Before - public void reset() { - SIMULACRON_RULE.cluster().start(); - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - SIMULACRON_RULE - .cluster() - .prime( - PrimeDsl.when("SELECT * FROM system_schema.keyspaces") - .then(new RowBuilder().columnTypes(KEYSPACE_COLUMNS).row(KEYSPACE_ROW).build())); - } - - @Test - @DataProvider({ - "BasicLoadBalancingPolicy,dc1", - "DefaultLoadBalancingPolicy,dc1", - "DcInferringLoadBalancingPolicy,dc1", - "DcInferringLoadBalancingPolicy,null", - }) - public void should_round_robin_within_local_dc_when_dc_aware_but_not_token_aware( - String lbp, String dc) { - - // given: DC is provided or inferred, token awareness is disabled and remote DCs are allowed - try (CqlSession session = newSession(lbp, dc, 2, true, false)) { - - // when: a query is executed 50 times. - for (int i = 0; i < 50; i++) { - session.execute(QUERY); - } - - // then: each node in local DC should get an equal number of requests. - for (int i = 0; i < 5; i++) { - assertThat(queries(0, i).count()).isEqualTo(10); - } - - // then: no node in the remote DC should get a request. 
- assertThat(queries(1).count()).isEqualTo(0); - assertThat(queries(2).count()).isEqualTo(0); - } - } - - @Test - @DataProvider({ - "BasicLoadBalancingPolicy,dc1,ONE", - "BasicLoadBalancingPolicy,dc1,LOCAL_ONE", - "BasicLoadBalancingPolicy,dc1,TWO", - "BasicLoadBalancingPolicy,dc1,QUORUM", - "BasicLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DefaultLoadBalancingPolicy,dc1,ONE", - "DefaultLoadBalancingPolicy,dc1,LOCAL_ONE", - "DefaultLoadBalancingPolicy,dc1,TWO", - "DefaultLoadBalancingPolicy,dc1,QUORUM", - "DefaultLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DcInferringLoadBalancingPolicy,dc1,ONE", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_ONE", - "DcInferringLoadBalancingPolicy,dc1,TWO", - "DcInferringLoadBalancingPolicy,dc1,QUORUM", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DcInferringLoadBalancingPolicy,null,ONE", - "DcInferringLoadBalancingPolicy,null,LOCAL_ONE", - "DcInferringLoadBalancingPolicy,null,TWO", - "DcInferringLoadBalancingPolicy,null,QUORUM", - "DcInferringLoadBalancingPolicy,null,LOCAL_QUORUM", - }) - public void should_use_local_replicas_when_dc_aware_and_token_aware_and_enough_local_replicas_up( - String lbp, String dc, DefaultConsistencyLevel cl) { - - // given: DC is provided or inferred, token awareness enabled, remotes allowed, CL <= 2 - try (CqlSession session = newSession(lbp, dc, 2, true)) { - - // given: one replica and 2 non-replicas down in local DC, but CL <= 2 still achievable - List aliveReplicas = degradeLocalDc(session); - - // when: a query is executed 50 times and some nodes are down in the local DC. 
- for (int i = 0; i < 50; i++) { - session.execute( - SimpleStatement.newInstance(QUERY) - .setConsistencyLevel(cl) - .setRoutingKeyspace("test") - .setRoutingKey(ROUTING_KEY)); - } - - // then: all requests should be distributed to the remaining up replicas in local DC - BoundNode alive1 = findNode(aliveReplicas.get(0)); - BoundNode alive2 = findNode(aliveReplicas.get(1)); - assertThat(queries(alive1).count() + queries(alive2).count()).isEqualTo(50); - - // then: no node in the remote DCs should get a request. - assertThat(queries(1).count()).isEqualTo(0); - assertThat(queries(2).count()).isEqualTo(0); - } - } - - @Test - public void should_round_robin_within_all_dcs_when_dc_agnostic() { - - // given: DC-agnostic LBP, no local DC, remotes not allowed, token awareness enabled - try (CqlSession session = newSession("BasicLoadBalancingPolicy", null, 0, false)) { - - // when: a query is executed 150 times. - for (int i = 0; i < 150; i++) { - session.execute( - SimpleStatement.newInstance(QUERY) - // local CL should be ignored since there is no local DC - .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)); - } - - // then: each node should get 10 requests, even remote ones since the LBP is DC-agnostic. 
- for (int dc = 0; dc < 3; dc++) { - for (int n = 0; n < 5; n++) { - assertThat(queries(dc, n).count()).isEqualTo(10); - } - } - } - } - - @Test - @DataProvider({ - "BasicLoadBalancingPolicy,dc1,ONE", - "BasicLoadBalancingPolicy,dc1,TWO", - "BasicLoadBalancingPolicy,dc1,THREE", - "BasicLoadBalancingPolicy,dc1,QUORUM", - "BasicLoadBalancingPolicy,dc1,ANY", - "DefaultLoadBalancingPolicy,dc1,ONE", - "DefaultLoadBalancingPolicy,dc1,TWO", - "DefaultLoadBalancingPolicy,dc1,THREE", - "DefaultLoadBalancingPolicy,dc1,QUORUM", - "DefaultLoadBalancingPolicy,dc1,ANY", - "DcInferringLoadBalancingPolicy,dc1,ONE", - "DcInferringLoadBalancingPolicy,dc1,TWO", - "DcInferringLoadBalancingPolicy,dc1,THREE", - "DcInferringLoadBalancingPolicy,dc1,QUORUM", - "DcInferringLoadBalancingPolicy,dc1,ANY", - "DcInferringLoadBalancingPolicy,null,ONE", - "DcInferringLoadBalancingPolicy,null,TWO", - "DcInferringLoadBalancingPolicy,null,THREE", - "DcInferringLoadBalancingPolicy,null,QUORUM", - "DcInferringLoadBalancingPolicy,null,ANY", - }) - public void should_use_remote_nodes_when_no_up_nodes_in_local_dc_for_non_local_cl( - String lbp, String dc, DefaultConsistencyLevel cl) { - - // given: 1 remote allowed per DC and a non-local CL, token awareness enabled - try (CqlSession session = newSession(lbp, dc, 1, false)) { - - // given: local DC is down - stopLocalDc(session); - - // when: a query is executed 50 times and all nodes are down in local DC. - for (int i = 0; i < 50; i++) { - session.execute( - SimpleStatement.newInstance(QUERY) - .setConsistencyLevel(cl) - .setRoutingKeyspace("test") - .setRoutingKey(ROUTING_KEY)); - } - - // then: only 1 node in each remote DC should get requests (we can't know which ones exactly). 
- assertThat(queries(1).count() + queries(2).count()).isEqualTo(50); - } - } - - @Test - @DataProvider({ - "BasicLoadBalancingPolicy,dc1,LOCAL_ONE", - "BasicLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "BasicLoadBalancingPolicy,dc1,LOCAL_SERIAL", - "DefaultLoadBalancingPolicy,dc1,LOCAL_ONE", - "DefaultLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DefaultLoadBalancingPolicy,dc1,LOCAL_SERIAL", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_ONE", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_SERIAL", - "DcInferringLoadBalancingPolicy,null,LOCAL_ONE", - "DcInferringLoadBalancingPolicy,null,LOCAL_QUORUM", - "DcInferringLoadBalancingPolicy,null,LOCAL_SERIAL", - }) - public void should_not_use_remote_nodes_when_using_local_cl( - String lbp, String dc, DefaultConsistencyLevel cl) { - - // given: remotes allowed but not for local CL, token awareness enabled, local CL - try (CqlSession session = newSession(lbp, dc, 5, false)) { - - // given: local DC is down - stopLocalDc(session); - - // when: a query is executed 50 times and all nodes are down in local DC. - for (int i = 0; i < 50; i++) { - Throwable t = - catchThrowable( - () -> - session.execute( - SimpleStatement.newInstance(QUERY) - .setConsistencyLevel(cl) - .setRoutingKeyspace("test") - .setRoutingKey(ROUTING_KEY))); - - // then: expect a NNAE for a local CL since no local replicas available. - assertThat(t).isInstanceOf(NoNodeAvailableException.class); - } - - // then: no node in the remote DCs should get a request. 
- assertThat(queries(1).count()).isEqualTo(0); - assertThat(queries(2).count()).isEqualTo(0); - } - } - - @Test - @DataProvider({ - "BasicLoadBalancingPolicy,dc1,LOCAL_ONE", - "BasicLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DefaultLoadBalancingPolicy,dc1,LOCAL_ONE", - "DefaultLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_ONE", - "DcInferringLoadBalancingPolicy,dc1,LOCAL_QUORUM", - "DcInferringLoadBalancingPolicy,null,LOCAL_ONE", - "DcInferringLoadBalancingPolicy,null,LOCAL_QUORUM", - }) - public void should_use_remote_nodes_when_using_local_cl_if_allowed( - String lbp, String dc, DefaultConsistencyLevel cl) { - - // given: only one node allowed per remote DC and remotes allowed even for local CLs. - try (CqlSession session = newSession(lbp, dc, 1, true)) { - - // given: local DC is down - stopLocalDc(session); - - // when: a query is executed 50 times and all nodes are down in local DC. - for (int i = 0; i < 50; i++) { - session.execute( - SimpleStatement.newInstance(QUERY) - .setConsistencyLevel(cl) - .setRoutingKeyspace("test") - .setRoutingKey(ROUTING_KEY)); - } - - // then: only 1 node in each remote DC should get requests (we can't know which ones exactly). - assertThat(queries(1).count() + queries(2).count()).isEqualTo(50); - } - } - - @Test - @DataProvider({ - "BasicLoadBalancingPolicy,dc1", - "DefaultLoadBalancingPolicy,dc1", - "DcInferringLoadBalancingPolicy,dc1", - "DcInferringLoadBalancingPolicy,null" - }) - public void should_not_use_excluded_dc_using_node_filter(String lbp, String dc) { - - // given: remotes allowed even for local CLs, but node filter excluding dc2 - try (CqlSession session = newSession(lbp, dc, 5, true, true, excludeDc("dc2"))) { - - // when: A query is made and nodes for the local dc are available. 
- for (int i = 0; i < 50; i++) { - session.execute( - SimpleStatement.newInstance(QUERY) - .setRoutingKeyspace("test") - .setRoutingKey(ROUTING_KEY)); - } - - // then: only nodes in the local DC should have been queried. - assertThat(queries(0).count()).isEqualTo(50); - assertThat(queries(1).count()).isEqualTo(0); - assertThat(queries(2).count()).isEqualTo(0); - - // given: local DC is down - stopLocalDc(session); - - SIMULACRON_RULE.cluster().clearLogs(); - - // when: A query is made and all nodes in the local dc are down. - for (int i = 0; i < 50; i++) { - session.execute( - SimpleStatement.newInstance(QUERY) - .setRoutingKeyspace("test") - .setRoutingKey(ROUTING_KEY)); - } - - // then: Only nodes in DC3 should have been queried, since DC2 is excluded and DC1 is down. - assertThat(queries(0).count()).isEqualTo(0); - assertThat(queries(1).count()).isEqualTo(0); - assertThat(queries(2).count()).isEqualTo(50); - } - } - - private static final ByteBuffer ROUTING_KEY = ByteBuffer.wrap(new byte[] {1, 2, 3, 4}); - - private static final String[] KEYSPACE_COLUMNS = - new String[] { - "keyspace_name", "varchar", - "durable_writes", "boolean", - "replication", "map" - }; - - private static final Object[] KEYSPACE_ROW = - new Object[] { - "keyspace_name", - "test", - "durable_writes", - true, - "replication", - ImmutableMap.of( - "class", - "org.apache.cassandra.locator.NetworkTopologyStrategy", - "dc1", - "3", - "dc2", - "3", - "dc3", - "3") - }; - - private static final String QUERY = "SELECT * FROM test.foo"; - - private CqlSession newSession(String lbp, String dc, int maxRemoteNodes, boolean allowLocalCl) { - return newSession(lbp, dc, maxRemoteNodes, allowLocalCl, true); - } - - private CqlSession newSession( - String lbp, String dc, int maxRemoteNodes, boolean allowLocalCl, boolean tokenAware) { - return newSession(lbp, dc, maxRemoteNodes, allowLocalCl, tokenAware, null); - } - - private CqlSession newSession( - String lbp, - String dc, - int maxRemoteNodes, - boolean 
allowLocalCl, - boolean tokenAware, - Predicate nodeFilter) { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, tokenAware) - .withString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, lbp) - .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, dc) - .withInt( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, - maxRemoteNodes) - .withBoolean( - DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, - allowLocalCl) - .build(); - return SessionUtils.newSession(SIMULACRON_RULE, null, null, null, nodeFilter, loader); - } - - private BoundNode findNode(Node node) { - BoundCluster simulacron = SIMULACRON_RULE.cluster(); - SocketAddress toFind = node.getEndPoint().resolve(); - for (BoundNode boundNode : simulacron.getNodes()) { - if (boundNode.getAddress().equals(toFind)) { - return boundNode; - } - } - throw new AssertionError("Could not find node: " + toFind); - } - - private void stopLocalDc(CqlSession session) { - SIMULACRON_RULE.cluster().dc(0).stop(); - awaitDown(nodesInDc(session, "dc1")); - } - - private List degradeLocalDc(CqlSession session) { - // stop 1 replica and 2 non-replicas in dc1 - List localReplicas = replicasInDc(session, "dc1"); - assertThat(localReplicas).hasSize(3); - BoundNode replica1 = findNode(localReplicas.get(0)); - - List localOthers = nonReplicasInDc(session, "dc1"); - assertThat(localOthers).hasSize(2); - BoundNode other1 = findNode(localOthers.get(0)); - BoundNode other2 = findNode(localOthers.get(1)); - - replica1.stop(); - other1.stop(); - other2.stop(); - - awaitDown(localReplicas.get(0), localOthers.get(0), localOthers.get(1)); - return localReplicas.subList(1, 3); - } - - private Stream queries(int dc, int node) { - return queries(SIMULACRON_RULE.cluster().dc(dc).node(node)); - } - - private Stream queries(int dc) { - return queries(SIMULACRON_RULE.cluster().dc(dc)); - } - - private Stream 
queries(BoundTopic topic) { - return topic.getLogs().getQueryLogs().stream() - .filter(q -> q.getFrame().message instanceof Query) - .filter(q -> ((Query) q.getFrame().message).query.equals(QUERY)); - } - - private List nodesInDc(CqlSession session, String dcName) { - return session.getMetadata().getNodes().values().stream() - .filter(n -> Objects.equals(n.getDatacenter(), dcName)) - .collect(Collectors.toList()); - } - - private List replicasInDc(CqlSession session, String dcName) { - assertThat(session.getMetadata().getTokenMap()).isPresent(); - TokenMap tokenMap = session.getMetadata().getTokenMap().get(); - return tokenMap.getReplicas("test", ROUTING_KEY).stream() - .filter(n -> Objects.equals(n.getDatacenter(), dcName)) - .collect(Collectors.toList()); - } - - private List nonReplicasInDc( - CqlSession session, @SuppressWarnings("SameParameterValue") String dcName) { - List nodes = nodesInDc(session, dcName); - nodes.removeAll(replicasInDc(session, dcName)); - return nodes; - } - - private Predicate excludeDc(@SuppressWarnings("SameParameterValue") String dcName) { - return node -> !Objects.equals(node.getDatacenter(), dcName); - } - - private void awaitDown(Node... 
nodes) { - awaitDown(Arrays.asList(nodes)); - } - - private void awaitDown(Iterable nodes) { - await() - .atMost(Duration.ofSeconds(10)) - .untilAsserted( - () -> { - for (Node node : nodes) { - assertThat(node.getState()).isEqualTo(NodeState.DOWN); - } - }); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/DefaultLoadBalancingPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/DefaultLoadBalancingPolicyIT.java deleted file mode 100644 index af454fc6458..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/DefaultLoadBalancingPolicyIT.java +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.loadbalancing; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.assertj.core.api.Assertions.withinPercentage; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.google.common.collect.ImmutableList; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class DefaultLoadBalancingPolicyIT { - - private static final String LOCAL_DC = "dc1"; - 
- private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withNodes(4, 1).build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - session.execute( - "CREATE KEYSPACE test " - + "WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 2, 'dc2': 1}"); - session.execute("CREATE TABLE test.foo (k int PRIMARY KEY)"); - } - - @Test - public void should_ignore_remote_dcs() { - for (Node node : SESSION_RULE.session().getMetadata().getNodes().values()) { - if (LOCAL_DC.equals(node.getDatacenter())) { - assertThat(node.getDistance()).isEqualTo(NodeDistance.LOCAL); - assertThat(node.getState()).isEqualTo(NodeState.UP); - // 1 regular connection, maybe 1 control connection - assertThat(node.getOpenConnections()).isBetween(1, 2); - assertThat(node.isReconnecting()).isFalse(); - } else { - assertThat(node.getDistance()).isEqualTo(NodeDistance.IGNORED); - assertThat(node.getOpenConnections()).isEqualTo(0); - assertThat(node.isReconnecting()).isFalse(); - } - } - } - - @Test - public void should_use_round_robin_on_local_dc_when_not_enough_routing_information() { - ByteBuffer routingKey = TypeCodecs.INT.encodePrimitive(1, ProtocolVersion.DEFAULT); - TokenMap tokenMap = SESSION_RULE.session().getMetadata().getTokenMap().get(); - // TODO add statements with setKeyspace when that is supported - List statements = - ImmutableList.of( - // No information at all - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1"), - // Keyspace present, missing routing key - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - 
.setRoutingKeyspace(CqlIdentifier.fromCql("test")), - // Routing key present, missing keyspace - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - .setRoutingKey(routingKey), - // Routing token present, missing keyspace - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - .setRoutingToken(tokenMap.newToken(routingKey))); - - for (Statement statement : statements) { - List coordinators = new ArrayList<>(); - for (int i = 0; i < 12; i++) { - ResultSet rs = SESSION_RULE.session().execute(statement); - Node coordinator = rs.getExecutionInfo().getCoordinator(); - assertThat(coordinator.getDatacenter()).isEqualTo(LOCAL_DC); - coordinators.add(coordinator); - } - for (int i = 0; i < 4; i++) { - assertThat(coordinators.get(i)) - .isEqualTo(coordinators.get(4 + i)) - .isEqualTo(coordinators.get(8 + i)); - } - } - } - - @Test - public void should_prioritize_replicas_when_routing_information_present() { - CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); - ByteBuffer routingKey = TypeCodecs.INT.encodePrimitive(1, ProtocolVersion.DEFAULT); - TokenMap tokenMap = SESSION_RULE.session().getMetadata().getTokenMap().get(); - Set localReplicas = new HashSet<>(); - for (Node replica : tokenMap.getReplicas(keyspace, routingKey)) { - if (replica.getDatacenter().equals(LOCAL_DC)) { - localReplicas.add(replica); - } - } - assertThat(localReplicas).hasSize(2); - - // TODO add statements with setKeyspace when that is supported - List statements = - ImmutableList.of( - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - .setRoutingKeyspace(keyspace) - .setRoutingKey(routingKey), - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - .setRoutingKeyspace(keyspace) - .setRoutingToken(tokenMap.newToken(routingKey))); - - for (Statement statement : statements) { - // Since the exact order is randomized, just run a bunch of queries and check that we get a - // reasonable distribution: - Map hits = new HashMap<>(); - for (int 
i = 0; i < 2000; i++) { - ResultSet rs = SESSION_RULE.session().execute(statement); - Node coordinator = rs.getExecutionInfo().getCoordinator(); - assertThat(localReplicas).contains(coordinator); - assertThat(coordinator.getDatacenter()).isEqualTo(LOCAL_DC); - hits.merge(coordinator, 1, (a, b) -> a + b); - } - - for (Integer count : hits.values()) { - assertThat(count).isCloseTo(1000, withinPercentage(10)); - } - } - } - - @Test - public void should_hit_non_replicas_when_routing_information_present_but_all_replicas_down() { - CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); - ByteBuffer routingKey = TypeCodecs.INT.encodePrimitive(1, ProtocolVersion.DEFAULT); - TokenMap tokenMap = SESSION_RULE.session().getMetadata().getTokenMap().get(); - - InternalDriverContext context = (InternalDriverContext) SESSION_RULE.session().getContext(); - - Set localReplicas = new HashSet<>(); - for (Node replica : tokenMap.getReplicas(keyspace, routingKey)) { - if (replica.getDatacenter().equals(LOCAL_DC)) { - localReplicas.add(replica); - context.getEventBus().fire(TopologyEvent.forceDown(replica.getBroadcastRpcAddress().get())); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted(() -> assertThat(replica.getOpenConnections()).isZero()); - } - } - assertThat(localReplicas).hasSize(2); - - // TODO add statements with setKeyspace when that is supported - List statements = - ImmutableList.of( - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - .setRoutingKeyspace(keyspace) - .setRoutingKey(routingKey), - SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1") - .setRoutingKeyspace(keyspace) - .setRoutingToken(tokenMap.newToken(routingKey))); - - for (Statement statement : statements) { - List coordinators = new ArrayList<>(); - for (int i = 0; i < 6; i++) { - ResultSet rs = SESSION_RULE.session().execute(statement); - Node coordinator = rs.getExecutionInfo().getCoordinator(); - 
coordinators.add(coordinator); - assertThat(coordinator.getDatacenter()).isEqualTo(LOCAL_DC); - assertThat(localReplicas).doesNotContain(coordinator); - } - // Should round-robin on the two non-replicas - for (int i = 0; i < 2; i++) { - assertThat(coordinators.get(i)) - .isEqualTo(coordinators.get(2 + i)) - .isEqualTo(coordinators.get(4 + i)); - } - } - - for (Node replica : localReplicas) { - context.getEventBus().fire(TopologyEvent.forceUp(replica.getBroadcastRpcAddress().get())); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted(() -> assertThat(replica.getOpenConnections()).isPositive()); - } - } - - @Test - public void should_apply_node_filter() { - Set localNodes = new HashSet<>(); - for (Node node : SESSION_RULE.session().getMetadata().getNodes().values()) { - if (node.getDatacenter().equals(LOCAL_DC)) { - localNodes.add(node); - } - } - assertThat(localNodes.size()).isEqualTo(4); - // Pick a random node to exclude -- just ensure that it's not the default contact point since - // we assert 0 connections at the end of this test (the filter is not applied to contact - // points). 
- EndPoint ignoredEndPoint = firstNonDefaultContactPoint(localNodes); - - // Open a separate session with a filter - try (CqlSession session = - SessionUtils.newSession( - CCM_RULE, - SESSION_RULE.keyspace(), - null, - null, - node -> !node.getEndPoint().equals(ignoredEndPoint))) { - - // No routing information => should round-robin on white-listed nodes - SimpleStatement statement = SimpleStatement.newInstance("SELECT * FROM test.foo WHERE k = 1"); - for (int i = 0; i < 12; i++) { - ResultSet rs = session.execute(statement); - Node coordinator = rs.getExecutionInfo().getCoordinator(); - assertThat(coordinator.getEndPoint()).isNotEqualTo(ignoredEndPoint); - } - - assertThat(session.getMetadata().findNode(ignoredEndPoint)) - .hasValueSatisfying( - ignoredNode -> { - assertThat(ignoredNode.getOpenConnections()).isEqualTo(0); - }); - } - } - - private EndPoint firstNonDefaultContactPoint(Iterable nodes) { - for (Node localNode : nodes) { - EndPoint endPoint = localNode.getEndPoint(); - InetSocketAddress connectAddress = (InetSocketAddress) endPoint.resolve(); - if (!connectAddress.getAddress().getHostAddress().equals("127.0.0.1")) { - return endPoint; - } - } - fail("should have other nodes than the default contact point"); - return null; // never reached - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/NodeTargetingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/NodeTargetingIT.java deleted file mode 100644 index f6a6176568a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/NodeTargetingIT.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.datastax.oss.driver.core.loadbalancing; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.NoNodeAvailableException; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; -import com.datastax.oss.simulacron.server.BoundNode; -import java.net.InetSocketAddress; -import java.util.concurrent.TimeUnit; -import 
org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class NodeTargetingIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(5)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - @Before - public void clear() { - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - SIMULACRON_RULE.cluster().node(4).stop(); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(5, TimeUnit.SECONDS) - .until(() -> getNode(4).getState() == NodeState.DOWN); - } - - @Test - public void should_use_node_on_statement() { - for (int i = 0; i < 10; i++) { - int nodeIndex = i % 3 + 1; - Node node = getNode(nodeIndex); - - // given a statement with node explicitly set. - Statement statement = SimpleStatement.newInstance("select * system.local").setNode(node); - - // when statement is executed - ResultSet result = SESSION_RULE.session().execute(statement); - - // then the query should have been sent to the configured node. - assertThat(result.getExecutionInfo().getCoordinator()).isEqualTo(node); - } - } - - @Test - public void should_fail_if_node_fails_query() { - String query = "mock"; - SIMULACRON_RULE - .cluster() - .node(3) - .prime(when(query).then(unavailable(ConsistencyLevel.ALL, 1, 0))); - - // given a statement with a node configured to fail the given query. - Node node3 = getNode(3); - Statement statement = SimpleStatement.newInstance(query).setNode(node3); - // when statement is executed an error should be raised. 
- try { - SESSION_RULE.session().execute(statement); - fail("Should have thrown AllNodesFailedException"); - } catch (AllNodesFailedException e) { - assertThat(e.getAllErrors().size()).isEqualTo(1); - assertThat(e.getAllErrors().get(node3).get(0)).isInstanceOf(UnavailableException.class); - } - } - - @Test - public void should_fail_if_node_is_not_connected() { - // given a statement with node explicitly set that for which we have no active pool. - Node node4 = getNode(4); - - Statement statement = SimpleStatement.newInstance("select * system.local").setNode(node4); - try { - // when statement is executed - SESSION_RULE.session().execute(statement); - fail("Query should have failed"); - } catch (NoNodeAvailableException e) { - assertThat(e.getAllErrors()).isEmpty(); - } catch (AllNodesFailedException e) { - // its also possible that the query is tried. This can happen if the node was marked - // down, but not all connections have been closed yet. In this case, just verify that - // the expected host failed. 
- assertThat(e.getAllErrors().size()).isEqualTo(1); - assertThat(e.getAllErrors()).containsOnlyKeys(node4); - } - } - - private Node getNode(int id) { - BoundNode boundNode = SIMULACRON_RULE.cluster().node(id); - assertThat(boundNode).isNotNull(); - InetSocketAddress address = (InetSocketAddress) boundNode.getAddress(); - return SESSION_RULE - .session() - .getMetadata() - .findNode(address) - .orElseThrow( - () -> new AssertionError(String.format("Expected to find node %d in metadata", id))); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java deleted file mode 100644 index 5113a8861b0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.loadbalancing; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.util.Objects; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class PerProfileLoadBalancingPolicyIT { - - // 3 2-node DCs - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(2, 2, 2)); - - // default lb policy should consider dc1 local, profile1 dc3, profile2 empty. 
- private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc1") - .startProfile("profile1") - .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc3") - .startProfile("profile2") - .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE") - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static String QUERY_STRING = "select * from foo"; - private static final SimpleStatement QUERY = SimpleStatement.newInstance(QUERY_STRING); - - @Before - public void clear() { - SIMULACRON_RULE.cluster().clearLogs(); - } - - @BeforeClass - public static void setup() { - // sanity checks - DriverContext context = SESSION_RULE.session().getContext(); - DriverConfig config = context.getConfig(); - assertThat(config.getProfiles()).containsKeys("profile1", "profile2"); - - assertThat(context.getLoadBalancingPolicies()) - .hasSize(3) - .containsKeys(DriverExecutionProfile.DEFAULT_NAME, "profile1", "profile2"); - - LoadBalancingPolicy defaultPolicy = - context.getLoadBalancingPolicy(DriverExecutionProfile.DEFAULT_NAME); - LoadBalancingPolicy policy1 = context.getLoadBalancingPolicy("profile1"); - LoadBalancingPolicy policy2 = context.getLoadBalancingPolicy("profile2"); - - assertThat(defaultPolicy).isSameAs(policy2).isNotSameAs(policy1); - - for (Node node : SESSION_RULE.session().getMetadata().getNodes().values()) { - // if node is in dc2 it should be ignored, otherwise (dc1, dc3) it should be local. - NodeDistance expectedDistance = - Objects.equals(node.getDatacenter(), "dc2") ? 
NodeDistance.IGNORED : NodeDistance.LOCAL; - assertThat(node.getDistance()).isEqualTo(expectedDistance); - } - } - - @Test - public void should_use_policy_from_request_profile() { - // Since profile1 uses dc3 as localDC, only those nodes should receive these queries. - Statement statement = QUERY.setExecutionProfileName("profile1"); - for (int i = 0; i < 10; i++) { - ResultSet result = SESSION_RULE.session().execute(statement); - Node coordinator = result.getExecutionInfo().getCoordinator(); - assertThat(coordinator).isNotNull(); - assertThat(coordinator.getDatacenter()).isEqualTo("dc3"); - } - - assertQueryInDc(0, 0); - assertQueryInDc(1, 0); - assertQueryInDc(2, 5); - } - - @Test - public void should_use_policy_from_config_when_not_configured_in_request_profile() { - // Since profile2 does not define an lbp config, it should use default which uses dc1. - Statement statement = QUERY.setExecutionProfileName("profile2"); - for (int i = 0; i < 10; i++) { - ResultSet result = SESSION_RULE.session().execute(statement); - Node coordinator = result.getExecutionInfo().getCoordinator(); - assertThat(coordinator).isNotNull(); - assertThat(coordinator.getDatacenter()).isEqualTo("dc1"); - } - - assertQueryInDc(0, 5); - assertQueryInDc(1, 0); - assertQueryInDc(2, 0); - } - - private void assertQueryInDc(int dc, int expectedPerNode) { - for (int i = 0; i < 2; i++) { - assertThat( - SIMULACRON_RULE.cluster().dc(dc).node(i).getLogs().getQueryLogs().stream() - .filter(l -> l.getQuery().equals(QUERY_STRING))) - .as("Expected query count to be %d for dc %d", 5, i) - .hasSize(expectedPerNode); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenIT.java deleted file mode 100644 index 278bb106eda..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenIT.java +++ /dev/null @@ -1,77 +0,0 
@@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.CASSANDRA, - maxExclusive = "4.0-beta4", - description = - "Token allocation is not compatible with this partitioner, " - + "but is enabled by default in C* 4.0 (see CASSANDRA-7032 and CASSANDRA-13701)") -public class ByteOrderedTokenIT extends TokenITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withNodes(3) - .withCreateOption("-p 
ByteOrderedPartitioner") - .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("request_timeout_in_ms", 45_000) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public ByteOrderedTokenIT() { - super("org.apache.cassandra.dht.ByteOrderedPartitioner", ByteOrderedToken.class, false); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(SESSION_RULE.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenVnodesIT.java deleted file mode 100644 index 4d7cf8ad631..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenVnodesIT.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.CASSANDRA, - maxExclusive = "4.0-beta4", - description = - "Token allocation is not compatible with this partitioner, " - + "but is enabled by default in C* 4.0 (see CASSANDRA-7032 and CASSANDRA-13701)") -public class ByteOrderedTokenVnodesIT extends TokenITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withNodes(3) - .withCreateOption("-p ByteOrderedPartitioner") - .withCreateOption("--vnodes") - .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("request_timeout_in_ms", 45_000) - .build(); - - private static final SessionRule SESSION_RULE = 
- SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public ByteOrderedTokenVnodesIT() { - super("org.apache.cassandra.dht.ByteOrderedPartitioner", ByteOrderedToken.class, true); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(SESSION_RULE.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/CaseSensitiveUdtIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/CaseSensitiveUdtIT.java deleted file mode 100644 index f80b02207f8..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/CaseSensitiveUdtIT.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.time.Duration; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * Checks that case-sensitive UDT names are properly handled in schema metadata. - * - *

    In Cassandra >= 2.2, whenever a UDT is referenced in a system table (e.g. {@code - * system_schema.columns.type}, it uses the CQL form. This is in contrast to the UDT definition - * itself ({@code system_schema.types.type_name}), which uses the internal form. - * - * @see JAVA-2028 - */ -@Category(ParallelizableTests.class) -public class CaseSensitiveUdtIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Test - public void should_expose_metadata_with_correct_case() { - boolean supportsFunctions = CCM_RULE.getCassandraVersion().compareTo(Version.V2_2_0) >= 0; - - CqlSession session = SESSION_RULE.session(); - - session.execute("CREATE TYPE \"Address\"(street text)"); - - session.execute("CREATE TABLE user(id uuid PRIMARY KEY, address frozen<\"Address\">)"); - session.execute("CREATE TYPE t(a frozen<\"Address\">)"); - if (supportsFunctions) { - session.execute( - "CREATE FUNCTION eq(a \"Address\") " - + "CALLED ON NULL INPUT " - + "RETURNS \"Address\" " - + "LANGUAGE java " - + "AS $$return a;$$"); - session.execute( - "CREATE FUNCTION left(l \"Address\", r \"Address\") " - + "CALLED ON NULL INPUT " - + "RETURNS \"Address\" " - + "LANGUAGE java " - + "AS $$return l;$$"); - session.execute( - "CREATE AGGREGATE ag(\"Address\") " - + "SFUNC left " - + "STYPE \"Address\" " - + "INITCOND {street: 'foo'};"); - } - - KeyspaceMetadata keyspace = - session - .getMetadata() - .getKeyspace(SESSION_RULE.keyspace()) - .orElseThrow(() -> new AssertionError("Couldn't find rule's keyspace")); - - UserDefinedType addressType = - keyspace - .getUserDefinedType(CqlIdentifier.fromInternal("Address")) - 
.orElseThrow(() -> new AssertionError("Couldn't find UDT definition")); - - assertThat(keyspace.getTable("user")) - .hasValueSatisfying( - table -> - assertThat(table.getColumn("address")) - .hasValueSatisfying( - column -> assertThat(column.getType()).isEqualTo(addressType))); - - assertThat(keyspace.getUserDefinedType("t")) - .hasValueSatisfying(type -> assertThat(type.getFieldTypes()).containsExactly(addressType)); - - if (supportsFunctions) { - assertThat(keyspace.getFunction("eq", addressType)) - .hasValueSatisfying( - function -> { - assertThat(function.getSignature().getParameterTypes()) - .containsExactly(addressType); - assertThat(function.getReturnType()).isEqualTo(addressType); - }); - - assertThat(keyspace.getAggregate("ag", addressType)) - .hasValueSatisfying( - aggregate -> { - assertThat(aggregate.getSignature().getParameterTypes()) - .containsExactly(addressType); - assertThat(aggregate.getStateType()).isEqualTo(addressType); - assertThat(aggregate.getReturnType()).isEqualTo(addressType); - }); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/DescribeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/DescribeIT.java deleted file mode 100644 index 4d6c2a7a3b1..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/DescribeIT.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; -import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.SerializationHelper; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultKeyspaceMetadata; -import com.datastax.oss.driver.internal.core.metadata.schema.DefaultTableMetadata; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.google.common.io.Files; -import java.io.File; -import 
java.io.IOException; -import java.net.URL; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.regex.Pattern; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Category(ParallelizableTests.class) -public class DescribeIT { - - private static final Logger LOG = LoggerFactory.getLogger(DescribeIT.class); - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - // disable debouncer to speed up test. - .withDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(0)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final Splitter STATEMENT_SPLITTER = - // Use a regex to ignore semicolons in function scripts - Splitter.on(Pattern.compile(";\n")).omitEmptyStrings(); - - private static Version serverVersion; - - private static final Map scriptFileForBackend = - ImmutableMap.builder() - .put(BackendType.CASSANDRA, "DescribeIT/oss") - .put(BackendType.DSE, "DescribeIT/dse") - .put(BackendType.HCD, "DescribeIT/hcd") - .build(); - - private static File scriptFile; - private static String scriptContents; - - @BeforeClass - public static void setup() { - serverVersion = - CCM_RULE.isDistributionOf(BackendType.CASSANDRA) - ? 
CCM_RULE.getCassandraVersion().nextStable() - : CCM_RULE.getDistributionVersion().nextStable(); - - scriptFile = getScriptFile(); - assertThat(scriptFile).exists(); - assertThat(scriptFile).isFile(); - assertThat(scriptFile).canRead(); - scriptContents = getScriptContents(); - - setupDatabase(); - } - - @Test - public void describe_output_should_match_creation_script() throws Exception { - - CqlSession session = SESSION_RULE.session(); - - KeyspaceMetadata keyspaceMetadata = - session.getMetadata().getKeyspace(SESSION_RULE.keyspace()).orElseThrow(AssertionError::new); - String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); - - assertThat(describeOutput) - .as( - "Describe output doesn't match create statements, " - + "maybe you need to add a new script in integration-tests/src/test/resources. " - + "Server version = %s %s, used script = %s", - CCM_RULE.getDistribution(), serverVersion, scriptFile) - .isEqualTo(scriptContents); - } - - private boolean atLeastVersion(Version dseVersion, Version ossVersion) { - Version comparison = CCM_RULE.isDistributionOf(BackendType.DSE) ? 
dseVersion : ossVersion; - return serverVersion.compareTo(comparison) >= 0; - } - - @Test - public void keyspace_metadata_should_be_serializable() throws Exception { - - CqlSession session = SESSION_RULE.session(); - - Optional ksOption = - session.getMetadata().getKeyspace(session.getKeyspace().get()); - assertThat(ksOption).isPresent(); - KeyspaceMetadata ks = ksOption.get(); - assertThat(ks).isInstanceOfAny(DefaultKeyspaceMetadata.class, DefaultDseKeyspaceMetadata.class); - - /* Validate that the keyspace metadata is fully populated */ - assertThat(ks.getUserDefinedTypes()).isNotEmpty(); - assertThat(ks.getTables()).isNotEmpty(); - if (atLeastVersion(Version.V5_0_0, Version.V3_0_0)) { - assertThat(ks.getViews()).isNotEmpty(); - } - if (atLeastVersion(Version.V5_0_0, Version.V2_2_0)) { - assertThat(ks.getFunctions()).isNotEmpty(); - assertThat(ks.getAggregates()).isNotEmpty(); - } - - /* A table with an explicit compound primary key + specified clustering column */ - Optional tableOption = ks.getTable("rank_by_year_and_name"); - assertThat(tableOption).isPresent(); - TableMetadata table = tableOption.get(); - assertThat(table).isInstanceOfAny(DefaultTableMetadata.class, DefaultDseTableMetadata.class); - - /* Validate that the table metadata is fully populated */ - assertThat(table.getPartitionKey()).isNotEmpty(); - assertThat(table.getClusteringColumns()).isNotEmpty(); - assertThat(table.getColumns()).isNotEmpty(); - assertThat(table.getOptions()).isNotEmpty(); - assertThat(table.getIndexes()).isNotEmpty(); - - KeyspaceMetadata deserialized = SerializationHelper.serializeAndDeserialize(ks); - assertThat(deserialized).isEqualTo(ks); - } - - /** - * Find a creation script in our test resources that matches the current server version. If we - * don't have an exact match, use the closest version below it. 
- */ - private static File getScriptFile() { - URL logbackTestUrl = DescribeIT.class.getResource("/logback-test.xml"); - if (logbackTestUrl == null || logbackTestUrl.getFile().isEmpty()) { - fail( - "Expected to use logback-test.xml to determine location of " - + "target/test-classes, but got URL %s", - logbackTestUrl); - } - File resourcesDir = new File(logbackTestUrl.getFile()).getParentFile(); - File scriptsDir = new File(resourcesDir, scriptFileForBackend.get(CCM_RULE.getDistribution())); - LOG.debug("Looking for a matching script in directory {}", scriptsDir); - - File[] candidates = scriptsDir.listFiles(); - assertThat(candidates).isNotNull(); - - File bestFile = null; - Version bestVersion = null; - for (File candidate : candidates) { - String fileName = candidate.getName(); - String candidateVersionString = fileName.substring(0, fileName.lastIndexOf('.')); - Version candidateVersion = Version.parse(candidateVersionString); - LOG.debug("Considering {}, which resolves to version {}", fileName, candidateVersion); - if (candidateVersion.compareTo(serverVersion) > 0) { - LOG.debug("too high, discarding"); - } else if (bestVersion != null && bestVersion.compareTo(candidateVersion) >= 0) { - LOG.debug("not higher than {}, discarding", bestVersion); - } else { - LOG.debug("best so far"); - bestVersion = candidateVersion; - bestFile = candidate; - } - } - assertThat(bestFile) - .as("Could not find create script with version <= %s in %s", serverVersion, scriptsDir) - .isNotNull(); - - LOG.info("Using {} to test against {} {}", bestFile, CCM_RULE.getDistribution(), serverVersion); - return bestFile; - } - - private static String getScriptContents() { - - try { - - return Files.asCharSource(scriptFile, Charsets.UTF_8) - .read() - .trim() - .replaceAll("ks_0", SESSION_RULE.keyspace().asCql(true)); - } catch (IOException ioe) { - fail("Exception reading script file " + scriptFile, ioe); - return null; - } - } - - private static void setupDatabase() { - List statements = 
STATEMENT_SPLITTER.splitToList(scriptContents); - SchemaChangeSynchronizer.withLock( - () -> { - // Skip the first statement (CREATE KEYSPACE), we already have a keyspace - for (int i = 1; i < statements.size(); i++) { - String statement = statements.get(i); - try { - SESSION_RULE.session().execute(statement); - } catch (Exception e) { - fail("Error executing statement %s (%s)", statement, e); - } - } - }); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/MetadataIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/MetadataIT.java deleted file mode 100644 index 1b1aed4b3de..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/MetadataIT.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class MetadataIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - public void should_expose_cluster_name() { - Metadata metadata = sessionRule.session().getMetadata(); - assertThat(metadata.getClusterName()).hasValue(CcmBridge.CLUSTER_NAME); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenIT.java deleted file mode 100644 index a119c503a20..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenIT.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class Murmur3TokenIT extends TokenITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withNodes(3) - .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("request_timeout_in_ms", 45_000) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public Murmur3TokenIT() { - super("org.apache.cassandra.dht.Murmur3Partitioner", Murmur3Token.class, false); - } - - @Override - protected CqlSession session() { - 
return SESSION_RULE.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(SESSION_RULE.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenVnodesIT.java deleted file mode 100644 index cb80abc0a3f..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenVnodesIT.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.CASSANDRA, - maxExclusive = "4.0-beta4", - // TODO Re-enable when CASSANDRA-16364 is fixed - description = "TODO Re-enable when CASSANDRA-16364 is fixed") -public class Murmur3TokenVnodesIT extends TokenITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withNodes(3) - .withCreateOption("--vnodes") - .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("request_timeout_in_ms", 45_000) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public Murmur3TokenVnodesIT() { - super("org.apache.cassandra.dht.Murmur3Partitioner", Murmur3Token.class, true); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } 
- - @BeforeClass - public static void createSchema() { - createSchema(SESSION_RULE.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeMetadataIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeMetadataIT.java deleted file mode 100644 index 8f5680ff41a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeMetadataIT.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class NodeMetadataIT { - - @Rule public CcmRule ccmRule = CcmRule.getInstance(); - - @Test - public void should_expose_node_metadata() { - try (CqlSession session = SessionUtils.newSession(ccmRule)) { - - Node node = getUniqueNode(session); - // Run a few basic checks given what we know about our test environment: - assertThat(node.getEndPoint()).isNotNull(); - InetSocketAddress connectAddress = (InetSocketAddress) node.getEndPoint().resolve(); - node.getBroadcastAddress() - .ifPresent( - broadcastAddress -> - assertThat(broadcastAddress.getAddress()).isEqualTo(connectAddress.getAddress())); - 
assertThat(node.getListenAddress().get().getAddress()).isEqualTo(connectAddress.getAddress()); - assertThat(node.getDatacenter()).isEqualTo("dc1"); - assertThat(node.getRack()).isEqualTo("r1"); - if (CcmBridge.isDistributionOf(BackendType.CASSANDRA)) { - // CcmBridge does not report accurate C* versions for other distributions (e.g. DSE), only - // approximated values - assertThat(node.getCassandraVersion()).isEqualTo(ccmRule.getCassandraVersion()); - } - assertThat(node.getState()).isSameAs(NodeState.UP); - assertThat(node.getDistance()).isSameAs(NodeDistance.LOCAL); - assertThat(node.getHostId()).isNotNull(); - assertThat(node.getSchemaVersion()).isNotNull(); - long upTime1 = node.getUpSinceMillis(); - assertThat(upTime1).isGreaterThan(-1); - - // Note: open connections and reconnection status are covered in NodeStateIT - - // Force the node down and back up to check that upSinceMillis gets updated - EventBus eventBus = ((InternalDriverContext) session.getContext()).getEventBus(); - eventBus.fire(TopologyEvent.forceDown(node.getBroadcastRpcAddress().get())); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> node.getState() == NodeState.FORCED_DOWN); - assertThat(node.getUpSinceMillis()).isEqualTo(-1); - eventBus.fire(TopologyEvent.forceUp(node.getBroadcastRpcAddress().get())); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> node.getState() == NodeState.UP); - assertThat(node.getUpSinceMillis()).isGreaterThan(upTime1); - } - } - - @Test - @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1") - public void should_expose_dse_node_properties() { - try (CqlSession session = SessionUtils.newSession(ccmRule)) { - - Node node = getUniqueNode(session); - - // Basic checks as we want something that will work with a large range of DSE versions: - assertThat(node.getExtras()) - .containsKeys( - DseNodeProperties.DSE_VERSION, - DseNodeProperties.DSE_WORKLOADS, - 
DseNodeProperties.SERVER_ID); - assertThat(node.getExtras().get(DseNodeProperties.DSE_VERSION)) - .isEqualTo(ccmRule.getDistributionVersion()); - assertThat(node.getExtras().get(DseNodeProperties.SERVER_ID)).isInstanceOf(String.class); - assertThat(node.getExtras().get(DseNodeProperties.DSE_WORKLOADS)).isInstanceOf(Set.class); - } - } - - private static Node getUniqueNode(CqlSession session) { - Collection nodes = session.getMetadata().getNodes().values(); - assertThat(nodes).hasSize(1); - return nodes.iterator().next(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeStateIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeStateIT.java deleted file mode 100644 index e468e0a10d7..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeStateIT.java +++ /dev/null @@ -1,749 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; -import static com.datastax.oss.driver.assertions.Assertions.fail; -import static org.awaitility.Awaitility.await; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import 
com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.NodeConnectionReport; -import com.datastax.oss.simulacron.common.stubbing.CloseType; -import com.datastax.oss.simulacron.server.BoundNode; -import com.datastax.oss.simulacron.server.RejectScope; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.SocketAddress; -import java.time.Duration; -import java.util.Iterator; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.InOrder; -import org.mockito.junit.MockitoJUnitRunner; - -@Category(ParallelizableTests.class) -@RunWith(MockitoJUnitRunner.class) -public class NodeStateIT { - - private SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(2)); - - private NodeStateListener nodeStateListener = mock(NodeStateListener.class); - private InOrder inOrder; - - private SessionRule sessionRule = - SessionRule.builder(simulacron) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 
2) - .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofSeconds(1)) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - NodeStateIT.ConfigurableIgnoresPolicy.class) - .build()) - .withNodeStateListener(nodeStateListener) - .build(); - - @Rule public TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); - - private @Captor ArgumentCaptor nodeCaptor; - - private InternalDriverContext driverContext; - private ConfigurableIgnoresPolicy defaultLoadBalancingPolicy; - private final BlockingQueue stateEvents = new LinkedBlockingDeque<>(); - - private BoundNode simulacronControlNode; - private BoundNode simulacronRegularNode; - private DefaultNode metadataControlNode; - private DefaultNode metadataRegularNode; - - @Before - public void setup() { - inOrder = inOrder(nodeStateListener); - - AtomicBoolean nonInitialEvent = new AtomicBoolean(false); - driverContext = (InternalDriverContext) sessionRule.session().getContext(); - driverContext - .getEventBus() - .register( - NodeStateEvent.class, - (e) -> { - // Skip transition from unknown to up if we haven't received any other events, - // these may just be the initial events that have typically fired by now, but - // may not have depending on timing. - if (!nonInitialEvent.get() - && e.oldState == NodeState.UNKNOWN - && e.newState == NodeState.UP) { - return; - } - nonInitialEvent.set(true); - stateEvents.add(e); - }); - - defaultLoadBalancingPolicy = - (ConfigurableIgnoresPolicy) - driverContext.getLoadBalancingPolicy(DriverExecutionProfile.DEFAULT_NAME); - - // Sanity check: the driver should have connected to simulacron - await() - .alias("Connections established") - .pollInterval(500, TimeUnit.MILLISECONDS) - .until( - () -> - // 1 control connection + 2 pooled connections per node - simulacron.cluster().getActiveConnections() == 5); - - // Find out which node is the control node, and identify the corresponding Simulacron and driver - // metadata objects. 
- simulacronControlNode = simulacronRegularNode = null; - for (BoundNode boundNode : simulacron.cluster().getNodes()) { - if (boundNode.getActiveConnections() == 3) { - simulacronControlNode = boundNode; - } else { - simulacronRegularNode = boundNode; - } - } - assertThat(simulacronControlNode).isNotNull(); - assertThat(simulacronRegularNode).isNotNull(); - - Metadata metadata = sessionRule.session().getMetadata(); - metadataControlNode = - (DefaultNode) - metadata - .findNode(simulacronControlNode.inetSocketAddress()) - .orElseThrow(AssertionError::new); - metadataRegularNode = - (DefaultNode) - metadata - .findNode(simulacronRegularNode.inetSocketAddress()) - .orElseThrow(AssertionError::new); - - // SessionRule uses all nodes as contact points, so we only get onUp notifications for them (no - // onAdd) - inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataControlNode); - inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); - } - - @After - public void teardown() { - reset(nodeStateListener); - } - - @Test - public void should_report_connections_for_healthy_nodes() { - await() - .alias("Node metadata up-to-date") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> { - assertThat(metadataControlNode).isUp().hasOpenConnections(3).isNotReconnecting(); - assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting(); - }); - } - - @Test - public void should_keep_regular_node_up_when_still_one_connection() { - simulacronRegularNode.rejectConnections(0, RejectScope.UNBIND); - NodeConnectionReport report = simulacronRegularNode.getConnections(); - simulacron.cluster().closeConnection(report.getConnections().get(0), CloseType.DISCONNECT); - - await() - .alias("Reconnection started") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(1).isReconnecting()); - inOrder.verify(nodeStateListener, never()).onDown(metadataRegularNode); - 
} - - @Test - public void should_mark_regular_node_down_when_no_more_connections() { - simulacronRegularNode.stop(); - - await() - .alias("Node going down") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataRegularNode).isDown().hasOpenConnections(0).isReconnecting()); - - expect(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, metadataRegularNode)); - inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); - } - - @Test - public void should_mark_control_node_down_when_control_connection_is_last_connection_and_dies() { - simulacronControlNode.rejectConnections(0, RejectScope.UNBIND); - - // Identify the control connection and close the two other ones - SocketAddress controlAddress = driverContext.getControlConnection().channel().localAddress(); - NodeConnectionReport report = simulacronControlNode.getConnections(); - for (SocketAddress address : report.getConnections()) { - if (!address.equals(controlAddress)) { - simulacron.cluster().closeConnection(address, CloseType.DISCONNECT); - } - } - await() - .alias("Control node lost its non-control connections") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataControlNode).isUp().hasOpenConnections(1).isReconnecting()); - inOrder.verify(nodeStateListener, never()).onDown(metadataRegularNode); - - simulacron.cluster().closeConnection(controlAddress, CloseType.DISCONNECT); - await() - .alias("Control node going down") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataControlNode).isDown().hasOpenConnections(0).isReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataControlNode); - - expect(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, metadataControlNode)); - } - - @Test - public void should_bring_node_back_up_when_reconnection_succeeds() { - simulacronRegularNode.stop(); - - await() - .alias("Node going down") - .pollInterval(500, 
TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataRegularNode).isDown().hasOpenConnections(0).isReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); - - simulacronRegularNode.acceptConnections(); - - await() - .alias("Connections re-established") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); - - expect( - NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, metadataRegularNode), - NodeStateEvent.changed(NodeState.DOWN, NodeState.UP, metadataRegularNode)); - } - - @Test - public void should_apply_up_and_down_topology_events_when_ignored() { - defaultLoadBalancingPolicy.ignore(metadataRegularNode); - - await() - .alias("Driver closed all connections to ignored node") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> - assertThat(metadataRegularNode) - .isUp() - .isIgnored() - .hasOpenConnections(0) - .isNotReconnecting()); - - driverContext - .getEventBus() - .fire(TopologyEvent.suggestDown(metadataRegularNode.getBroadcastRpcAddress().get())); - await() - .alias("SUGGEST_DOWN event applied") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> - assertThat(metadataRegularNode) - .isDown() - .isIgnored() - .hasOpenConnections(0) - .isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); - - driverContext - .getEventBus() - .fire(TopologyEvent.suggestUp(metadataRegularNode.getBroadcastRpcAddress().get())); - await() - .alias("SUGGEST_UP event applied") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> - assertThat(metadataRegularNode) - .isUp() - .isIgnored() - .hasOpenConnections(0) - .isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); - - 
defaultLoadBalancingPolicy.stopIgnoring(metadataRegularNode); - } - - @Test - public void should_ignore_down_topology_event_when_still_connected() throws InterruptedException { - driverContext - .getEventBus() - .fire(TopologyEvent.suggestDown(metadataRegularNode.getBroadcastRpcAddress().get())); - TimeUnit.MILLISECONDS.sleep(500); - assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting(); - } - - @Test - public void should_force_immediate_reconnection_when_up_topology_event() - throws InterruptedException { - // This test requires a longer reconnection interval, so create a separate driver instance - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofHours(1)) - .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofHours(1)) - .build(); - NodeStateListener localNodeStateListener = mock(NodeStateListener.class); - try (CqlSession session = - SessionUtils.newSession(simulacron, null, localNodeStateListener, null, null, loader)) { - - BoundNode localSimulacronNode = simulacron.cluster().getNodes().iterator().next(); - assertThat(localSimulacronNode).isNotNull(); - - DefaultNode localMetadataNode = - (DefaultNode) - session - .getMetadata() - .findNode(localSimulacronNode.inetSocketAddress()) - .orElseThrow(AssertionError::new); - // UP fired a first time as part of the init process - verify(localNodeStateListener, timeout(500)).onUp(localMetadataNode); - - localSimulacronNode.stop(); - - await() - .alias("Node going down") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(localMetadataNode).isDown().hasOpenConnections(0).isReconnecting()); - verify(localNodeStateListener, timeout(500)).onDown(localMetadataNode); - - expect(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, localMetadataNode)); - - localSimulacronNode.acceptConnections(); - ((InternalDriverContext) session.getContext()) - .getEventBus() - 
.fire(TopologyEvent.suggestUp(localMetadataNode.getBroadcastRpcAddress().get())); - - await() - .alias("Node coming back up") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted(() -> assertThat(localMetadataNode).isUp().isNotReconnecting()); - verify(localNodeStateListener, timeout(500).times(2)).onUp(localMetadataNode); - - expect(NodeStateEvent.changed(NodeState.DOWN, NodeState.UP, localMetadataNode)); - } - } - - @Test - public void should_force_down_when_not_ignored() throws InterruptedException { - driverContext - .getEventBus() - .fire(TopologyEvent.forceDown(metadataRegularNode.getBroadcastRpcAddress().get())); - await() - .alias("Node forced down") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> - assertThat(metadataRegularNode) - .isForcedDown() - .hasOpenConnections(0) - .isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); - - // Should ignore up/down topology events while forced down - driverContext - .getEventBus() - .fire(TopologyEvent.suggestUp(metadataRegularNode.getBroadcastRpcAddress().get())); - TimeUnit.MILLISECONDS.sleep(500); - assertThat(metadataRegularNode).isForcedDown(); - - driverContext - .getEventBus() - .fire(TopologyEvent.suggestDown(metadataRegularNode.getBroadcastRpcAddress().get())); - TimeUnit.MILLISECONDS.sleep(500); - assertThat(metadataRegularNode).isForcedDown(); - - // Should only come back up on a FORCE_UP event - driverContext - .getEventBus() - .fire(TopologyEvent.forceUp(metadataRegularNode.getBroadcastRpcAddress().get())); - await() - .alias("Node forced back up") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); - } - - @Test - public void should_force_down_when_ignored() throws InterruptedException { - defaultLoadBalancingPolicy.ignore(metadataRegularNode); - - 
driverContext - .getEventBus() - .fire(TopologyEvent.forceDown(metadataRegularNode.getBroadcastRpcAddress().get())); - await() - .alias("Node forced down") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> - assertThat(metadataRegularNode) - .isForcedDown() - .hasOpenConnections(0) - .isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); - - // Should ignore up/down topology events while forced down - driverContext - .getEventBus() - .fire(TopologyEvent.suggestUp(metadataRegularNode.getBroadcastRpcAddress().get())); - TimeUnit.MILLISECONDS.sleep(500); - assertThat(metadataRegularNode).isForcedDown(); - - driverContext - .getEventBus() - .fire(TopologyEvent.suggestDown(metadataRegularNode.getBroadcastRpcAddress().get())); - TimeUnit.MILLISECONDS.sleep(500); - assertThat(metadataRegularNode).isForcedDown(); - - // Should only come back up on a FORCE_UP event, will not reopen connections since it is still - // ignored - driverContext - .getEventBus() - .fire(TopologyEvent.forceUp(metadataRegularNode.getBroadcastRpcAddress().get())); - await() - .alias("Node forced back up") - .pollInterval(500, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> - assertThat(metadataRegularNode) - .isUp() - .isIgnored() - .hasOpenConnections(0) - .isNotReconnecting()); - inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); - - defaultLoadBalancingPolicy.stopIgnoring(metadataRegularNode); - } - - @Test - public void should_signal_non_contact_points_as_added() { - // Since we need to observe the behavior of non-contact points, build a dedicated session with - // just one contact point. 
- Iterator contactPoints = simulacron.getContactPoints().iterator(); - EndPoint endPoint1 = contactPoints.next(); - EndPoint endPoint2 = contactPoints.next(); - NodeStateListener localNodeStateListener = mock(NodeStateListener.class); - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofHours(1)) - .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofHours(1)) - .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 0) - .build(); - try (CqlSession localSession = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoint(endPoint1) - .withNodeStateListener(localNodeStateListener) - .withConfigLoader(loader) - .build()) { - - Metadata metadata = localSession.getMetadata(); - Node localMetadataNode1 = metadata.findNode(endPoint1).orElseThrow(AssertionError::new); - Node localMetadataNode2 = metadata.findNode(endPoint2).orElseThrow(AssertionError::new); - - // Successful contact point goes to up directly - verify(localNodeStateListener, timeout(500)).onUp(localMetadataNode1); - // Non-contact point only added since we don't have a connection or events for it yet - verify(localNodeStateListener, timeout(500)).onAdd(localMetadataNode2); - } - } - - @Test - public void should_remove_invalid_contact_point() { - - Iterator contactPoints = simulacron.getContactPoints().iterator(); - EndPoint endPoint1 = contactPoints.next(); - EndPoint endPoint2 = contactPoints.next(); - NodeStateListener localNodeStateListener = mock(NodeStateListener.class); - - // Initialize the driver with 1 wrong address and 1 valid address - EndPoint wrongContactPoint = withUnusedPort(endPoint1); - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofHours(1)) - .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofHours(1)) - .build(); - try (CqlSession localSession = - (CqlSession) - 
SessionUtils.baseBuilder() - .addContactEndPoint(endPoint1) - .addContactEndPoint(wrongContactPoint) - .withNodeStateListener(localNodeStateListener) - .withConfigLoader(loader) - .build()) { - - Metadata metadata = localSession.getMetadata(); - assertThat(metadata.findNode(wrongContactPoint)).isEmpty(); - Node localMetadataNode1 = metadata.findNode(endPoint1).orElseThrow(AssertionError::new); - Node localMetadataNode2 = metadata.findNode(endPoint2).orElseThrow(AssertionError::new); - - // The order of the calls is not deterministic because contact points are shuffled, but it - // does not matter here since Mockito.verify does not enforce order. - verify(localNodeStateListener, timeout(500)).onRemove(nodeCaptor.capture()); - assertThat(nodeCaptor.getValue().getEndPoint()).isEqualTo(wrongContactPoint); - verify(localNodeStateListener, timeout(500)).onUp(localMetadataNode1); - verify(localNodeStateListener, timeout(500)).onAdd(localMetadataNode2); - - // Note: there might be an additional onDown for wrongContactPoint if it was hit first at - // init. This is hard to test since the node was removed later, so we simply don't call - // verifyNoMoreInteractions. - } - } - - @Test - public void should_mark_unreachable_contact_point_down() { - // This time we connect with two valid contact points, but is unresponsive, it should be marked - // down - Iterator simulacronNodes = simulacron.cluster().getNodes().iterator(); - BoundNode localSimulacronNode1 = simulacronNodes.next(); - BoundNode localSimulacronNode2 = simulacronNodes.next(); - - InetSocketAddress address1 = localSimulacronNode1.inetSocketAddress(); - InetSocketAddress address2 = localSimulacronNode2.inetSocketAddress(); - - NodeStateListener localNodeStateListener = mock(NodeStateListener.class); - - localSimulacronNode2.stop(); - try { - // Since contact points are shuffled, we have a 50% chance that our bad contact point will be - // hit first. So we retry the scenario a few times if needed. 
- DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofHours(1)) - .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofHours(1)) - .build(); - for (int i = 0; i < 10; i++) { - try (CqlSession localSession = - (CqlSession) - SessionUtils.baseBuilder() - .addContactPoint(address1) - .addContactPoint(address2) - .withNodeStateListener(localNodeStateListener) - .withConfigLoader(loader) - .build()) { - - Metadata metadata = localSession.getMetadata(); - Node localMetadataNode1 = metadata.findNode(address1).orElseThrow(AssertionError::new); - Node localMetadataNode2 = metadata.findNode(address2).orElseThrow(AssertionError::new); - if (localMetadataNode2.getState() == NodeState.DOWN) { - // Stopped node was tried first and marked down, that's our target scenario - verify(localNodeStateListener, timeout(500)).onDown(localMetadataNode2); - verify(localNodeStateListener, timeout(500)).onUp(localMetadataNode1); - verify(localNodeStateListener, timeout(500)).onSessionReady(localSession); - verifyNoMoreInteractions(localNodeStateListener); - return; - } else { - // Stopped node was not tried - assertThat(localMetadataNode2).isUnknown(); - verify(localNodeStateListener, timeout(500)).onUp(localMetadataNode1); - verifyNoMoreInteractions(localNodeStateListener); - } - } - reset(localNodeStateListener); - } - fail("Couldn't get the driver to try stopped node first (tried 5 times)"); - } finally { - localSimulacronNode2.acceptConnections(); - } - } - - private void expect(NodeStateEvent... expectedEvents) { - for (NodeStateEvent expected : expectedEvents) { - try { - NodeStateEvent actual = stateEvents.poll(10, TimeUnit.SECONDS); - assertThat(actual).isNotNull(); - - // Don't compare events directly: some tests call this method with nodes obtained from - // another session instance, and nodes are compared by reference. 
- assertThat(actual.oldState).isEqualTo(expected.oldState); - assertThat(actual.newState).isEqualTo(expected.newState); - assertThat(actual.node.getHostId()).isEqualTo(expected.node.getHostId()); - } catch (InterruptedException e) { - fail("Interrupted while waiting for event"); - } - } - } - - // Generates an endpoint that is not the connect address of one of the nodes in the cluster - private EndPoint withUnusedPort(EndPoint endPoint) { - InetSocketAddress address = (InetSocketAddress) endPoint.resolve(); - return new DefaultEndPoint(new InetSocketAddress(address.getAddress(), findAvailablePort())); - } - - /** - * Finds an available port in the ephemeral range. This is loosely inspired by Apache MINA's - * AvailablePortFinder. - */ - private static synchronized int findAvailablePort() throws RuntimeException { - // let the system pick an ephemeral port - try (ServerSocket ss = new ServerSocket(0)) { - ss.setReuseAddress(true); - return ss.getLocalPort(); - } catch (IOException e) { - throw new AssertionError(e); - } - } - - /** - * A load balancing policy that can be told to ignore a node temporarily (the rest of the - * implementation uses a simple round-robin, non DC-aware shuffle). 
- */ - public static class ConfigurableIgnoresPolicy implements LoadBalancingPolicy { - - private final CopyOnWriteArraySet liveNodes = new CopyOnWriteArraySet<>(); - private final AtomicInteger offset = new AtomicInteger(); - private final Set ignoredNodes = new CopyOnWriteArraySet<>(); - - private volatile DistanceReporter distanceReporter; - - public ConfigurableIgnoresPolicy( - @SuppressWarnings("unused") DriverContext context, - @SuppressWarnings("unused") String profileName) { - // nothing to do - } - - @Override - public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { - this.distanceReporter = distanceReporter; - for (Node node : nodes.values()) { - liveNodes.add(node); - distanceReporter.setDistance(node, NodeDistance.LOCAL); - } - } - - public void ignore(Node node) { - if (ignoredNodes.add(node)) { - liveNodes.remove(node); - distanceReporter.setDistance(node, NodeDistance.IGNORED); - } - } - - public void stopIgnoring(Node node) { - if (ignoredNodes.remove(node)) { - distanceReporter.setDistance(node, NodeDistance.LOCAL); - // There might be a short delay until the node's pool becomes usable, but clients know how - // to deal with that. - liveNodes.add(node); - } - } - - @NonNull - @Override - public Queue newQueryPlan(@NonNull Request request, @NonNull Session session) { - Object[] snapshot = liveNodes.toArray(); - Queue queryPlan = new ConcurrentLinkedQueue<>(); - int start = offset.getAndIncrement(); // Note: offset overflow won't be an issue in tests - for (int i = 0; i < snapshot.length; i++) { - queryPlan.add((Node) snapshot[(start + i) % snapshot.length]); - } - return queryPlan; - } - - @Override - public void onAdd(@NonNull Node node) { - if (ignoredNodes.contains(node)) { - distanceReporter.setDistance(node, NodeDistance.IGNORED); - } else { - // Setting to a non-ignored distance triggers the session to open a pool, which will in turn - // set the node UP when the first channel gets opened. 
- distanceReporter.setDistance(node, NodeDistance.LOCAL); - } - } - - @Override - public void onUp(@NonNull Node node) { - if (!ignoredNodes.contains(node)) { - liveNodes.add(node); - } - } - - @Override - public void onDown(@NonNull Node node) { - liveNodes.remove(node); - } - - @Override - public void onRemove(@NonNull Node node) { - liveNodes.remove(node); - } - - @Override - public void close() { - // nothing to do - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenIT.java deleted file mode 100644 index 603783afb34..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenIT.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class RandomTokenIT extends TokenITBase { - - private static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withNodes(3) - .withCreateOption("-p RandomPartitioner") - .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("request_timeout_in_ms", 45_000) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public RandomTokenIT() { - super("org.apache.cassandra.dht.RandomPartitioner", RandomToken.class, false); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(SESSION_RULE.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenVnodesIT.java deleted file mode 100644 index 
683b5651f98..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenVnodesIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@BackendRequirement( - type = BackendType.CASSANDRA, - maxExclusive = "4.0-beta4", - // TODO Re-enable when CASSANDRA-16364 is fixed - description = "TODO Re-enable when CASSANDRA-16364 is fixed") -public class RandomTokenVnodesIT extends TokenITBase { - - private static 
final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - .withNodes(3) - .withCreateOption("-p RandomPartitioner") - .withCreateOption("--vnodes") - .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) - .withCassandraConfiguration("request_timeout_in_ms", 45_000) - .build(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public RandomTokenVnodesIT() { - super("org.apache.cassandra.dht.RandomPartitioner", RandomToken.class, true); - } - - @Override - protected CqlSession session() { - return SESSION_RULE.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(SESSION_RULE.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaAgreementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaAgreementIT.java deleted file mode 100644 index 724508d38a3..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaAgreementIT.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import java.time.Duration; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; - -public class SchemaAgreementIT { - - private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withNodes(3).build(); - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - SortingLoadBalancingPolicy.class) - .withDuration( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, - Duration.ofSeconds(3)) - .build()) - .build(); - - @ClassRule - public static final RuleChain CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Rule public 
TestName name = new TestName(); - - @Test - public void should_succeed_when_all_nodes_agree() { - ResultSet result = createTable(); - - assertThat(result.getExecutionInfo().isSchemaInAgreement()).isTrue(); - assertThat(SESSION_RULE.session().checkSchemaAgreement()).isTrue(); - } - - @Test - public void should_fail_on_timeout() { - CCM_RULE.getCcmBridge().pause(2); - try { - // Can't possibly agree since one node is paused. - ResultSet result = createTable(); - - assertThat(result.getExecutionInfo().isSchemaInAgreement()).isFalse(); - assertThat(SESSION_RULE.session().checkSchemaAgreement()).isFalse(); - } finally { - CCM_RULE.getCcmBridge().resume(2); - } - } - - @Test - public void should_agree_when_up_nodes_agree() { - CCM_RULE.getCcmBridge().stop(2); - try { - // Should agree since up hosts should agree. - ResultSet result = createTable(); - - assertThat(result.getExecutionInfo().isSchemaInAgreement()).isTrue(); - assertThat(SESSION_RULE.session().checkSchemaAgreement()).isTrue(); - } finally { - CCM_RULE.getCcmBridge().start(2); - } - } - - @Test - public void should_fail_if_timeout_is_zero() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withDuration( - DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, Duration.ofSeconds(0)) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace(), loader)) { - ResultSet result = createTable(session); - - // Should not agree because schema metadata is disabled - assertThat(result.getExecutionInfo().isSchemaInAgreement()).isFalse(); - assertThat(session.checkSchemaAgreement()).isFalse(); - } - } - - private ResultSet createTable() { - return createTable(SESSION_RULE.session()); - } - - private final AtomicInteger tableCounter = new AtomicInteger(); - - private ResultSet createTable(CqlSession session) { - String tableName = name.getMethodName(); - if (tableName.length() > 48) { - 
tableName = tableName.substring(0, 44) + tableCounter.getAndIncrement(); - } - return session.execute(String.format("CREATE TABLE %s (k int primary key, v int)", tableName)); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaChangesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaChangesIT.java deleted file mode 100644 index 85fcfc02cdb..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaChangesIT.java +++ /dev/null @@ -1,667 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assumptions.assumeThat; -import static org.awaitility.Awaitility.await; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import java.time.Duration; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.function.BiConsumer; -import java.util.function.Consumer; -import java.util.function.Function; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class SchemaChangesIT { - - static { - CustomCcmRule.Builder builder = CustomCcmRule.builder(); - if (!CcmBridge.isDistributionOf( - BackendType.DSE, (dist, cass) -> cass.nextStable().compareTo(Version.V4_0_0) >= 0)) { - builder.withCassandraConfiguration("enable_materialized_views", true); - } - CCM_RULE = builder.build(); - } - - private static final CustomCcmRule CCM_RULE; - - // A 
client that we only use to set up the tests - private static final SessionRule ADMIN_SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(0)) - .build()) - .build(); - - @ClassRule - public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(ADMIN_SESSION_RULE); - - @Before - public void setup() { - // Always drop and re-create the keyspace to start from a clean state - ADMIN_SESSION_RULE - .session() - .execute(String.format("DROP KEYSPACE %s", ADMIN_SESSION_RULE.keyspace())); - SessionUtils.createKeyspace(ADMIN_SESSION_RULE.session(), ADMIN_SESSION_RULE.keyspace()); - } - - @Test - public void should_handle_keyspace_creation() { - CqlIdentifier newKeyspaceId = SessionUtils.uniqueKeyspaceId(); - should_handle_creation( - null, - String.format( - "CREATE KEYSPACE %s " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - newKeyspaceId), - metadata -> metadata.getKeyspace(newKeyspaceId), - keyspace -> { - assertThat(keyspace.getName()).isEqualTo(newKeyspaceId); - assertThat(keyspace.isDurableWrites()).isTrue(); - assertThat(keyspace.getReplication()) - .hasSize(2) - .containsEntry("class", "org.apache.cassandra.locator.SimpleStrategy") - .containsEntry("replication_factor", "1"); - }, - (listener, keyspace) -> verify(listener).onKeyspaceCreated(keyspace), - newKeyspaceId); - } - - @Test - public void should_handle_keyspace_drop() { - CqlIdentifier newKeyspaceId = SessionUtils.uniqueKeyspaceId(); - should_handle_drop( - ImmutableList.of( - String.format( - "CREATE KEYSPACE %s " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - newKeyspaceId.asCql(true))), - String.format("DROP KEYSPACE %s", newKeyspaceId.asCql(true)), - metadata -> metadata.getKeyspace(newKeyspaceId), - (listener, oldKeyspace) -> 
verify(listener).onKeyspaceDropped(oldKeyspace), - newKeyspaceId); - } - - @Test - public void should_handle_keyspace_update() { - CqlIdentifier newKeyspaceId = SessionUtils.uniqueKeyspaceId(); - should_handle_update( - ImmutableList.of( - String.format( - "CREATE KEYSPACE %s " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - newKeyspaceId.asCql(true))), - String.format( - "ALTER KEYSPACE %s " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " - + "AND durable_writes = 'false'", - newKeyspaceId.asCql(true)), - metadata -> metadata.getKeyspace(newKeyspaceId), - newKeyspace -> assertThat(newKeyspace.isDurableWrites()).isFalse(), - (listener, oldKeyspace, newKeyspace) -> - verify(listener).onKeyspaceUpdated(newKeyspace, oldKeyspace), - newKeyspaceId); - } - - @Test - public void should_handle_table_creation() { - should_handle_creation( - null, - "CREATE TABLE foo(k int primary key)", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .orElseThrow(IllegalStateException::new) - .getTable(CqlIdentifier.fromInternal("foo")), - table -> { - assertThat(table.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); - assertThat(table.getName().asInternal()).isEqualTo("foo"); - assertThat(table.getColumns()).containsOnlyKeys(CqlIdentifier.fromInternal("k")); - assertThat(table.getColumn(CqlIdentifier.fromInternal("k"))) - .hasValueSatisfying( - k -> { - assertThat(k.getType()).isEqualTo(DataTypes.INT); - assertThat(table.getPartitionKey()).containsExactly(k); - }); - assertThat(table.getClusteringColumns()).isEmpty(); - }, - (listener, table) -> verify(listener).onTableCreated(table)); - } - - @Test - public void should_handle_table_drop() { - should_handle_drop( - ImmutableList.of("CREATE TABLE foo(k int primary key)"), - "DROP TABLE foo", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getTable(CqlIdentifier.fromInternal("foo"))), - (listener, 
oldTable) -> verify(listener).onTableDropped(oldTable)); - } - - @Test - public void should_handle_table_update() { - should_handle_update( - ImmutableList.of("CREATE TABLE foo(k int primary key)"), - "ALTER TABLE foo ADD v int", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getTable(CqlIdentifier.fromInternal("foo"))), - newTable -> assertThat(newTable.getColumn(CqlIdentifier.fromInternal("v"))).isPresent(), - (listener, oldTable, newTable) -> verify(listener).onTableUpdated(newTable, oldTable)); - } - - @Test - public void should_handle_type_creation() { - should_handle_creation( - null, - "CREATE TYPE t(i int)", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("t"))), - type -> { - assertThat(type.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); - assertThat(type.getName().asInternal()).isEqualTo("t"); - assertThat(type.getFieldNames()).containsExactly(CqlIdentifier.fromInternal("i")); - assertThat(type.getFieldTypes()).containsExactly(DataTypes.INT); - }, - (listener, type) -> verify(listener).onUserDefinedTypeCreated(type)); - } - - @Test - public void should_handle_type_drop() { - should_handle_drop( - ImmutableList.of("CREATE TYPE t(i int)"), - "DROP TYPE t", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("t"))), - (listener, oldType) -> verify(listener).onUserDefinedTypeDropped(oldType)); - } - - @Test - public void should_handle_type_update() { - should_handle_update( - ImmutableList.of("CREATE TYPE t(i int)"), - "ALTER TYPE t ADD j int", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("t"))), - newType -> - assertThat(newType.getFieldNames()) - .containsExactly(CqlIdentifier.fromInternal("i"), CqlIdentifier.fromInternal("j")), - (listener, 
oldType, newType) -> - verify(listener).onUserDefinedTypeUpdated(newType, oldType)); - } - - @Test - public void should_handle_view_creation() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V3_0_0) >= 0) - .isTrue(); - should_handle_creation( - "CREATE TABLE scores(user text, game text, score int, PRIMARY KEY (user, game))", - "CREATE MATERIALIZED VIEW highscores " - + "AS SELECT game, user, score FROM scores " - + "WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL " - + "PRIMARY KEY (game, score, user) " - + "WITH CLUSTERING ORDER BY (score DESC, user DESC)", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getView(CqlIdentifier.fromInternal("highscores"))), - view -> { - assertThat(view.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); - assertThat(view.getName().asInternal()).isEqualTo("highscores"); - assertThat(view.getBaseTable().asInternal()).isEqualTo("scores"); - assertThat(view.includesAllColumns()).isFalse(); - assertThat(view.getWhereClause()) - .hasValue("game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL"); - assertThat(view.getColumns()) - .containsOnlyKeys( - CqlIdentifier.fromInternal("game"), - CqlIdentifier.fromInternal("score"), - CqlIdentifier.fromInternal("user")); - }, - (listener, view) -> verify(listener).onViewCreated(view)); - } - - @Test - public void should_handle_view_drop() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V3_0_0) >= 0) - .isTrue(); - should_handle_drop( - ImmutableList.of( - "CREATE TABLE scores(user text, game text, score int, PRIMARY KEY (user, game))", - "CREATE MATERIALIZED VIEW highscores " - + "AS SELECT game, user, score FROM scores " - + "WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL " - + "PRIMARY KEY (game, score, user) " - + "WITH CLUSTERING ORDER BY (score DESC, user DESC)"), - "DROP MATERIALIZED VIEW highscores", - metadata -> - metadata - 
.getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getView(CqlIdentifier.fromInternal("highscores"))), - (listener, oldView) -> verify(listener).onViewDropped(oldView)); - } - - @Test - public void should_handle_view_update() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V3_0_0) >= 0) - .isTrue(); - should_handle_update( - ImmutableList.of( - "CREATE TABLE scores(user text, game text, score int, PRIMARY KEY (user, game))", - "CREATE MATERIALIZED VIEW highscores " - + "AS SELECT game, user, score FROM scores " - + "WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL " - + "PRIMARY KEY (game, score, user) " - + "WITH CLUSTERING ORDER BY (score DESC, user DESC)"), - "ALTER MATERIALIZED VIEW highscores WITH comment = 'The best score for each game'", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getView(CqlIdentifier.fromInternal("highscores"))), - newView -> - assertThat(newView.getOptions().get(CqlIdentifier.fromInternal("comment"))) - .isEqualTo("The best score for each game"), - (listener, oldView, newView) -> verify(listener).onViewUpdated(newView, oldView)); - } - - @Test - public void should_handle_function_creation() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) - .isTrue(); - should_handle_creation( - null, - "CREATE FUNCTION id(i int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return i;'", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getFunction(CqlIdentifier.fromInternal("id"), DataTypes.INT)), - function -> { - assertThat(function.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); - assertThat(function.getSignature().getName().asInternal()).isEqualTo("id"); - assertThat(function.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); - 
assertThat(function.getLanguage()).isEqualTo("java"); - assertThat(function.isCalledOnNullInput()).isFalse(); - assertThat(function.getBody()).isEqualTo("return i;"); - }, - (listener, function) -> verify(listener).onFunctionCreated(function)); - } - - @Test - public void should_handle_function_drop() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) - .isTrue(); - should_handle_drop( - ImmutableList.of( - "CREATE FUNCTION id(i int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return i;'"), - "DROP FUNCTION id", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getFunction(CqlIdentifier.fromInternal("id"), DataTypes.INT)), - (listener, oldFunction) -> verify(listener).onFunctionDropped(oldFunction)); - } - - @Test - public void should_handle_function_update() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) - .isTrue(); - should_handle_update_via_drop_and_recreate( - ImmutableList.of( - "CREATE FUNCTION id(i int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return i;'"), - "DROP FUNCTION id", - "CREATE FUNCTION id(j int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return j;'", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getFunction(CqlIdentifier.fromInternal("id"), DataTypes.INT)), - newFunction -> assertThat(newFunction.getBody()).isEqualTo("return j;"), - (listener, oldFunction, newFunction) -> - verify(listener).onFunctionUpdated(newFunction, oldFunction)); - } - - @Test - public void should_handle_aggregate_creation() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) - .isTrue(); - should_handle_creation( - "CREATE FUNCTION plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return i+j;'", - "CREATE AGGREGATE sum(int) SFUNC plus STYPE int INITCOND 0", - 
metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getAggregate(CqlIdentifier.fromInternal("sum"), DataTypes.INT)), - aggregate -> { - assertThat(aggregate.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); - assertThat(aggregate.getSignature().getName().asInternal()).isEqualTo("sum"); - assertThat(aggregate.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); - assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); - assertThat(aggregate.getStateFuncSignature().getName().asInternal()).isEqualTo("plus"); - assertThat(aggregate.getStateFuncSignature().getParameterTypes()) - .containsExactly(DataTypes.INT, DataTypes.INT); - assertThat(aggregate.getFinalFuncSignature()).isEmpty(); - assertThat(aggregate.getInitCond()).hasValue(0); - }, - (listener, aggregate) -> verify(listener).onAggregateCreated(aggregate)); - } - - @Test - public void should_handle_aggregate_drop() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) - .isTrue(); - should_handle_drop( - ImmutableList.of( - "CREATE FUNCTION plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return i+j;'", - "CREATE AGGREGATE sum(int) SFUNC plus STYPE int INITCOND 0"), - "DROP AGGREGATE sum", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getAggregate(CqlIdentifier.fromInternal("sum"), DataTypes.INT)), - (listener, oldAggregate) -> verify(listener).onAggregateDropped(oldAggregate)); - } - - @Test - public void should_handle_aggregate_update() { - assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) - .isTrue(); - should_handle_update_via_drop_and_recreate( - ImmutableList.of( - "CREATE FUNCTION plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int " - + "LANGUAGE java AS 'return i+j;'", - "CREATE AGGREGATE sum(int) SFUNC plus STYPE int INITCOND 0"), - "DROP AGGREGATE sum", - "CREATE AGGREGATE 
sum(int) SFUNC plus STYPE int INITCOND 1", - metadata -> - metadata - .getKeyspace(ADMIN_SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getAggregate(CqlIdentifier.fromInternal("sum"), DataTypes.INT)), - newAggregate -> assertThat(newAggregate.getInitCond()).hasValue(1), - (listener, oldAggregate, newAggregate) -> - verify(listener).onAggregateUpdated(newAggregate, oldAggregate)); - } - - private void should_handle_creation( - String beforeStatement, - String createStatement, - Function> extract, - Consumer verifyMetadata, - BiConsumer verifyListener, - CqlIdentifier... keyspaces) { - - if (beforeStatement != null) { - ADMIN_SESSION_RULE.session().execute(beforeStatement); - } - - SchemaChangeListener listener1 = mock(SchemaChangeListener.class); - SchemaChangeListener listener2 = mock(SchemaChangeListener.class); - - // cluster1 executes the DDL query and gets a SCHEMA_CHANGE response. - // cluster2 gets a SCHEMA_CHANGE push event on its control connection. - - List keyspaceList = Lists.newArrayList(); - for (CqlIdentifier keyspace : keyspaces) { - keyspaceList.add(keyspace.asInternal()); - } - - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList(DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, keyspaceList) - .build(); - - try (CqlSession session1 = - SessionUtils.newSession( - CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); - CqlSession session2 = - SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { - - session1.execute(createStatement); - - // Refreshes on a response are synchronous: - T newElement1 = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); - verifyMetadata.accept(newElement1); - verifyListener.accept(listener1, newElement1); - - // Refreshes on a server event are asynchronous: - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - 
.untilAsserted( - () -> { - T newElement2 = - extract.apply(session2.getMetadata()).orElseThrow(AssertionError::new); - verifyMetadata.accept(newElement2); - verifyListener.accept(listener2, newElement2); - }); - } - } - - private void should_handle_drop( - Iterable beforeStatements, - String dropStatement, - Function> extract, - BiConsumer verifyListener, - CqlIdentifier... keyspaces) { - - for (String statement : beforeStatements) { - ADMIN_SESSION_RULE.session().execute(statement); - } - - SchemaChangeListener listener1 = mock(SchemaChangeListener.class); - SchemaChangeListener listener2 = mock(SchemaChangeListener.class); - - List keyspaceList = Lists.newArrayList(); - for (CqlIdentifier keyspace : keyspaces) { - keyspaceList.add(keyspace.asInternal()); - } - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList(DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, keyspaceList) - .build(); - - try (CqlSession session1 = - SessionUtils.newSession( - CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); - CqlSession session2 = - SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { - - T oldElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); - assertThat(oldElement).isNotNull(); - - session1.execute(dropStatement); - - assertThat(extract.apply(session1.getMetadata())).isEmpty(); - verifyListener.accept(listener1, oldElement); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted( - () -> { - assertThat(extract.apply(session2.getMetadata())).isEmpty(); - verifyListener.accept(listener2, oldElement); - }); - } - } - - private void should_handle_update( - Iterable beforeStatements, - String updateStatement, - Function> extract, - Consumer verifyNewMetadata, - TriConsumer verifyListener, - CqlIdentifier... 
keyspaces) { - - for (String statement : beforeStatements) { - ADMIN_SESSION_RULE.session().execute(statement); - } - - SchemaChangeListener listener1 = mock(SchemaChangeListener.class); - SchemaChangeListener listener2 = mock(SchemaChangeListener.class); - List keyspaceList = Lists.newArrayList(); - for (CqlIdentifier keyspace : keyspaces) { - keyspaceList.add(keyspace.asInternal()); - } - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList(DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, keyspaceList) - .build(); - - try (CqlSession session1 = - SessionUtils.newSession( - CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); - CqlSession session2 = - SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { - - T oldElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); - assertThat(oldElement).isNotNull(); - - session1.execute(updateStatement); - - T newElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); - verifyNewMetadata.accept(newElement); - verifyListener.accept(listener1, oldElement, newElement); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted( - () -> { - verifyNewMetadata.accept( - extract.apply(session2.getMetadata()).orElseThrow(AssertionError::new)); - verifyListener.accept(listener2, oldElement, newElement); - }); - } - } - - // Some element types don't have an ALTER command, but we can still observe an update if they get - // dropped and recreated while schema metadata is disabled - private void should_handle_update_via_drop_and_recreate( - Iterable beforeStatements, - String dropStatement, - String recreateStatement, - Function> extract, - Consumer verifyNewMetadata, - TriConsumer verifyListener, - CqlIdentifier... 
keyspaces) { - - for (String statement : beforeStatements) { - ADMIN_SESSION_RULE.session().execute(statement); - } - - SchemaChangeListener listener1 = mock(SchemaChangeListener.class); - SchemaChangeListener listener2 = mock(SchemaChangeListener.class); - List keyspaceList = Lists.newArrayList(); - for (CqlIdentifier keyspace : keyspaces) { - keyspaceList.add(keyspace.asInternal()); - } - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList(DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, keyspaceList) - .build(); - try (CqlSession session1 = - SessionUtils.newSession( - CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); - CqlSession session2 = - SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { - - T oldElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); - assertThat(oldElement).isNotNull(); - - session1.setSchemaMetadataEnabled(false); - session2.setSchemaMetadataEnabled(false); - - session1.execute(dropStatement); - session1.execute(recreateStatement); - - session1.setSchemaMetadataEnabled(true); - session2.setSchemaMetadataEnabled(true); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted( - () -> { - T newElement = - extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); - verifyNewMetadata.accept(newElement); - verifyListener.accept(listener1, oldElement, newElement); - }); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted( - () -> { - T newElement = - extract.apply(session2.getMetadata()).orElseThrow(AssertionError::new); - verifyNewMetadata.accept(newElement); - verifyListener.accept(listener2, oldElement, newElement); - }); - } - } - - interface TriConsumer { - void accept(T t, U u, V v); - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaIT.java deleted file mode 100644 index df5571974c1..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaIT.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import org.junit.AssumptionViolatedException; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SchemaIT { - - private static final Version DSE_MIN_VIRTUAL_TABLES = - 
Objects.requireNonNull(Version.parse("6.7.0")); - - private final CcmRule ccmRule = CcmRule.getInstance(); - - private final SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - public void should_not_expose_system_and_test_keyspace() { - Map keyspaces = - sessionRule.session().getMetadata().getKeyspaces(); - assertThat(keyspaces) - .doesNotContainKeys( - // Don't test exhaustively because system keyspaces depend on the Cassandra version, and - // keyspaces from other tests might also be present - CqlIdentifier.fromInternal("system"), CqlIdentifier.fromInternal("system_traces")); - } - - @Test - public void should_expose_test_keyspace() { - Map keyspaces = - sessionRule.session().getMetadata().getKeyspaces(); - assertThat(keyspaces).containsKey(sessionRule.keyspace()); - } - - @Test - public void should_filter_by_keyspaces() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - Collections.singletonList(sessionRule.keyspace().asInternal())) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.getMetadata().getKeyspaces()).containsOnlyKeys(sessionRule.keyspace()); - - CqlIdentifier otherKeyspace = SessionUtils.uniqueKeyspaceId(); - SessionUtils.createKeyspace(session, otherKeyspace); - - assertThat(session.getMetadata().getKeyspaces()).containsOnlyKeys(sessionRule.keyspace()); - } - } - - @Test - public void should_not_load_schema_if_disabled_in_config() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - assertThat(session.getMetadata().getKeyspaces()).isEmpty(); - } - } - - @Test - 
public void should_enable_schema_programmatically_when_disabled_in_config() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - assertThat(session.getMetadata().getKeyspaces()).isEmpty(); - - session.setSchemaMetadataEnabled(true); - assertThat(session.isSchemaMetadataEnabled()).isTrue(); - - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted(() -> assertThat(session.getMetadata().getKeyspaces()).isNotEmpty()); - assertThat(session.getMetadata().getKeyspaces()).containsKey(sessionRule.keyspace()); - - session.setSchemaMetadataEnabled(null); - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - } - } - - @Test - public void should_disable_schema_programmatically_when_enabled_in_config() { - CqlSession session = sessionRule.session(); - session.setSchemaMetadataEnabled(false); - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - - // Create a table, metadata should not be updated - DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); - sessionRule - .session() - .execute( - SimpleStatement.builder("CREATE TABLE foo(k int primary key)") - .setExecutionProfile(slowProfile) - .build()); - assertThat(session.getMetadata().getKeyspace(sessionRule.keyspace()).get().getTables()) - .doesNotContainKey(CqlIdentifier.fromInternal("foo")); - - // Reset to config value (true), should refresh and load the new table - session.setSchemaMetadataEnabled(null); - assertThat(session.isSchemaMetadataEnabled()).isTrue(); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .untilAsserted( - () -> - assertThat( - session.getMetadata().getKeyspace(sessionRule.keyspace()).get().getTables()) - .containsKey(CqlIdentifier.fromInternal("foo"))); - } - 
- @Test - public void should_refresh_schema_manually() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - assertThat(session.getMetadata().getKeyspaces()).isEmpty(); - - Metadata newMetadata = session.refreshSchema(); - assertThat(newMetadata.getKeyspaces()).containsKey(sessionRule.keyspace()); - - assertThat(session.getMetadata()).isSameAs(newMetadata); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "4.0", - description = "virtual tables introduced in 4.0") - @Test - public void should_get_virtual_metadata() { - skipIfDse60(); - - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - Collections.singletonList("system_views")) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - Metadata md = session.getMetadata(); - KeyspaceMetadata kmd = md.getKeyspace("system_views").get(); - - // Keyspace name should be set, marked as virtual, and have at least sstable_tasks table. - // All other values should be defaulted since they are not defined in the virtual schema - // tables. 
- assertThat(kmd.getTables().size()).isGreaterThanOrEqualTo(1); - assertThat(kmd.isVirtual()).isTrue(); - assertThat(kmd.isDurableWrites()).isFalse(); - assertThat(kmd.getName().asCql(true)).isEqualTo("system_views"); - - // Virtual tables lack User Types, Functions, Views and Aggregates - assertThat(kmd.getUserDefinedTypes().size()).isEqualTo(0); - assertThat(kmd.getFunctions().size()).isEqualTo(0); - assertThat(kmd.getViews().size()).isEqualTo(0); - assertThat(kmd.getAggregates().size()).isEqualTo(0); - - assertThat(kmd.describe(true)) - .isEqualTo( - "/* VIRTUAL KEYSPACE system_views WITH replication = { 'class' : 'null' } " - + "AND durable_writes = false; */"); - // Table name should be set, marked as virtual, and it should have columns set. - // indexes, views, clustering column, clustering order and id are not defined in the virtual - // schema tables. - TableMetadata tm = kmd.getTable("sstable_tasks").get(); - assertThat(tm).isNotNull(); - assertThat(tm.getName().toString()).isEqualTo("sstable_tasks"); - assertThat(tm.isVirtual()).isTrue(); - // DSE 6.8+ reports 7 columns, Cassandra 4+ reports 8 columns - assertThat(tm.getColumns().size()).isGreaterThanOrEqualTo(7); - assertThat(tm.getIndexes().size()).isEqualTo(0); - assertThat(tm.getPartitionKey().size()).isEqualTo(1); - assertThat(tm.getPartitionKey().get(0).getName().toString()).isEqualTo("keyspace_name"); - assertThat(tm.getClusteringColumns().size()).isEqualTo(2); - assertThat(tm.getId().isPresent()).isFalse(); - assertThat(tm.getOptions().size()).isEqualTo(0); - assertThat(tm.getKeyspace()).isEqualTo(kmd.getName()); - assertThat(tm.describe(true)) - .isIn( - // DSE 6.8+ - "/* VIRTUAL TABLE system_views.sstable_tasks (\n" - + " keyspace_name text,\n" - + " table_name text,\n" - + " task_id uuid,\n" - + " kind text,\n" - + " progress bigint,\n" - + " total bigint,\n" - + " unit text,\n" - + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" - + "); */", - // Cassandra 4.0 - "/* VIRTUAL TABLE 
system_views.sstable_tasks (\n" - + " keyspace_name text,\n" - + " table_name text,\n" - + " task_id uuid,\n" - + " completion_ratio double,\n" - + " kind text,\n" - + " progress bigint,\n" - + " total bigint,\n" - + " unit text,\n" - + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" - + "); */", - // Cassandra 4.1 - "/* VIRTUAL TABLE system_views.sstable_tasks (\n" - + " keyspace_name text,\n" - + " table_name text,\n" - + " task_id timeuuid,\n" - + " completion_ratio double,\n" - + " kind text,\n" - + " progress bigint,\n" - + " sstables int,\n" - + " total bigint,\n" - + " unit text,\n" - + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" - + "); */", - // Cassandra 5.0 - "/* VIRTUAL TABLE system_views.sstable_tasks (\n" - + " keyspace_name text,\n" - + " table_name text,\n" - + " task_id timeuuid,\n" - + " completion_ratio double,\n" - + " kind text,\n" - + " progress bigint,\n" - + " sstables int,\n" - + " target_directory text,\n" - + " total bigint,\n" - + " unit text,\n" - + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" - + "); */"); - // ColumnMetadata is as expected - ColumnMetadata cm = tm.getColumn("progress").get(); - assertThat(cm).isNotNull(); - assertThat(cm.getParent()).isEqualTo(tm.getName()); - assertThat(cm.getType()).isEqualTo(DataTypes.BIGINT); - assertThat(cm.getName().toString()).isEqualTo("progress"); - } - } - - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "4.0", - description = "virtual tables introduced in 4.0") - @Test - public void should_exclude_virtual_keyspaces_from_token_map() { - skipIfDse60(); - - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - Arrays.asList( - "system_views", "system_virtual_schema", sessionRule.keyspace().asInternal())) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - Metadata metadata = session.getMetadata(); - Map keyspaces = 
metadata.getKeyspaces(); - assertThat(keyspaces) - .containsKey(CqlIdentifier.fromCql("system_views")) - .containsKey(CqlIdentifier.fromCql("system_virtual_schema")); - - TokenMap tokenMap = metadata.getTokenMap().orElseThrow(AssertionError::new); - ByteBuffer partitionKey = Bytes.fromHexString("0x00"); // value does not matter - assertThat(tokenMap.getReplicas("system_views", partitionKey)).isEmpty(); - assertThat(tokenMap.getReplicas("system_virtual_schema", partitionKey)).isEmpty(); - // Check that a non-virtual keyspace is present - assertThat(tokenMap.getReplicas(sessionRule.keyspace(), partitionKey)).isNotEmpty(); - } - } - - private void skipIfDse60() { - // Special case: DSE 6.0 reports C* 4.0 but does not support virtual tables - if (!ccmRule.isDistributionOf( - BackendType.DSE, (dist, cass) -> dist.compareTo(DSE_MIN_VIRTUAL_TABLES) >= 0)) { - throw new AssumptionViolatedException("DSE 6.0 does not support virtual tables"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/TokenITBase.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/TokenITBase.java deleted file mode 100644 index 057461a1bd7..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/TokenITBase.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; -import org.junit.Test; - -public abstract class TokenITBase { - - protected static final CqlIdentifier KS1 = SessionUtils.uniqueKeyspaceId(); - protected static final CqlIdentifier KS2 = SessionUtils.uniqueKeyspaceId(); - - // Must be called in a @BeforeClass method in each subclass (unfortunately we can't do this - 
// automatically because it requires the session, which is not available from a static context in - // this class). - protected static void createSchema(CqlSession session) { - for (String statement : - ImmutableList.of( - String.format( - "CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - KS1.asCql(false)), - String.format( - "CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}", - KS2.asCql(false)), - - // Shouldn't really do that, but it makes the rest of the tests a bit prettier. - String.format("USE %s", KS1.asCql(false)), - "CREATE TABLE foo(i int primary key)", - "INSERT INTO foo (i) VALUES (1)", - "INSERT INTO foo (i) VALUES (2)", - "INSERT INTO foo (i) VALUES (3)")) { - session.execute(statement); - } - } - - private final String expectedPartitionerName; - private final Class expectedTokenType; - private final boolean useVnodes; - private final int tokensPerNode; - - protected TokenITBase( - String expectedPartitionerName, Class expectedTokenType, boolean useVnodes) { - this.expectedPartitionerName = expectedPartitionerName; - this.expectedTokenType = expectedTokenType; - this.useVnodes = useVnodes; - this.tokensPerNode = useVnodes ? 256 : 1; - } - - protected abstract CqlSession session(); - - /** - * Validates that the token metadata is consistent with server-side range queries. That is, - * querying the data in a range does return a PK that the driver thinks is in that range. - * - * @test_category metadata:token - * @expected_result token ranges are exposed and usable. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test - public void should_be_consistent_with_range_queries() { - TokenMap tokenMap = getTokenMap(); - - // Find the replica for a given partition key of ks1.foo. 
- int key = 1; - ProtocolVersion protocolVersion = session().getContext().getProtocolVersion(); - ByteBuffer serializedKey = TypeCodecs.INT.encodePrimitive(key, protocolVersion); - assertThat(serializedKey).isNotNull(); - Set replicas = tokenMap.getReplicas(KS1, serializedKey); - assertThat(replicas).hasSize(1); - Node replica = replicas.iterator().next(); - - // Iterate the cluster's token ranges. For each one, use a range query to get all the keys of - // ks1.foo that are in this range. - PreparedStatement rangeStatement = - session().prepare("SELECT i FROM foo WHERE token(i) > ? and token(i) <= ?"); - - TokenRange foundRange = null; - for (TokenRange range : tokenMap.getTokenRanges()) { - List rows = rangeQuery(rangeStatement, range); - for (Row row : rows) { - if (row.getInt("i") == key) { - // We should find our initial key exactly once - assertThat(foundRange) - .describedAs("found the same key in two ranges: " + foundRange + " and " + range) - .isNull(); - foundRange = range; - - // That range should be managed by the replica - assertThat(tokenMap.getReplicas(KS1, range)).contains(replica); - } - } - } - assertThat(foundRange).isNotNull(); - } - - private List rangeQuery(PreparedStatement rangeStatement, TokenRange range) { - List rows = Lists.newArrayList(); - for (TokenRange subRange : range.unwrap()) { - Statement statement = rangeStatement.bind(subRange.getStart(), subRange.getEnd()); - session().execute(statement).forEach(rows::add); - } - return rows; - } - - /** - * Validates that a {@link Token} can be retrieved and parsed by executing 'select token(name)' - * and then used to find data matching that token. - * - *

    This test does the following: retrieve the token for the key with value '1', get it by - * index, and ensure if is of the expected token type; select data by token with a BoundStatement; - * select data by token using setToken by index. - * - * @test_category token - * @expected_result tokens are selectable, properly parsed, and usable as input. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test - public void should_get_token_from_row_and_set_token_in_query() { - ResultSet rs = session().execute("SELECT token(i) FROM foo WHERE i = 1"); - Row row = rs.one(); - assertThat(row).isNotNull(); - - // Get by index: - Token token = row.getToken(0); - assertThat(token).isNotNull().isInstanceOf(expectedTokenType); - - // Get by name: the generated column name depends on the Cassandra version. - String tokenColumnName = - rs.getColumnDefinitions().contains("token(i)") ? "token(i)" : "system.token(i)"; - assertThat(row.getToken(tokenColumnName)).isEqualTo(token); - - PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = ?"); - // Bind with bind(...) - row = session().execute(pst.bind(token)).iterator().next(); - assertThat(row.getInt(0)).isEqualTo(1); - - // Bind with setToken by index - row = session().execute(pst.bind().setToken(0, token)).one(); - assertThat(row).isNotNull(); - assertThat(row.getInt(0)).isEqualTo(1); - - // Bind with setToken by name - row = session().execute(pst.bind().setToken("partition key token", token)).one(); - assertThat(row).isNotNull(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - /** - * Validates that a {@link Token} can be retrieved and parsed by using bind variables and - * aliasing. - * - *

    This test does the following: retrieve the token by alias for the key '1', and ensure it - * matches the token by index; select data by token using setToken by name. - */ - @Test - public void should_get_token_from_row_and_set_token_in_query_with_binding_and_aliasing() { - Row row = session().execute("SELECT token(i) AS t FROM foo WHERE i = 1").one(); - assertThat(row).isNotNull(); - Token token = row.getToken("t"); - assertThat(token).isNotNull().isInstanceOf(expectedTokenType); - - PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = :myToken"); - row = session().execute(pst.bind().setToken("myToken", token)).one(); - assertThat(row).isNotNull(); - assertThat(row.getInt(0)).isEqualTo(1); - - row = - session() - .execute(SimpleStatement.newInstance("SELECT * FROM foo WHERE token(i) = ?", token)) - .one(); - assertThat(row).isNotNull(); - assertThat(row.getInt(0)).isEqualTo(1); - } - - /** - * Ensures that an exception is raised when attempting to retrieve a token from a column that - * doesn't match the CQL type of any token type. - * - * @test_category token - * @expected_result an exception is raised. - * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test(expected = IllegalArgumentException.class) - public void should_raise_exception_when_getting_token_on_non_token_column() { - Row row = session().execute("SELECT i FROM foo WHERE i = 1").one(); - assertThat(row).isNotNull(); - row.getToken(0); - } - - /** - * Ensures that token ranges are exposed per node, the ranges are complete, the entire ring is - * represented, and that ranges do not overlap. - * - * @test_category metadata:token - * @expected_result The entire token range is represented collectively and the ranges do not - * overlap. 
- * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test - public void should_expose_consistent_ranges() { - checkRanges(session()); - checkRanges(session(), KS1, 1); - checkRanges(session(), KS2, 2); - } - - private void checkRanges(Session session) { - assertThat(session.getMetadata().getTokenMap()).isPresent(); - TokenMap tokenMap = session.getMetadata().getTokenMap().get(); - checkRanges(tokenMap.getTokenRanges()); - } - - private void checkRanges(Session session, CqlIdentifier keyspace, int replicationFactor) { - assertThat(session.getMetadata().getTokenMap()).isPresent(); - TokenMap tokenMap = session.getMetadata().getTokenMap().get(); - List allRangesWithDuplicates = Lists.newArrayList(); - - // Get each host's ranges, the count should match the replication factor - for (Node node : session.getMetadata().getNodes().values()) { - Set hostRanges = tokenMap.getTokenRanges(keyspace, node); - // Special case: When using vnodes the tokens are not evenly assigned to each replica. - if (!useVnodes) { - assertThat(hostRanges) - .as( - "Node %s: expected %d ranges, got %d", - node, replicationFactor * tokensPerNode, hostRanges.size()) - .hasSize(replicationFactor * tokensPerNode); - } - allRangesWithDuplicates.addAll(hostRanges); - } - - // Special case check for vnodes to ensure that total number of replicated ranges is correct. - assertThat(allRangesWithDuplicates) - .as( - "Expected %d total replicated ranges with duplicates, got %d", - 3 * replicationFactor * tokensPerNode, allRangesWithDuplicates.size()) - .hasSize(3 * replicationFactor * tokensPerNode); - - // Once we ignore duplicates, the number of ranges should match the number of nodes. - Set allRanges = new TreeSet<>(allRangesWithDuplicates); - assertThat(allRanges) - .as("Expected %d total replicated ranges, got %d", 3 * tokensPerNode, allRanges.size()) - .hasSize(3 * tokensPerNode); - - // And the ranges should cover the whole ring and no ranges intersect. 
- checkRanges(allRanges); - } - - // Ensures that no ranges intersect and that they cover the entire ring. - private void checkRanges(Collection ranges) { - // Ensure no ranges intersect. - TokenRange[] rangesArray = ranges.toArray(new TokenRange[0]); - for (int i = 0; i < rangesArray.length; i++) { - TokenRange rangeI = rangesArray[i]; - for (int j = i + 1; j < rangesArray.length; j++) { - TokenRange rangeJ = rangesArray[j]; - assertThat(rangeI.intersects(rangeJ)) - .as("Range " + rangeI + " intersects with " + rangeJ) - .isFalse(); - } - } - - // Ensure the defined ranges cover the entire ring. - Iterator it = ranges.iterator(); - TokenRange mergedRange = it.next(); - while (it.hasNext()) { - TokenRange next = it.next(); - mergedRange = mergedRange.mergeWith(next); - } - boolean isFullRing = - mergedRange.getStart().equals(mergedRange.getEnd()) && !mergedRange.isEmpty(); - assertThat(isFullRing).as("Ring is not fully defined for cluster.").isTrue(); - } - - /** - * Ensures that for there is at most one wrapped range in the ring, and check that unwrapping it - * produces two ranges. - * - * @test_category metadata:token - * @expected_result there is at most one wrapped range. 
- * @jira_ticket JAVA-312 - * @since 2.0.10, 2.1.5 - */ - @Test - public void should_have_only_one_wrapped_range() { - TokenMap tokenMap = getTokenMap(); - TokenRange wrappedRange = null; - for (TokenRange range : tokenMap.getTokenRanges()) { - if (range.isWrappedAround()) { - assertThat(wrappedRange) - .as( - "Found a wrapped around TokenRange (%s) when one already exists (%s).", - range, wrappedRange) - .isNull(); - wrappedRange = range; - - assertThat(wrappedRange.unwrap()).hasSize(2); - } - } - } - - @Test - public void should_create_tokens_and_ranges() { - TokenMap tokenMap = getTokenMap(); - - // Pick a random range - TokenRange range = tokenMap.getTokenRanges().iterator().next(); - - Token start = tokenMap.parse(tokenMap.format(range.getStart())); - Token end = tokenMap.parse(tokenMap.format(range.getEnd())); - - assertThat(tokenMap.newTokenRange(start, end)).isEqualTo(range); - } - - @Test - public void should_create_token_from_partition_key() { - TokenMap tokenMap = getTokenMap(); - - Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one(); - assertThat(row).isNotNull(); - Token expected = row.getToken(0); - - ProtocolVersion protocolVersion = session().getContext().getProtocolVersion(); - assertThat(tokenMap.newToken(TypeCodecs.INT.encodePrimitive(1, protocolVersion))) - .isEqualTo(expected); - } - - private TokenMap getTokenMap() { - return session() - .getMetadata() - .getTokenMap() - .map( - tokenMap -> { - assertThat(tokenMap.getPartitionerName()).isEqualTo(expectedPartitionerName); - return tokenMap; - }) - .orElseThrow(() -> new AssertionError("Expected token map to be present")); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/DropwizardMetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/DropwizardMetricsIT.java deleted file mode 100644 index e0184516e21..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/DropwizardMetricsIT.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.codahale.metrics.Counter; -import com.codahale.metrics.Gauge; -import com.codahale.metrics.Meter; -import com.codahale.metrics.Metric; -import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.Timer; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import 
com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.util.ArrayList; -import java.util.List; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class DropwizardMetricsIT extends MetricsITBase { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - @Override - protected SimulacronRule simulacron() { - return SIMULACRON_RULE; - } - - @Override - protected MetricRegistry newMetricRegistry() { - return new MetricRegistry(); - } - - @Override - protected String getMetricsFactoryClass() { - return "DropwizardMetricsFactory"; - } - - @Override - protected void assertMetricsPresent(CqlSession session) { - - MetricRegistry registry = - (MetricRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); - assertThat(registry).isNotNull(); - - assertThat(registry.getMetrics()) - .hasSize(ENABLED_SESSION_METRICS.size() + ENABLED_NODE_METRICS.size() * 3); - - MetricIdGenerator metricIdGenerator = - ((InternalDriverContext) session.getContext()).getMetricIdGenerator(); - - assertThat(session.getMetrics()).isPresent(); - Metrics metrics = session.getMetrics().get(); - - for (DefaultSessionMetric metric : ENABLED_SESSION_METRICS) { - - MetricId id = metricIdGenerator.sessionMetricId(metric); - Metric m = registry.getMetrics().get(id.getName()); - assertThat(m).isNotNull(); - - // assert that the same metric is retrievable through the registry and through the driver API - assertThat(metrics.getSessionMetric(metric)) - .isPresent() - .hasValueSatisfying(v -> assertThat(v).isSameAs(m)); - - switch (metric) { - case CONNECTED_NODES: - assertThat(m).isInstanceOf(Gauge.class); - assertThat((Integer) ((Gauge) m).getValue()).isEqualTo(3); - break; - case CQL_REQUESTS: - assertThat(m).isInstanceOf(Timer.class); - await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(30)); - break; - 
case CQL_PREPARED_CACHE_SIZE: - assertThat(m).isInstanceOf(Gauge.class); - assertThat((Long) ((Gauge) m).getValue()).isOne(); - break; - case BYTES_SENT: - case BYTES_RECEIVED: - assertThat(m).isInstanceOf(Meter.class); - assertThat(((Meter) m).getCount()).isGreaterThan(0); - break; - case CQL_CLIENT_TIMEOUTS: - case THROTTLING_ERRORS: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).getCount()).isZero(); - break; - case THROTTLING_DELAY: - assertThat(m).isInstanceOf(Timer.class); - assertThat(((Timer) m).getCount()).isZero(); - break; - case THROTTLING_QUEUE_SIZE: - assertThat(m).isInstanceOf(Gauge.class); - assertThat((Integer) ((Gauge) m).getValue()).isZero(); - break; - } - } - - for (Node node : session.getMetadata().getNodes().values()) { - - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - - MetricId id = metricIdGenerator.nodeMetricId(node, metric); - Metric m = registry.getMetrics().get(id.getName()); - assertThat(m).isNotNull(); - - // assert that the same metric is retrievable through the registry and through the driver - // API - assertThat(metrics.getNodeMetric(node, metric)) - .isPresent() - .hasValueSatisfying(v -> assertThat(v).isSameAs(m)); - - switch (metric) { - case OPEN_CONNECTIONS: - assertThat(m).isInstanceOf(Gauge.class); - // control node has 2 connections - assertThat((Integer) ((Gauge) m).getValue()).isBetween(1, 2); - break; - case CQL_MESSAGES: - assertThat(m).isInstanceOf(Timer.class); - await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(10)); - break; - case READ_TIMEOUTS: - case WRITE_TIMEOUTS: - case UNAVAILABLES: - case OTHER_ERRORS: - case ABORTED_REQUESTS: - case UNSENT_REQUESTS: - case RETRIES: - case IGNORES: - case RETRIES_ON_READ_TIMEOUT: - case RETRIES_ON_WRITE_TIMEOUT: - case RETRIES_ON_UNAVAILABLE: - case RETRIES_ON_OTHER_ERROR: - case RETRIES_ON_ABORTED: - case IGNORES_ON_READ_TIMEOUT: - case IGNORES_ON_WRITE_TIMEOUT: - case IGNORES_ON_UNAVAILABLE: - case 
IGNORES_ON_OTHER_ERROR: - case IGNORES_ON_ABORTED: - case SPECULATIVE_EXECUTIONS: - case CONNECTION_INIT_ERRORS: - case AUTHENTICATION_ERRORS: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).getCount()).isZero(); - break; - case BYTES_SENT: - case BYTES_RECEIVED: - assertThat(m).isInstanceOf(Meter.class); - assertThat(((Meter) m).getCount()).isGreaterThan(0L); - break; - case AVAILABLE_STREAMS: - case IN_FLIGHT: - case ORPHANED_STREAMS: - assertThat(m).isInstanceOf(Gauge.class); - break; - } - } - } - } - - @Override - protected void assertNodeMetricsNotEvicted(CqlSession session, Node node) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - MetricRegistry registry = (MetricRegistry) context.getMetricRegistry(); - assertThat(registry).isNotNull(); - for (String id : nodeMetricIds(context, node)) { - assertThat(registry.getMetrics()).containsKey(id); - } - } - - @Override - protected void assertMetricsNotPresent(Object registry) { - MetricRegistry dropwizardRegistry = (MetricRegistry) registry; - assertThat(dropwizardRegistry.getMetrics()).isEmpty(); - } - - @Override - protected void assertNodeMetricsEvicted(CqlSession session, Node node) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - MetricRegistry registry = (MetricRegistry) context.getMetricRegistry(); - assertThat(registry).isNotNull(); - for (String id : nodeMetricIds(context, node)) { - assertThat(registry.getMetrics()).doesNotContainKey(id); - } - } - - private List nodeMetricIds(InternalDriverContext context, Node node) { - List ids = new ArrayList<>(); - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric); - ids.add(id.getName()); - } - return ids; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/MetricsITBase.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/MetricsITBase.java deleted file mode 100644 index e6121217619..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/MetricsITBase.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.codahale.metrics.MetricRegistry; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.internal.core.context.EventBus; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.DefaultMetricIdGenerator; -import com.datastax.oss.driver.internal.core.metrics.TaggingMetricIdGenerator; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public abstract class 
MetricsITBase { - - protected static final List ENABLED_SESSION_METRICS = - Arrays.asList(DefaultSessionMetric.values()); - - protected static final List ENABLED_NODE_METRICS = - Arrays.asList(DefaultNodeMetric.values()); - - protected abstract SimulacronRule simulacron(); - - protected abstract Object newMetricRegistry(); - - protected abstract String getMetricsFactoryClass(); - - protected abstract void assertMetricsPresent(CqlSession session); - - protected abstract void assertNodeMetricsEvicted(CqlSession session, Node node) throws Exception; - - protected abstract void assertNodeMetricsNotEvicted(CqlSession session, Node node) - throws Exception; - - @Before - public void resetSimulacron() { - simulacron().cluster().clearLogs(); - simulacron().cluster().clearPrimes(true); - } - - @Test - @UseDataProvider("descriptorsAndPrefixes") - public void should_expose_metrics_if_enabled_and_clear_metrics_if_closed( - Class metricIdGenerator, String prefix) { - - Object registry = newMetricRegistry(); - Assume.assumeFalse( - "Cannot use metric tags with Dropwizard", - metricIdGenerator.getSimpleName().contains("Tagging") - && getMetricsFactoryClass().contains("Dropwizard")); - - DriverConfigLoader loader = - allMetricsEnabled() - .withString( - DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, metricIdGenerator.getSimpleName()) - .withString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, prefix) - .build(); - - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(simulacron().getContactPoints()) - .withConfigLoader(loader) - .withMetricRegistry(registry) - .build()) { - - session.prepare("irrelevant"); - queryAllNodes(session); - assertMetricsPresent(session); - } finally { - assertMetricsNotPresent(registry); - } - } - - @DataProvider - public static Object[][] descriptorsAndPrefixes() { - return new Object[][] { - new Object[] {DefaultMetricIdGenerator.class, ""}, - new Object[] {DefaultMetricIdGenerator.class, "cassandra"}, - new Object[] 
{TaggingMetricIdGenerator.class, ""}, - new Object[] {TaggingMetricIdGenerator.class, "cassandra"}, - }; - } - - @Test - public void should_not_expose_metrics_if_disabled() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList(DefaultDriverOption.METRICS_SESSION_ENABLED, Collections.emptyList()) - .withStringList(DefaultDriverOption.METRICS_NODE_ENABLED, Collections.emptyList()) - .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, getMetricsFactoryClass()) - .build(); - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(simulacron().getContactPoints()) - .withConfigLoader(loader) - .build()) { - queryAllNodes(session); - MetricRegistry registry = - (MetricRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); - assertThat(registry).isNull(); - assertThat(session.getMetrics()).isEmpty(); - } - } - - @Test - public void should_evict_down_node_metrics_when_timeout_fires() throws Exception { - // given - Duration expireAfter = Duration.ofSeconds(1); - DriverConfigLoader loader = - allMetricsEnabled() - .withDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, expireAfter) - .build(); - - AbstractMetricUpdater.MIN_EXPIRE_AFTER = expireAfter; - - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(simulacron().getContactPoints()) - .withConfigLoader(loader) - .withMetricRegistry(newMetricRegistry()) - .build()) { - - queryAllNodes(session); - - DefaultNode node1 = findNode(session, 0); - DefaultNode node2 = findNode(session, 1); - DefaultNode node3 = findNode(session, 2); - - EventBus eventBus = ((InternalDriverContext) session.getContext()).getEventBus(); - - // trigger node1 UP -> DOWN - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); - - Thread.sleep(expireAfter.toMillis()); - - // then node-level metrics should be evicted from node1, but - // node2 and node3 metrics should not have been evicted - await().untilAsserted(() -> 
assertNodeMetricsEvicted(session, node1)); - assertNodeMetricsNotEvicted(session, node2); - assertNodeMetricsNotEvicted(session, node3); - - } finally { - AbstractMetricUpdater.MIN_EXPIRE_AFTER = Duration.ofMinutes(5); - } - } - - @Test - public void should_not_evict_down_node_metrics_when_node_is_back_up_before_timeout() - throws Exception { - // given - Duration expireAfter = Duration.ofSeconds(2); - DriverConfigLoader loader = - allMetricsEnabled() - .withDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, expireAfter) - .build(); - - AbstractMetricUpdater.MIN_EXPIRE_AFTER = expireAfter; - - try (CqlSession session = - CqlSession.builder() - .addContactEndPoints(simulacron().getContactPoints()) - .withConfigLoader(loader) - .withMetricRegistry(newMetricRegistry()) - .build()) { - - queryAllNodes(session); - - DefaultNode node1 = findNode(session, 0); - DefaultNode node2 = findNode(session, 1); - DefaultNode node3 = findNode(session, 2); - - EventBus eventBus = ((InternalDriverContext) session.getContext()).getEventBus(); - - // trigger nodes UP -> DOWN - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); - eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); - eventBus.fire(NodeStateEvent.removed(node3)); - - Thread.sleep(500); - - // trigger nodes DOWN -> UP, should cancel the timeouts - eventBus.fire(NodeStateEvent.changed(NodeState.DOWN, NodeState.UP, node1)); - eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - eventBus.fire(NodeStateEvent.added(node3)); - - Thread.sleep(expireAfter.toMillis()); - - // then no node-level metrics should be evicted - assertNodeMetricsNotEvicted(session, node1); - assertNodeMetricsNotEvicted(session, node2); - assertNodeMetricsNotEvicted(session, node3); - - } finally { - AbstractMetricUpdater.MIN_EXPIRE_AFTER = Duration.ofMinutes(5); - } - } - - private ProgrammaticDriverConfigLoaderBuilder allMetricsEnabled() { - return 
SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - ENABLED_SESSION_METRICS.stream() - .map(DefaultSessionMetric::getPath) - .collect(Collectors.toList())) - .withStringList( - DefaultDriverOption.METRICS_NODE_ENABLED, - ENABLED_NODE_METRICS.stream() - .map(DefaultNodeMetric::getPath) - .collect(Collectors.toList())) - .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, getMetricsFactoryClass()); - } - - private void queryAllNodes(CqlSession session) { - for (Node node : session.getMetadata().getNodes().values()) { - for (int i = 0; i < 10; i++) { - session.execute(SimpleStatement.newInstance("irrelevant").setNode(node)); - } - } - } - - private DefaultNode findNode(CqlSession session, int id) { - InetSocketAddress address1 = simulacron().cluster().node(id).inetSocketAddress(); - return (DefaultNode) - session.getMetadata().findNode(address1).orElseThrow(IllegalStateException::new); - } - - protected abstract void assertMetricsNotPresent(Object registry); -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/ConsistencyDowngradingRetryPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/ConsistencyDowngradingRetryPolicyIT.java deleted file mode 100644 index 0cab12c7fc4..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/ConsistencyDowngradingRetryPolicyIT.java +++ /dev/null @@ -1,1328 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.retry; - -import static com.datastax.oss.simulacron.common.codec.WriteType.BATCH_LOG; -import static com.datastax.oss.simulacron.common.codec.WriteType.UNLOGGED_BATCH; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.closeConnection; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readFailure; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readTimeout; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.writeFailure; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.writeTimeout; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.after; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import 
com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryVerdict; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; -import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy; -import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryVerdict; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; -import com.datastax.oss.simulacron.common.codec.WriteType; -import com.datastax.oss.simulacron.common.request.Query; -import com.datastax.oss.simulacron.common.request.Request; -import com.datastax.oss.simulacron.common.stubbing.CloseType; -import 
com.datastax.oss.simulacron.common.stubbing.DisconnectAction; -import com.datastax.oss.simulacron.server.BoundNode; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.net.SocketAddress; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.slf4j.LoggerFactory; -import org.slf4j.helpers.MessageFormatter; - -@RunWith(DataProviderRunner.class) -@Category(ParallelizableTests.class) -public class ConsistencyDowngradingRetryPolicyIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - public @Rule SessionRule sessionRule = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) - .withClass( - DefaultDriverOption.RETRY_POLICY_CLASS, - ConsistencyDowngradingRetryPolicy.class) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - SortingLoadBalancingPolicy.class) - .build()) - .build(); - - private static final String QUERY_STR = "irrelevant"; - - private static final Request QUERY_LOCAL_QUORUM = - new Query(QUERY_STR, ImmutableList.of(ConsistencyLevel.LOCAL_QUORUM), null, null); - - private static final Request QUERY_ONE = - new Query(QUERY_STR, ImmutableList.of(ConsistencyLevel.ONE), null, null); - - private static final Request QUERY_LOCAL_SERIAL = - new Query(QUERY_STR, ImmutableList.of(ConsistencyLevel.LOCAL_SERIAL), null, null); - - private static final SimpleStatement STATEMENT_LOCAL_QUORUM = - 
SimpleStatement.builder(QUERY_STR) - .setConsistencyLevel(DefaultConsistencyLevel.LOCAL_QUORUM) - .build(); - - private static final SimpleStatement STATEMENT_LOCAL_SERIAL = - SimpleStatement.builder(QUERY_STR) - .setConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - - private final QueryCounter localQuorumCounter = - QueryCounter.builder(SIMULACRON_RULE.cluster()) - .withFilter( - (l) -> - l.getQuery().equals(QUERY_STR) - && l.getConsistency().equals(ConsistencyLevel.LOCAL_QUORUM)) - .build(); - - private final QueryCounter oneCounter = - QueryCounter.builder(SIMULACRON_RULE.cluster()) - .withFilter( - (l) -> - l.getQuery().equals(QUERY_STR) && l.getConsistency().equals(ConsistencyLevel.ONE)) - .build(); - - private final QueryCounter localSerialCounter = - QueryCounter.builder(SIMULACRON_RULE.cluster()) - .withFilter( - (l) -> - l.getQuery().equals(QUERY_STR) - && l.getConsistency().equals(ConsistencyLevel.LOCAL_SERIAL)) - .build(); - - private ArgumentCaptor loggingEventCaptor; - private Appender appender; - private Logger logger; - private Level oldLevel; - private String logPrefix; - private BoundNode node0; - private BoundNode node1; - - @Before - public void setup() { - loggingEventCaptor = ArgumentCaptor.forClass(ILoggingEvent.class); - @SuppressWarnings("unchecked") - Appender appender = (Appender) mock(Appender.class); - this.appender = appender; - logger = (Logger) LoggerFactory.getLogger(ConsistencyDowngradingRetryPolicy.class); - oldLevel = logger.getLevel(); - logger.setLevel(Level.TRACE); - logger.addAppender(appender); - // the log prefix we expect in retry logging messages. - logPrefix = sessionRule.session().getName() + "|default"; - // clear activity logs and primes between tests since simulacron instance is shared. 
- SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - node0 = SIMULACRON_RULE.cluster().node(0); - node1 = SIMULACRON_RULE.cluster().node(1); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(oldLevel); - } - - @Test - public void should_rethrow_on_read_timeout_when_enough_responses_and_data_present() { - // given a node that will respond to query with a read timeout where data is present and enough - // replicas replied. - node0.prime( - when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 2, 2, true))); - - try { - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("expected a ReadTimeoutException"); - } catch (ReadTimeoutException rte) { - // then an exception should have been thrown - assertThat(rte) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (timeout while waiting for repair of inconsistent replica)"); - assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(2); - assertThat(rte.getBlockFor()).isEqualTo(2); - assertThat(rte.wasDataPresent()).isTrue(); - // should not have been retried - List> errors = rte.getExecutionInfo().getErrors(); - assertThat(errors).isEmpty(); - // the host that returned the response should be node 0. - assertThat(coordinatorAddress(rte.getExecutionInfo())).isEqualTo(node0.getAddress()); - } - - // there should have been no retry. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(0); - - // expect 1 message: RETHROW - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(1); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 2, - true, - 0, - RetryVerdict.RETHROW)); - } - - @Test - public void should_retry_on_same_on_read_timeout_when_enough_responses_but_data_not_present() { - // given a node that will respond to query with a read timeout where data is present. - node0.prime( - when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 2, 2, false))); - - try { - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("expected a ReadTimeoutException"); - } catch (ReadTimeoutException rte) { - // then an exception should have been thrown - assertThat(rte) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)"); - assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(2); - assertThat(rte.getBlockFor()).isEqualTo(2); - assertThat(rte.wasDataPresent()).isFalse(); - // the host that returned the response should be node 0. 
- assertThat(coordinatorAddress(rte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM as well - List> errors = rte.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - ReadTimeoutException.class, - rte1 -> { - assertThat(rte1) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)"); - assertThat(rte1.getConsistencyLevel()) - .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte1.getReceived()).isEqualTo(2); - assertThat(rte1.getBlockFor()).isEqualTo(2); - assertThat(rte1.wasDataPresent()).isFalse(); - }); - } - - // there should have been a retry, and it should have been executed on the same host, - // with same consistency. - localQuorumCounter.assertTotalCount(2); - localQuorumCounter.assertNodeCounts(2, 0, 0); - oneCounter.assertTotalCount(0); - - // expect 2 messages: RETRY_SAME, then RETHROW - verify(appender, timeout(2000).times(2)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(2); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 2, - false, - 0, - RetryVerdict.RETRY_SAME)); - assertThat(loggedEvents.get(1).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 2, - false, - 1, - RetryVerdict.RETHROW)); - } - - @Test - public void should_downgrade_on_read_timeout_when_not_enough_responses() { - // given a node that will respond to a query with a read timeout 
where 2 out of 3 responses are - // received. In this case, digest requests succeeded, but not the data request. - node0.prime( - when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, true))); - - ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - - // the host that returned the response should be node 0. - assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM - List> errors = rs.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - ReadTimeoutException.class, - rte -> { - assertThat(rte) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded)"); - assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(1); - assertThat(rte.getBlockFor()).isEqualTo(2); - assertThat(rte.wasDataPresent()).isTrue(); - }); - - // should have succeeded in second attempt at ONE - Statement request = (Statement) rs.getExecutionInfo().getRequest(); - assertThat(request.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE); - - // there should have been a retry, and it should have been executed on the same host, - // but with consistency ONE. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(1); - oneCounter.assertNodeCounts(1, 0, 0); - - // expect 1 message: RETRY_SAME with ONE - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(1); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 1, - true, - 0, - new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE))); - } - - @Test - public void should_retry_on_read_timeout_when_enough_responses_and_data_not_present() { - // given a node that will respond to a query with a read timeout where 3 out of 3 responses are - // received, but data is not present. - node0.prime( - when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 2, 2, false))); - - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected a ReadTimeoutException"); - } catch (ReadTimeoutException rte) { - // then a read timeout exception is thrown. - assertThat(rte) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)"); - assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(2); - assertThat(rte.getBlockFor()).isEqualTo(2); - assertThat(rte.wasDataPresent()).isFalse(); - // the host that returned the response should be node 0. 
- assertThat(coordinatorAddress(rte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM - List> errors = rte.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - ReadTimeoutException.class, - rte1 -> { - assertThat(rte) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)"); - assertThat(rte1.getConsistencyLevel()) - .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte1.getReceived()).isEqualTo(2); - assertThat(rte1.getBlockFor()).isEqualTo(2); - assertThat(rte1.wasDataPresent()).isFalse(); - }); - } - - // there should have been a retry, and it should have been executed on the same host. - localQuorumCounter.assertTotalCount(2); - localQuorumCounter.assertNodeCounts(2, 0, 0); - oneCounter.assertTotalCount(0); - - // verify log events were emitted as expected - verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(2); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 2, - false, - 0, - RetryVerdict.RETRY_SAME)); - assertThat(loggedEvents.get(1).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 2, - false, - 1, - RetryVerdict.RETHROW)); - } - - @Test - public void should_only_retry_once_on_read_type() { - // given a node that will respond to a query with a read timeout at 2 CLs. 
- node0.prime( - when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, true))); - node0.prime(when(QUERY_ONE).then(readTimeout(ConsistencyLevel.ONE, 0, 1, false))); - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected a ReadTimeoutException"); - } catch (ReadTimeoutException wte) { - // then a read timeout exception is thrown - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during read query at consistency ONE (1 responses were required but only 0 replica responded)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE); - assertThat(wte.getReceived()).isEqualTo(0); - assertThat(wte.getBlockFor()).isEqualTo(1); - assertThat(wte.wasDataPresent()).isFalse(); - // the host that returned the response should be node 0. - assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM as well - List> errors = wte.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - ReadTimeoutException.class, - wte1 -> { - assertThat(wte1) - .hasMessageContaining( - "Cassandra timeout during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded)"); - assertThat(wte1.getConsistencyLevel()) - .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte1.getReceived()).isEqualTo(1); - assertThat(wte1.getBlockFor()).isEqualTo(2); - assertThat(wte1.wasDataPresent()).isTrue(); - }); - } - - // should have been retried on same host, but at consistency ONE. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(1); - oneCounter.assertNodeCounts(1, 0, 0); - - // verify log events were emitted as expected - verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(2); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 1, - true, - 0, - new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE))); - assertThat(loggedEvents.get(1).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.ONE, - 1, - 0, - false, - 1, - RetryVerdict.RETHROW)); - } - - @Test - public void should_retry_on_write_timeout_if_write_type_batch_log() { - // given a node that will respond to query with a write timeout with write type of batch log. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, BATCH_LOG))); - - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during BATCH_LOG write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(2); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG); - // the host that returned the response should be node 0. 
- assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM as well - List> errors = wte.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - WriteTimeoutException.class, - wte1 -> { - assertThat(wte1) - .hasMessageContaining( - "Cassandra timeout during BATCH_LOG write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)"); - assertThat(wte1.getConsistencyLevel()) - .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte1.getReceived()).isEqualTo(1); - assertThat(wte1.getBlockFor()).isEqualTo(2); - assertThat(wte1.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG); - }); - } - - // there should have been a retry, and it should have been executed on the same host. - localQuorumCounter.assertTotalCount(2); - localQuorumCounter.assertNodeCounts(2, 0, 0); - oneCounter.assertTotalCount(0); - - // verify log events were emitted as expected - verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(2); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - DefaultWriteType.BATCH_LOG, - 2, - 1, - 0, - RetryVerdict.RETRY_SAME)); - assertThat(loggedEvents.get(1).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - DefaultWriteType.BATCH_LOG, - 2, - 1, - 1, - RetryVerdict.RETHROW)); - } - - @Test - public void 
should_not_retry_on_write_timeout_if_write_type_batch_log_but_non_idempotent() { - // given a node that will respond to query with a write timeout with write type of batch log. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, BATCH_LOG))); - - try { - // when executing a non-idempotent query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false)); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during BATCH_LOG write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(2); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG); - // the host that returned the response should be node 0. - assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should not have been retried - List> errors = wte.getExecutionInfo().getErrors(); - assertThat(errors).isEmpty(); - } - - // should not have been retried. - localQuorumCounter.assertTotalCount(1); - oneCounter.assertTotalCount(0); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @DataProvider({"SIMPLE,SIMPLE", "BATCH,BATCH"}) - @Test - public void should_ignore_on_write_timeout_if_write_type_ignorable_and_at_least_one_ack_received( - WriteType writeType, DefaultWriteType driverWriteType) { - // given a node that will respond to query with a write timeout with write type that is either - // SIMPLE or BATCH. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, writeType))); - - // when executing a query. 
- ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - - // should have ignored the write timeout - assertThat(rs.all()).isEmpty(); - // the host that returned the response should be node 0. - assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress()); - assertThat(rs.getExecutionInfo().getErrors()).isEmpty(); - - // should not have been retried. - localQuorumCounter.assertTotalCount(1); - oneCounter.assertTotalCount(0); - - // verify log event was emitted for each host as expected - verify(appender, after(500)).doAppend(loggingEventCaptor.capture()); - // final log message should have 2 retries - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - driverWriteType, - 2, - 1, - 0, - RetryVerdict.IGNORE)); - } - - @DataProvider({"SIMPLE,SIMPLE", "BATCH,BATCH"}) - @Test - public void should_throw_on_write_timeout_if_write_type_ignorable_but_no_ack_received( - WriteType writeType, DefaultWriteType driverWriteType) { - // given a node that will respond to query with a write timeout with write type that is either - // SIMPLE or BATCH. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 0, 2, writeType))); - - try { - // when executing a query. 
- sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during " - + driverWriteType - + " write query at consistency LOCAL_QUORUM (2 replica were required but only 0 acknowledged the write)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(0); - assertThat(wte.getBlockFor()).isEqualTo(2); - assertThat(wte.getWriteType()).isEqualTo(driverWriteType); - // the host that returned the response should be node 0. - assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should not have been retried - List> errors = wte.getExecutionInfo().getErrors(); - assertThat(errors).isEmpty(); - } - - // should not have been retried. - localQuorumCounter.assertTotalCount(1); - oneCounter.assertTotalCount(0); - - // verify log event was emitted for each host as expected - verify(appender, after(500)).doAppend(loggingEventCaptor.capture()); - // final log message should have 2 retries - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - driverWriteType, - 2, - 0, - 0, - RetryVerdict.RETHROW)); - } - - @Test - public void should_downgrade_on_write_timeout_if_write_type_unlogged_batch() { - // given a node that will respond to query with a write timeout with write type of batch log. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, UNLOGGED_BATCH))); - - // when executing a query. - ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - - // the host that returned the response should be node 0. 
- assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM - List> errors = rs.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - WriteTimeoutException.class, - wte -> { - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during UNLOGGED_BATCH write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(2); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH); - }); - - // should have succeeded in second attempt at ONE - Statement request = (Statement) rs.getExecutionInfo().getRequest(); - assertThat(request.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE); - - // there should have been a retry, and it should have been executed on the same host, - // but at consistency ONE. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(1); - oneCounter.assertNodeCounts(1, 0, 0); - - // verify 1 log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(1); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - DefaultWriteType.UNLOGGED_BATCH, - 2, - 1, - 0, - new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE))); - } - - @Test - public void - should_not_downgrade_on_write_timeout_if_write_type_unlogged_batch_and_non_idempotent() { - // given a node that will respond to query with a write timeout with write type UNLOGGED_BATCH. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, UNLOGGED_BATCH))); - - try { - // when executing a non-idempotent query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false)); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during UNLOGGED_BATCH write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(2); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH); - // the host that returned the response should be node 0. 
- assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should not have been retried - List> errors = wte.getExecutionInfo().getErrors(); - assertThat(errors).isEmpty(); - } - - // should not have been retried. - localQuorumCounter.assertTotalCount(1); - oneCounter.assertTotalCount(0); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_only_retry_once_on_write_type() { - // given a node that will respond to a query with a write timeout at 2 CLs. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, UNLOGGED_BATCH))); - node0.prime(when(QUERY_ONE).then(writeTimeout(ConsistencyLevel.ONE, 0, 1, UNLOGGED_BATCH))); - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected a WriteTimeoutException"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte) - .hasMessageContaining( - "Cassandra timeout during UNLOGGED_BATCH write query at consistency ONE (1 replica were required but only 0 acknowledged the write)"); - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE); - assertThat(wte.getReceived()).isEqualTo(0); - assertThat(wte.getBlockFor()).isEqualTo(1); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH); - // the host that returned the response should be node 0. 
- assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress()); - // should have failed at first attempt at LOCAL_QUORUM as well - List> errors = wte.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - Entry error = errors.get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - WriteTimeoutException.class, - wte1 -> { - assertThat(wte1) - .hasMessageContaining( - "Cassandra timeout during UNLOGGED_BATCH write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)"); - assertThat(wte1.getConsistencyLevel()) - .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte1.getReceived()).isEqualTo(1); - assertThat(wte1.getBlockFor()).isEqualTo(2); - assertThat(wte1.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH); - }); - } - - // should have been retried on same host, but at consistency ONE. - localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(1); - oneCounter.assertNodeCounts(1, 0, 0); - - // verify log events were emitted as expected - verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(2); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - DefaultWriteType.UNLOGGED_BATCH, - 2, - 1, - 0, - new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE))); - assertThat(loggedEvents.get(1).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT, - logPrefix, - DefaultConsistencyLevel.ONE, - DefaultWriteType.UNLOGGED_BATCH, - 1, - 0, - 1, - RetryVerdict.RETHROW)); - } - - @Test - 
public void should_retry_on_next_host_on_unavailable_if_LWT() { - // given a node that will respond to a query with an unavailable. - node0.prime(when(QUERY_LOCAL_SERIAL).then(unavailable(ConsistencyLevel.LOCAL_SERIAL, 2, 1))); - - // when executing a query. - ResultSet result = sessionRule.session().execute(STATEMENT_LOCAL_SERIAL); - // then we should get a response, and the host that returned the response should be node 1. - assertThat(coordinatorAddress(result.getExecutionInfo())).isEqualTo(node1.getAddress()); - // the execution info on the result set indicates there was - // an error on the host that received the query. - assertThat(result.getExecutionInfo().getErrors()).hasSize(1); - Map.Entry error = result.getExecutionInfo().getErrors().get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - UnavailableException.class, - ue -> { - assertThat(ue) - .hasMessageContaining( - "Not enough replicas available for query at consistency LOCAL_SERIAL (2 required but only 1 alive)"); - assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL); - assertThat(ue.getAlive()).isEqualTo(1); - assertThat(ue.getRequired()).isEqualTo(2); - }); - - // should have been retried on another host. - localSerialCounter.assertTotalCount(2); - localSerialCounter.assertNodeCounts(1, 1, 0); - localQuorumCounter.assertTotalCount(0); - oneCounter.assertTotalCount(0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE, - logPrefix, - DefaultConsistencyLevel.LOCAL_SERIAL, - 2, - 1, - 0, - RetryVerdict.RETRY_NEXT)); - } - - @Test - public void should_downgrade_on_unavailable() { - // given a node that will respond to a query with an unavailable. 
- node0.prime(when(QUERY_LOCAL_QUORUM).then(unavailable(ConsistencyLevel.LOCAL_QUORUM, 2, 1))); - - // when executing a query. - ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - // then we should get a response, and the host that returned the response should be node 0. - assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress()); - // the execution info on the result set indicates there was - // an error on the host that received the query. - assertThat(rs.getExecutionInfo().getErrors()).hasSize(1); - Map.Entry error = rs.getExecutionInfo().getErrors().get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - UnavailableException.class, - ue -> { - assertThat(ue) - .hasMessageContaining( - "Not enough replicas available for query at consistency LOCAL_QUORUM (2 required but only 1 alive)"); - assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(ue.getAlive()).isEqualTo(1); - assertThat(ue.getRequired()).isEqualTo(2); - }); - - // should have succeeded in second attempt at ONE - Statement request = (Statement) rs.getExecutionInfo().getRequest(); - assertThat(request.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE); - - // should have been retried on the same host, but at ONE. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(1); - oneCounter.assertNodeCounts(1, 0, 0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 1, - 0, - new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE))); - } - - @Test - public void should_only_retry_once_on_unavailable() { - // given two nodes that will respond to a query with an unavailable. - node0.prime(when(QUERY_LOCAL_QUORUM).then(unavailable(ConsistencyLevel.LOCAL_QUORUM, 2, 1))); - node0.prime(when(QUERY_ONE).then(unavailable(ConsistencyLevel.ONE, 1, 0))); - - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected an UnavailableException"); - } catch (UnavailableException ue) { - // then we should get an unavailable exception with the host being node 1 (since it was second - // tried). 
- assertThat(ue) - .hasMessageContaining( - "Not enough replicas available for query at consistency ONE (1 required but only 0 alive)"); - assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE); - assertThat(ue.getRequired()).isEqualTo(1); - assertThat(ue.getAlive()).isEqualTo(0); - assertThat(ue.getExecutionInfo().getErrors()).hasSize(1); - Map.Entry error = ue.getExecutionInfo().getErrors().get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()) - .isInstanceOfSatisfying( - UnavailableException.class, - ue1 -> { - assertThat(ue1) - .hasMessageContaining( - "Not enough replicas available for query at consistency LOCAL_QUORUM (2 required but only 1 alive)"); - assertThat(ue1.getConsistencyLevel()) - .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(ue1.getRequired()).isEqualTo(2); - assertThat(ue1.getAlive()).isEqualTo(1); - }); - } - - // should have been retried on same host, but at ONE. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(1); - oneCounter.assertNodeCounts(1, 0, 0); - - // verify log events were emitted as expected - verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture()); - List loggedEvents = loggingEventCaptor.getAllValues(); - assertThat(loggedEvents).hasSize(2); - assertThat(loggedEvents.get(0).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE, - logPrefix, - DefaultConsistencyLevel.LOCAL_QUORUM, - 2, - 1, - 0, - new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE))); - assertThat(loggedEvents.get(1).getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE, - logPrefix, - DefaultConsistencyLevel.ONE, - 1, - 0, - 1, - RetryVerdict.RETHROW)); - } - - @Test - public void should_retry_on_next_host_on_connection_error_if_idempotent() { - // given a node that will close its connection as result of receiving a query. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - // when executing a query. - ResultSet result = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - // then we should get a response, and the execution info on the result set indicates there was - // an error on the host that received the query. - assertThat(result.getExecutionInfo().getErrors()).hasSize(1); - Map.Entry error = result.getExecutionInfo().getErrors().get(0); - assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress()); - assertThat(error.getValue()).isInstanceOf(ClosedConnectionException.class); - // the host that returned the response should be node 1. - assertThat(coordinatorAddress(result.getExecutionInfo())).isEqualTo(node1.getAddress()); - - // should have been retried. 
- localQuorumCounter.assertTotalCount(2); - // expected query on node 0, and retry on node 2. - localQuorumCounter.assertNodeCounts(1, 1, 0); - oneCounter.assertTotalCount(0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_ABORTED, - logPrefix, - ClosedConnectionException.class.getSimpleName(), - error.getValue().getMessage(), - 0, - RetryVerdict.RETRY_NEXT)); - } - - @Test - public void should_keep_retrying_on_next_host_on_connection_error() { - // given a request for which every node will close its connection upon receiving it. - SIMULACRON_RULE - .cluster() - .prime( - when(QUERY_LOCAL_QUORUM) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("AllNodesFailedException expected"); - } catch (AllNodesFailedException ex) { - // then an AllNodesFailedException should be raised indicating that all nodes failed the - // request. - assertThat(ex.getAllErrors()).hasSize(3); - } - - // should have been tried on all nodes. - // should have been retried. - localQuorumCounter.assertTotalCount(3); - // expected query on node 0, and retry on node 2 and 3. 
- localQuorumCounter.assertNodeCounts(1, 1, 1); - oneCounter.assertTotalCount(0); - - // verify log event was emitted for each host as expected - verify(appender, after(500).times(3)).doAppend(loggingEventCaptor.capture()); - // final log message should have 2 retries - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_ABORTED, - logPrefix, - ClosedConnectionException.class.getSimpleName(), - "Lost connection to remote peer", - 2, - RetryVerdict.RETRY_NEXT)); - } - - @Test - public void should_not_retry_on_connection_error_if_non_idempotent() { - // given a node that will close its connection as result of receiving a query. - node0.prime( - when(QUERY_LOCAL_QUORUM) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - try { - // when executing a non-idempotent query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false)); - fail("ClosedConnectionException expected"); - } catch (ClosedConnectionException ex) { - // then a ClosedConnectionException should be raised, indicating that the connection closed - // while handling the request on that node. - // this clearly indicates that the request wasn't retried. - // Exception should indicate that node 0 was the failing node. - // FIXME JAVA-2908 - // Node coordinator = ex.getExecutionInfo().getCoordinator(); - // assertThat(coordinator).isNotNull(); - // assertThat(coordinator.getEndPoint().resolve()) - // .isEqualTo(SIMULACRON_RULE.cluster().node(0).getAddress()); - } - - // should not have been retried. - localQuorumCounter.assertTotalCount(1); - oneCounter.assertTotalCount(0); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_keep_retrying_on_next_host_on_error_response() { - // given every node responding with a server error. 
- SIMULACRON_RULE - .cluster() - .prime(when(QUERY_LOCAL_QUORUM).then(serverError("this is a server error"))); - - try { - // when executing a query. - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException e) { - // then we should get an all nodes failed exception, indicating the query was tried each node. - assertThat(e.getAllErrors()).hasSize(3); - for (List nodeErrors : e.getAllErrors().values()) { - for (Throwable nodeError : nodeErrors) { - assertThat(nodeError).isInstanceOf(ServerError.class); - assertThat(nodeError).hasMessage("this is a server error"); - } - } - } - - // should have been tried on all nodes. - localQuorumCounter.assertTotalCount(3); - localQuorumCounter.assertNodeCounts(1, 1, 1); - - // verify log event was emitted for each host as expected - verify(appender, after(500).times(3)).doAppend(loggingEventCaptor.capture()); - // final log message should have 2 retries - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_ERROR, - logPrefix, - ServerError.class.getSimpleName(), - "this is a server error", - 2, - RetryVerdict.RETRY_NEXT)); - } - - @Test - public void should_not_retry_on_next_host_on_error_response_if_write_failure() { - // given every node responding with a write failure. - SIMULACRON_RULE - .cluster() - .prime( - when(QUERY_LOCAL_QUORUM) - .then( - writeFailure( - ConsistencyLevel.LOCAL_QUORUM, 1, 2, ImmutableMap.of(), WriteType.SIMPLE))); - try { - // when executing a query - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected a WriteFailureException"); - } catch (WriteFailureException wfe) { - // then we should get a write failure exception with the host being node 1 (since it was - // second tried). 
- assertThat(wfe) - .hasMessageContaining( - "Cassandra failure during write query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)"); - assertThat(wfe.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wfe.getBlockFor()).isEqualTo(2); - assertThat(wfe.getReceived()).isEqualTo(1); - assertThat(wfe.getWriteType()).isEqualTo(DefaultWriteType.SIMPLE); - assertThat(wfe.getReasonMap()).isEmpty(); - } - - // should only have been tried on first node. - localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_ERROR, - logPrefix, - WriteFailureException.class.getSimpleName(), - "Cassandra failure during write query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)", - 0, - RetryVerdict.RETHROW)); - } - - @Test - public void should_not_retry_on_next_host_on_error_response_if_read_failure() { - // given every node responding with a read failure. - SIMULACRON_RULE - .cluster() - .prime( - when(QUERY_LOCAL_QUORUM) - .then(readFailure(ConsistencyLevel.LOCAL_QUORUM, 1, 2, ImmutableMap.of(), true))); - try { - // when executing a query - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM); - fail("Expected a ReadFailureException"); - } catch (ReadFailureException rfe) { - // then we should get a read failure exception with the host being node 1 (since it was - // second tried). 
- assertThat(rfe) - .hasMessageContaining( - "Cassandra failure during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)"); - assertThat(rfe.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rfe.getBlockFor()).isEqualTo(2); - assertThat(rfe.getReceived()).isEqualTo(1); - assertThat(rfe.wasDataPresent()).isTrue(); - assertThat(rfe.getReasonMap()).isEmpty(); - } - - // should only have been tried on first node. - localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - ConsistencyDowngradingRetryPolicy.VERDICT_ON_ERROR, - logPrefix, - ReadFailureException.class.getSimpleName(), - "Cassandra failure during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)", - 0, - RetryVerdict.RETHROW)); - } - - @Test - public void should_not_retry_on_next_host_on_error_response_if_non_idempotent() { - // given every node responding with a server error. - SIMULACRON_RULE - .cluster() - .prime(when(QUERY_LOCAL_QUORUM).then(serverError("this is a server error"))); - - try { - // when executing a query that is not idempotent - sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false)); - fail("Expected a ServerError"); - } catch (ServerError e) { - // then should get a server error from first host. - assertThat(e.getMessage()).isEqualTo("this is a server error"); - } - - // should only have been tried on first node. 
- localQuorumCounter.assertTotalCount(1); - localQuorumCounter.assertNodeCounts(1, 0, 0); - oneCounter.assertTotalCount(0); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - private String expectedMessage(String template, Object... args) { - return MessageFormatter.arrayFormat(template, args).getMessage(); - } - - private SocketAddress coordinatorAddress(ExecutionInfo executionInfo) { - Node coordinator = executionInfo.getCoordinator(); - assertThat(coordinator).isNotNull(); - return coordinator.getEndPoint().resolve(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/DefaultRetryPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/DefaultRetryPolicyIT.java deleted file mode 100644 index 4a3cebf914f..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/DefaultRetryPolicyIT.java +++ /dev/null @@ -1,547 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.retry; - -import static com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_QUORUM; -import static com.datastax.oss.simulacron.common.codec.WriteType.BATCH_LOG; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.closeConnection; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readTimeout; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.writeTimeout; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.after; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; -import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; 
-import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.stubbing.CloseType; -import com.datastax.oss.simulacron.common.stubbing.DisconnectAction; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.slf4j.LoggerFactory; -import org.slf4j.helpers.MessageFormatter; - -@RunWith(DataProviderRunner.class) -public class DefaultRetryPolicyIT { - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - public @Rule SessionRule sessionRule = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - SortingLoadBalancingPolicy.class) - .build()) - .build(); - - private static String queryStr = "select * from foo"; - private static final SimpleStatement query = SimpleStatement.builder(queryStr).build(); - - private ArgumentCaptor loggingEventCaptor = - 
ArgumentCaptor.forClass(ILoggingEvent.class); - - @SuppressWarnings("unchecked") - private Appender appender = (Appender) mock(Appender.class); - - private Logger logger; - private Level oldLevel; - private String logPrefix; - - private final QueryCounter counter = - QueryCounter.builder(SIMULACRON_RULE.cluster()) - .withFilter((l) -> l.getQuery().equals(queryStr)) - .build(); - - @Before - public void setup() { - logger = (Logger) LoggerFactory.getLogger(DefaultRetryPolicy.class); - oldLevel = logger.getLevel(); - logger.setLevel(Level.TRACE); - logger.addAppender(appender); - // the log prefix we expect in retry logging messages. - logPrefix = sessionRule.session().getName() + "|default"; - // clear activity logs and primes between tests since simulacron instance is shared. - SIMULACRON_RULE.cluster().clearLogs(); - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(oldLevel); - } - - @Test - public void should_not_retry_on_read_timeout_when_data_present() { - // given a node that will respond to query with a read timeout where data is present. - SIMULACRON_RULE - .cluster() - .node(0) - .prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 1, 3, true))); - - try { - // when executing a query - sessionRule.session().execute(query); - fail("Expected a ReadTimeoutException"); - } catch (ReadTimeoutException rte) { - // then a read timeout exception is thrown - assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(1); - assertThat(rte.getBlockFor()).isEqualTo(3); - assertThat(rte.wasDataPresent()).isTrue(); - } - - // should not have been retried. 
- counter.assertTotalCount(1); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_not_retry_on_read_timeout_when_less_than_blockFor_received() { - // given a node that will respond to a query with a read timeout where 2 out of 3 responses are - // received. - // in this case, digest requests succeeded, but not the data request. - SIMULACRON_RULE - .cluster() - .node(0) - .prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 2, 3, false))); - - try { - // when executing a query - sessionRule.session().execute(query); - fail("Expected a ReadTimeoutException"); - } catch (ReadTimeoutException rte) { - // then a read timeout exception is thrown - assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(2); - assertThat(rte.getBlockFor()).isEqualTo(3); - assertThat(rte.wasDataPresent()).isFalse(); - } - - // should not have been retried. - counter.assertTotalCount(1); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_retry_on_read_timeout_when_enough_responses_and_data_not_present() { - // given a node that will respond to a query with a read timeout where 3 out of 3 responses are - // received, - // but data is not present. - SIMULACRON_RULE - .cluster() - .node(0) - .prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 3, 3, false))); - - try { - // when executing a query. - sessionRule.session().execute(query); - fail("Expected a ReadTimeoutException"); - } catch (ReadTimeoutException rte) { - // then a read timeout exception is thrown. 
- assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(rte.getReceived()).isEqualTo(3); - assertThat(rte.getBlockFor()).isEqualTo(3); - assertThat(rte.wasDataPresent()).isFalse(); - } - - // there should have been a retry, and it should have been executed on the same host. - counter.assertTotalCount(2); - counter.assertNodeCounts(2, 0, 0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - DefaultRetryPolicy.RETRYING_ON_READ_TIMEOUT, - logPrefix, - "LOCAL_QUORUM", - 3, - 3, - false, - 0)); - } - - @Test - public void should_retry_on_next_host_on_connection_error_if_idempotent() { - // given a node that will close its connection as result of receiving a query. - SIMULACRON_RULE - .cluster() - .node(0) - .prime( - when(queryStr) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - // when executing a query. - ResultSet result = sessionRule.session().execute(query); - // then we should get a response, and the execution info on the result set indicates there was - // an error on - // the host that received the query. - assertThat(result.getExecutionInfo().getErrors()).hasSize(1); - Map.Entry error = result.getExecutionInfo().getErrors().get(0); - assertThat(error.getKey().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(0).inetSocketAddress()); - assertThat(error.getValue()).isInstanceOf(ClosedConnectionException.class); - // the host that returned the response should be node 1. - assertThat(result.getExecutionInfo().getCoordinator().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress()); - - // should have been retried. - counter.assertTotalCount(2); - // expected query on node 0, and retry on node 2. 
- counter.assertNodeCounts(1, 1, 0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo(expectedMessage(DefaultRetryPolicy.RETRYING_ON_ABORTED, logPrefix, 0)); - } - - @Test - public void should_keep_retrying_on_next_host_on_connection_error() { - // given a request for which every node will close its connection upon receiving it. - SIMULACRON_RULE - .cluster() - .prime( - when(queryStr) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - try { - // when executing a query. - sessionRule.session().execute(query); - fail("AllNodesFailedException expected"); - } catch (AllNodesFailedException ex) { - // then an AllNodesFailedException should be raised indicating that all nodes failed the - // request. - assertThat(ex.getAllErrors()).hasSize(3); - } - - // should have been tried on all nodes. - // should have been retried. - counter.assertTotalCount(3); - // expected query on node 0, and retry on node 2 and 3. - counter.assertNodeCounts(1, 1, 1); - - // verify log event was emitted for each host as expected - verify(appender, after(500).times(3)).doAppend(loggingEventCaptor.capture()); - // final log message should have 2 retries - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo(expectedMessage(DefaultRetryPolicy.RETRYING_ON_ABORTED, logPrefix, 2)); - } - - @Test - public void should_not_retry_on_connection_error_if_non_idempotent() { - // given a node that will close its connection as result of receiving a query. - SIMULACRON_RULE - .cluster() - .node(0) - .prime( - when(queryStr) - .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT))); - - try { - // when executing a non-idempotent query. 
- sessionRule - .session() - .execute(SimpleStatement.builder(queryStr).setIdempotence(false).build()); - fail("ClosedConnectionException expected"); - } catch (ClosedConnectionException ex) { - // then a ClosedConnectionException should be raised, indicating that the connection closed - // while handling - // the request on that node. - // this clearly indicates that the request wasn't retried. - // Exception should indicate that node 0 was the failing node. - // TODO: Validate the address on the connection if made available. - } - - // should not have been retried. - counter.assertTotalCount(1); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_retry_on_write_timeout_if_write_type_batch_log() { - // given a node that will respond to query with a write timeout with write type of batch log. - SIMULACRON_RULE - .cluster() - .node(0) - .prime(when(queryStr).then(writeTimeout(LOCAL_QUORUM, 1, 3, BATCH_LOG))); - - try { - // when executing a query. - sessionRule.session().execute(queryStr); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(3); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG); - } - - // there should have been a retry, and it should have been executed on the same host. 
- counter.assertTotalCount(2); - counter.assertNodeCounts(2, 0, 0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - DefaultRetryPolicy.RETRYING_ON_WRITE_TIMEOUT, - logPrefix, - "LOCAL_QUORUM", - "BATCH_LOG", - 3, - 1, - 0)); - } - - /** - * @return All WriteTypes that are not BATCH_LOG, on write timeout of these, the driver should not - * retry. - */ - @DataProvider - public static Object[] nonBatchLogWriteTypes() { - return Arrays.stream(com.datastax.oss.simulacron.common.codec.WriteType.values()) - .filter(wt -> wt != BATCH_LOG) - .toArray(); - } - - @UseDataProvider("nonBatchLogWriteTypes") - @Test - public void should_not_retry_on_write_timeout_if_write_type_non_batch_log( - com.datastax.oss.simulacron.common.codec.WriteType writeType) { - // given a node that will respond to query with a write timeout with write type that is not - // batch log. - SIMULACRON_RULE - .cluster() - .node(0) - .prime(when(queryStr).then(writeTimeout(LOCAL_QUORUM, 1, 3, writeType))); - - try { - // when executing a query. - sessionRule.session().execute(queryStr); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(3); - } - - // should not have been retried. - counter.assertTotalCount(1); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_not_retry_on_write_timeout_if_write_type_batch_log_but_non_idempotent() { - // given a node that will respond to query with a write timeout with write type of batch log. 
- SIMULACRON_RULE - .cluster() - .node(0) - .prime(when(queryStr).then(writeTimeout(LOCAL_QUORUM, 1, 3, BATCH_LOG))); - - try { - // when executing a non-idempotent query. - sessionRule - .session() - .execute(SimpleStatement.builder(queryStr).setIdempotence(false).build()); - fail("WriteTimeoutException expected"); - } catch (WriteTimeoutException wte) { - // then a write timeout exception is thrown - assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(wte.getReceived()).isEqualTo(1); - assertThat(wte.getBlockFor()).isEqualTo(3); - assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG); - } - - // should not have been retried. - counter.assertTotalCount(1); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - @Test - public void should_retry_on_next_host_on_unavailable() { - // given a node that will respond to a query with an unavailable. - SIMULACRON_RULE.cluster().node(0).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0))); - - // when executing a query. - ResultSet result = sessionRule.session().execute(queryStr); - // then we should get a response, and the execution info on the result set indicates there was - // an error on - // the host that received the query. - assertThat(result.getExecutionInfo().getErrors()).hasSize(1); - Map.Entry error = result.getExecutionInfo().getErrors().get(0); - assertThat(error.getKey().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(0).inetSocketAddress()); - assertThat(error.getValue()).isInstanceOf(UnavailableException.class); - // the host that returned the response should be node 1. - assertThat(result.getExecutionInfo().getCoordinator().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress()); - - // should have been retried on another host. 
- counter.assertTotalCount(2); - counter.assertNodeCounts(1, 1, 0); - - // verify log event was emitted as expected - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo( - expectedMessage( - DefaultRetryPolicy.RETRYING_ON_UNAVAILABLE, logPrefix, "LOCAL_QUORUM", 3, 0, 0)); - } - - @Test - public void should_only_retry_once_on_unavailable() { - // given two nodes that will respond to a query with an unavailable. - SIMULACRON_RULE.cluster().node(0).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0))); - SIMULACRON_RULE.cluster().node(1).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0))); - - try { - // when executing a query. - sessionRule.session().execute(queryStr); - fail("Expected an UnavailableException"); - } catch (UnavailableException ue) { - // then we should get an unavailable exception with the host being node 1 (since it was second - // tried). - assertThat(ue.getCoordinator().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress()); - assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM); - assertThat(ue.getRequired()).isEqualTo(3); - assertThat(ue.getAlive()).isEqualTo(0); - } - - // should have been retried on another host. - counter.assertTotalCount(2); - counter.assertNodeCounts(1, 1, 0); - } - - @Test - public void should_keep_retrying_on_next_host_on_error_response() { - // given every node responding with a server error. - SIMULACRON_RULE.cluster().prime(when(queryStr).then(serverError("this is a server error"))); - - try { - // when executing a query. - sessionRule.session().execute(queryStr); - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException e) { - // then we should get an all nodes failed exception, indicating the query was tried each node. 
- assertThat(e.getAllErrors()).hasSize(3); - for (List nodeErrors : e.getAllErrors().values()) { - for (Throwable nodeError : nodeErrors) { - assertThat(nodeError).isInstanceOf(ServerError.class); - } - } - } - - // should have been tried on all nodes. - counter.assertTotalCount(3); - counter.assertNodeCounts(1, 1, 1); - - // verify log event was emitted for each host as expected - verify(appender, after(500).times(3)).doAppend(loggingEventCaptor.capture()); - // final log message should have 2 retries - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .isEqualTo(expectedMessage(DefaultRetryPolicy.RETRYING_ON_ERROR, logPrefix, 2)); - } - - @Test - public void should_not_retry_on_next_host_on_error_response_if_non_idempotent() { - // given every node responding with a server error. - SIMULACRON_RULE.cluster().prime(when(queryStr).then(serverError("this is a server error"))); - - try { - // when executing a query that is not idempotent - sessionRule - .session() - .execute(SimpleStatement.builder(queryStr).setIdempotence(false).build()); - fail("Expected a ServerError"); - } catch (ServerError e) { - // then should get a server error from first host. - assertThat(e.getMessage()).isEqualTo("this is a server error"); - } - - // should only have been tried on first node. - counter.assertTotalCount(1); - counter.assertNodeCounts(1, 0, 0); - - // expect no logging messages since there was no retry - verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class)); - } - - private String expectedMessage(String template, Object... 
args) { - return MessageFormatter.arrayFormat(template, args).getMessage(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/PerProfileRetryPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/PerProfileRetryPolicyIT.java deleted file mode 100644 index b2e53bb09d0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/PerProfileRetryPolicyIT.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.retry; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryDecision; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.core.servererrors.WriteType; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import org.junit.Before; 
-import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class PerProfileRetryPolicyIT { - - // Shared across all tests methods. - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(2)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - SortingLoadBalancingPolicy.class) - .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, DefaultRetryPolicy.class) - .startProfile("profile1") - .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, NoRetryPolicy.class) - .startProfile("profile2") - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 100) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static String QUERY_STRING = "select * from foo"; - private static final SimpleStatement QUERY = SimpleStatement.newInstance(QUERY_STRING); - - private final QueryCounter counter = - QueryCounter.builder(SIMULACRON_RULE.cluster()) - .withFilter((l) -> l.getQuery().equals(QUERY_STRING)) - .build(); - - @Before - public void clear() { - SIMULACRON_RULE.cluster().clearLogs(); - } - - @BeforeClass - public static void setup() { - // node 0 will return an unavailable to query. - SIMULACRON_RULE - .cluster() - .node(0) - .prime( - when(QUERY_STRING) - .then( - unavailable( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, 1, 0))); - // node 1 will return a valid empty rows response. 
- SIMULACRON_RULE.cluster().node(1).prime(when(QUERY_STRING).then(noRows())); - - // sanity checks - DriverContext context = SESSION_RULE.session().getContext(); - DriverConfig config = context.getConfig(); - assertThat(config.getProfiles()).containsKeys("profile1", "profile2"); - - assertThat(context.getRetryPolicies()) - .hasSize(3) - .containsKeys(DriverExecutionProfile.DEFAULT_NAME, "profile1", "profile2"); - - RetryPolicy defaultPolicy = context.getRetryPolicy(DriverExecutionProfile.DEFAULT_NAME); - RetryPolicy policy1 = context.getRetryPolicy("profile1"); - RetryPolicy policy2 = context.getRetryPolicy("profile2"); - assertThat(defaultPolicy) - .isInstanceOf(DefaultRetryPolicy.class) - .isSameAs(policy2) - .isNotSameAs(policy1); - assertThat(policy1).isInstanceOf(NoRetryPolicy.class); - } - - @Test(expected = UnavailableException.class) - public void should_use_policy_from_request_profile() { - // since profile1 uses a NoRetryPolicy, UnavailableException should surface to client. - SESSION_RULE.session().execute(QUERY.setExecutionProfileName("profile1")); - } - - @Test - public void should_use_policy_from_config_when_not_configured_in_request_profile() { - // since profile2 has no configured retry policy, it should defer to configuration which uses - // DefaultRetryPolicy, which should try request on next host (host 1). - ResultSet result = SESSION_RULE.session().execute(QUERY.setExecutionProfileName("profile2")); - - // expect an unavailable exception to be present in errors. - List> errors = result.getExecutionInfo().getErrors(); - assertThat(errors).hasSize(1); - assertThat(errors.get(0).getValue()).isInstanceOf(UnavailableException.class); - - counter.assertNodeCounts(1, 1); - } - - // A policy that simply rethrows always. 
- public static class NoRetryPolicy implements RetryPolicy { - - @SuppressWarnings("unused") - public NoRetryPolicy(DriverContext context, String profileName) {} - - @Override - @Deprecated - public RetryDecision onReadTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int blockFor, - int received, - boolean dataPresent, - int retryCount) { - return RetryDecision.RETHROW; - } - - @Override - @Deprecated - public RetryDecision onWriteTimeout( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - @NonNull WriteType writeType, - int blockFor, - int received, - int retryCount) { - return RetryDecision.RETHROW; - } - - @Override - @Deprecated - public RetryDecision onUnavailable( - @NonNull Request request, - @NonNull ConsistencyLevel cl, - int required, - int alive, - int retryCount) { - return RetryDecision.RETHROW; - } - - @Override - @Deprecated - public RetryDecision onRequestAborted( - @NonNull Request request, @NonNull Throwable error, int retryCount) { - return RetryDecision.RETHROW; - } - - @Override - @Deprecated - public RetryDecision onErrorResponse( - @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { - return RetryDecision.RETHROW; - } - - @Override - public void close() {} - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/AddedNodeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/AddedNodeIT.java deleted file mode 100644 index 1ce3fd1ca0e..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/AddedNodeIT.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.ClassRule; -import org.junit.Test; - -public class AddedNodeIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withNodes(3).build(); - - @Test - public void should_signal_and_create_pool_when_node_gets_added() { - AddListener addListener = new AddListener(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, null, addListener, null, null)) { - assertThat(session.getMetadata().getTokenMap()).isPresent(); - Set tokenRanges = session.getMetadata().getTokenMap().get().getTokenRanges(); - assertThat(tokenRanges).hasSize(3); - 
CCM_RULE.getCcmBridge().add(4, "dc1"); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> addListener.addedNode != null); - Map pools = ((DefaultSession) session).getPools(); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> pools.containsKey(addListener.addedNode)); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> session.getMetadata().getTokenMap().get().getTokenRanges().size() == 4); - } - } - - static class AddListener implements NodeStateListener { - - volatile Node addedNode; - - @Override - public void onRemove(@NonNull Node node) {} - - @Override - public void onAdd(@NonNull Node node) { - addedNode = node; - } - - @Override - public void onUp(@NonNull Node node) {} - - @Override - public void onDown(@NonNull Node node) {} - - @Override - public void close() throws Exception {} - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ExceptionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ExceptionIT.java deleted file mode 100644 index b3a96dde3b9..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ExceptionIT.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.session; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.core.servererrors.UnavailableException; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import java.util.List; -import java.util.Map; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import 
org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ExceptionIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(2)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, - SortingLoadBalancingPolicy.class) - .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, DefaultRetryPolicy.class) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static String QUERY_STRING = "select * from foo"; - - @Before - public void clear() { - SIMULACRON_RULE.cluster().clearLogs(); - } - - @Test - public void should_expose_execution_info_on_exceptions() { - // Given - SIMULACRON_RULE - .cluster() - .node(0) - .prime( - when(QUERY_STRING) - .then( - unavailable( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, 1, 0))); - SIMULACRON_RULE - .cluster() - .node(1) - .prime(when(QUERY_STRING).then(PrimeDsl.invalid("Mock error message"))); - - // Then - assertThatThrownBy(() -> SESSION_RULE.session().execute(QUERY_STRING)) - .isInstanceOf(InvalidQueryException.class) - .satisfies( - exception -> { - ExecutionInfo info = ((InvalidQueryException) exception).getExecutionInfo(); - assertThat(info).isNotNull(); - assertThat(info.getCoordinator().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress()); - assertThat(((SimpleStatement) info.getRequest()).getQuery()).isEqualTo(QUERY_STRING); - - // specex disabled => the initial execution completed the response - assertThat(info.getSpeculativeExecutionCount()).isEqualTo(0); - assertThat(info.getSuccessfulExecutionIndex()).isEqualTo(0); - - assertThat(info.getTracingId()).isNull(); - assertThat(info.getPagingState()).isNull(); - 
assertThat(info.getIncomingPayload()).isEmpty(); - assertThat(info.getWarnings()).isEmpty(); - assertThat(info.isSchemaInAgreement()).isTrue(); - assertThat(info.getResponseSizeInBytes()) - .isEqualTo(info.getCompressedResponseSizeInBytes()) - .isEqualTo(-1); - - List> errors = info.getErrors(); - assertThat(errors).hasSize(1); - Map.Entry entry0 = errors.get(0); - assertThat(entry0.getKey().getEndPoint().resolve()) - .isEqualTo(SIMULACRON_RULE.cluster().node(0).inetSocketAddress()); - Throwable node0Exception = entry0.getValue(); - assertThat(node0Exception).isInstanceOf(UnavailableException.class); - // ExecutionInfo is not exposed for retried errors - assertThat(((UnavailableException) node0Exception).getExecutionInfo()).isNull(); - }); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ListenersIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ListenersIT.java deleted file mode 100644 index 0fa089483fd..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ListenersIT.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase; -import com.datastax.oss.driver.api.core.metadata.SafeInitNodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListenerBase; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@Category(ParallelizableTests.class) -@RunWith(MockitoJUnitRunner.class) -public class ListenersIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new 
SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @Mock private NodeStateListener nodeListener1; - @Mock private NodeStateListener nodeListener2; - @Mock private SchemaChangeListener schemaListener1; - @Mock private SchemaChangeListener schemaListener2; - @Mock private RequestTracker requestTracker1; - @Mock private RequestTracker requestTracker2; - - @Captor private ArgumentCaptor nodeCaptor1; - @Captor private ArgumentCaptor nodeCaptor2; - - @Test - public void should_inject_session_in_listeners() throws Exception { - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) - .addNodeStateListener(new SafeInitNodeStateListener(nodeListener1, true)) - .addNodeStateListener(new SafeInitNodeStateListener(nodeListener2, true)) - .addSchemaChangeListener(schemaListener1) - .addSchemaChangeListener(schemaListener2) - .addRequestTracker(requestTracker1) - .addRequestTracker(requestTracker2) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withClassList( - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES, - Collections.singletonList(MyNodeStateListener.class)) - .withClassList( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES, - Collections.singletonList(MySchemaChangeListener.class)) - .withClassList( - DefaultDriverOption.REQUEST_TRACKER_CLASSES, - Collections.singletonList(MyRequestTracker.class)) - .build()) - .build()) { - - // These NodeStateListeners are wrapped with SafeInitNodeStateListener which delays #onUp - // callbacks until #onSessionReady is called, these will all happen during session - // initialization - InOrder inOrder1 = inOrder(nodeListener1); - inOrder1.verify(nodeListener1).onSessionReady(session); - inOrder1.verify(nodeListener1).onUp(nodeCaptor1.capture()); - - InOrder inOrder2 = inOrder(nodeListener2); - inOrder2.verify(nodeListener2).onSessionReady(session); - inOrder2.verify(nodeListener2).onUp(nodeCaptor2.capture()); - - 
assertThat(nodeCaptor1.getValue().getEndPoint()) - .isEqualTo(SIMULACRON_RULE.getContactPoints().iterator().next()); - - assertThat(nodeCaptor2.getValue().getEndPoint()) - .isEqualTo(SIMULACRON_RULE.getContactPoints().iterator().next()); - - // SchemaChangeListener#onSessionReady is called asynchronously from AdminExecutor so we may - // have to wait a little - verify(schemaListener1, timeout(500).times(1)).onSessionReady(session); - verify(schemaListener2, timeout(500).times(1)).onSessionReady(session); - - // Request tracker #onSessionReady is called synchronously during session initialization - verify(requestTracker1).onSessionReady(session); - verify(requestTracker2).onSessionReady(session); - - assertThat(MyNodeStateListener.onSessionReadyCalled).isTrue(); - assertThat(MyNodeStateListener.onUpCalled).isTrue(); - - // SchemaChangeListener#onSessionReady is called asynchronously from AdminExecutor so we may - // have to wait a little - assertThat( - Uninterruptibles.awaitUninterruptibly( - MySchemaChangeListener.onSessionReadyLatch, 500, TimeUnit.MILLISECONDS)) - .isTrue(); - - assertThat(MyRequestTracker.onSessionReadyCalled).isTrue(); - } - - // CqlSession#close waits for all listener close methods to be called - verify(nodeListener1).close(); - verify(nodeListener2).close(); - - verify(schemaListener1).close(); - verify(schemaListener2).close(); - - verify(requestTracker1).close(); - verify(requestTracker2).close(); - - assertThat(MyNodeStateListener.closeCalled).isTrue(); - assertThat(MySchemaChangeListener.closeCalled).isTrue(); - assertThat(MyRequestTracker.closeCalled).isTrue(); - } - - public static class MyNodeStateListener extends SafeInitNodeStateListener { - - private static volatile boolean onSessionReadyCalled = false; - private static volatile boolean onUpCalled = false; - private static volatile boolean closeCalled = false; - - public MyNodeStateListener(@SuppressWarnings("unused") DriverContext ignored) { - super( - new NodeStateListenerBase() { 
- - @Override - public void onSessionReady(@NonNull Session session) { - onSessionReadyCalled = true; - } - - @Override - public void onUp(@NonNull Node node) { - onUpCalled = true; - } - - @Override - public void close() { - closeCalled = true; - } - }, - true); - } - } - - public static class MySchemaChangeListener extends SchemaChangeListenerBase { - - private static CountDownLatch onSessionReadyLatch = new CountDownLatch(1); - private static volatile boolean closeCalled = false; - - public MySchemaChangeListener(@SuppressWarnings("unused") DriverContext ignored) {} - - @Override - public void onSessionReady(@NonNull Session session) { - onSessionReadyLatch.countDown(); - } - - @Override - public void close() throws Exception { - closeCalled = true; - } - } - - public static class MyRequestTracker implements RequestTracker { - - private static volatile boolean onSessionReadyCalled = false; - private static volatile boolean closeCalled = false; - - public MyRequestTracker(@SuppressWarnings("unused") DriverContext ignored) {} - - @Override - public void onSessionReady(@NonNull Session session) { - onSessionReadyCalled = true; - } - - @Override - public void close() throws Exception { - closeCalled = true; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RemovedNodeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RemovedNodeIT.java deleted file mode 100644 index e0f33291544..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RemovedNodeIT.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.token.TokenRange; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.ClassRule; -import org.junit.Test; - -public class RemovedNodeIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = - CustomCcmRule.builder() - // We need 4 nodes to run this test against DSE, because it requires at least 3 nodes to - // maintain RF=3 for keyspace system_distributed - .withNodes(4) - .build(); - - @Test - public void should_signal_and_destroy_pool_when_node_gets_removed() { - RemovalListener removalListener = new RemovalListener(); - try (CqlSession session = - SessionUtils.newSession(CCM_RULE, null, removalListener, null, null)) { - assertThat(session.getMetadata().getTokenMap()).isPresent(); - Set tokenRanges = session.getMetadata().getTokenMap().get().getTokenRanges(); - assertThat(tokenRanges).hasSize(4); - 
CCM_RULE.getCcmBridge().decommission(2); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> removalListener.removedNode != null); - Map pools = ((DefaultSession) session).getPools(); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> !pools.containsKey(removalListener.removedNode)); - await() - .pollInterval(500, TimeUnit.MILLISECONDS) - .atMost(60, TimeUnit.SECONDS) - .until(() -> session.getMetadata().getTokenMap().get().getTokenRanges().size() == 3); - } - } - - static class RemovalListener implements NodeStateListener { - - volatile Node removedNode; - - @Override - public void onRemove(@NonNull Node node) { - removedNode = node; - } - - @Override - public void onAdd(@NonNull Node node) {} - - @Override - public void onUp(@NonNull Node node) {} - - @Override - public void onDown(@NonNull Node node) {} - - @Override - public void close() throws Exception {} - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java deleted file mode 100644 index e2b3caeb1f4..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.session; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.example.guava.api.GuavaSession; -import com.datastax.oss.driver.example.guava.api.GuavaSessionUtils; -import com.datastax.oss.driver.example.guava.internal.DefaultGuavaSession; -import com.datastax.oss.driver.example.guava.internal.GuavaDriverContext; -import com.datastax.oss.driver.example.guava.internal.KeyRequest; -import com.datastax.oss.driver.example.guava.internal.KeyRequestProcessor; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.google.common.collect.Iterables; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * A suite of tests for 
exercising registration of custom {@link - * com.datastax.oss.driver.internal.core.session.RequestProcessor} implementations to add-in - * additional request handling and response types. - * - *

    Uses {@link DefaultGuavaSession} which is a specialized session implementation that uses - * {@link GuavaDriverContext} which overrides {@link - * DefaultDriverContext#getRequestProcessorRegistry()} to provide its own {@link - * com.datastax.oss.driver.internal.core.session.RequestProcessor} implementations for returning - * {@link ListenableFuture}s rather than {@link java.util.concurrent.CompletionStage}s in async - * method responses. - * - *

    {@link GuavaSession} provides execute method implementation shortcuts that mimics {@link - * CqlSession}'s async methods. - * - *

    {@link KeyRequestProcessor} is also registered for handling {@link KeyRequest}s which - * simplifies a certain query down to 1 parameter. - */ -@Category(ParallelizableTests.class) -public class RequestProcessorIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - public static final String KEY = "test"; - - @BeforeClass - public static void setupSchema() { - // table with clustering key where v1 == v0 * 2. - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k text, v0 int, v1 int, PRIMARY KEY(k, v0))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - for (int i = 0; i < 100; i++) { - SESSION_RULE - .session() - .execute( - SimpleStatement.builder("INSERT INTO test (k, v0, v1) VALUES (?, ?, ?)") - .addPositionalValues(KEY, i, i * 2) - .build()); - } - } - - private GuavaSession newSession(CqlIdentifier keyspace) { - return GuavaSessionUtils.builder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(keyspace) - .build(); - } - - @Test - public void should_use_custom_request_processor_for_prepareAsync() throws Exception { - try (GuavaSession session = newSession(SESSION_RULE.keyspace())) { - ListenableFuture preparedFuture = - session.prepareAsync("select * from test"); - - PreparedStatement prepared = Uninterruptibles.getUninterruptibly(preparedFuture); - - assertThat(prepared.getResultSetDefinitions().contains("k")).isTrue(); - assertThat(prepared.getResultSetDefinitions().contains("v0")).isTrue(); - assertThat(prepared.getResultSetDefinitions().contains("v1")).isTrue(); - - ListenableFuture future = session.executeAsync(prepared.bind()); - AsyncResultSet result = Uninterruptibles.getUninterruptibly(future); - 
assertThat(Iterables.size(result.currentPage())).isEqualTo(100); - } - } - - @Test - public void should_use_custom_request_processor_for_handling_special_request_type() - throws Exception { - try (GuavaSession session = newSession(SESSION_RULE.keyspace())) { - // RequestProcessor executes "select v from test where k = " and returns v as Integer. - int v1 = session.execute(new KeyRequest(5), KeyRequestProcessor.INT_TYPE); - assertThat(v1).isEqualTo(10); // v1 = v0 * 2 - - // RequestProcessor returns Integer.MIN_VALUE if key not found in data (no rows in result). - v1 = session.execute(new KeyRequest(200), KeyRequestProcessor.INT_TYPE); - assertThat(v1).isEqualTo(Integer.MIN_VALUE); - } - } - - @Test - public void should_use_custom_request_processor_for_executeAsync() throws Exception { - try (GuavaSession session = newSession(SESSION_RULE.keyspace())) { - ListenableFuture future = session.executeAsync("select * from test"); - AsyncResultSet result = Uninterruptibles.getUninterruptibly(future); - assertThat(Iterables.size(result.currentPage())).isEqualTo(100); - } - } - - @Test - public void should_throw_illegal_argument_exception_if_no_matching_processor_found() - throws Exception { - // Since cluster does not have a processor registered for returning ListenableFuture, an - // IllegalArgumentException - // should be thrown. 
- Throwable t = - catchThrowable( - () -> - SESSION_RULE - .session() - .execute( - SimpleStatement.newInstance("select * from test"), GuavaSession.ASYNC)); - - assertThat(t).isInstanceOf(IllegalArgumentException.class); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ShutdownIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ShutdownIT.java deleted file mode 100644 index 7763f1ba866..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ShutdownIT.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.session; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class ShutdownIT { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static final String QUERY_STRING = "select * from foo"; - - @Test - public void should_fail_requests_when_session_is_closed() throws Exception { - // Given - // Prime with a bit of delay to increase the chance that a query will be aborted in flight when - // we force-close the session - SIMULACRON_RULE - .cluster() - .prime(when(QUERY_STRING).then(noRows()).delay(20, TimeUnit.MILLISECONDS)); - CqlSession session = SessionUtils.newSession(SIMULACRON_RULE); - - // When - // Max out the in-flight requests on the connection (from a separate thread pool to get a bit of - // contention), then 
force-close the session abruptly. - Set unexpectedErrors = new ConcurrentSkipListSet<>(); - ExecutorService requestExecutor = Executors.newFixedThreadPool(4); - int maxConcurrentRequests = - session - .getContext() - .getConfig() - .getDefaultProfile() - .getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); - Semaphore semaphore = new Semaphore(maxConcurrentRequests); - CountDownLatch gotSessionClosedError = new CountDownLatch(1); - for (int i = 0; i < 4; i++) { - requestExecutor.execute( - () -> { - try { - while (!Thread.currentThread().isInterrupted()) { - semaphore.acquire(); - session - .executeAsync(QUERY_STRING) - .whenComplete( - (ignoredResult, error) -> { - semaphore.release(); - // Four things can happen: - // - DefaultSession.execute() detects that it's closed and fails the - // request immediately - // - the request was in flight and gets aborted when its channel is - // force-closed => ClosedConnectionException - // - the request races with the shutdown: it gets past execute() but by - // the time it tries to acquire a channel the pool was closed - // => NoNodeAvailableException - // - the request races with the channel closing: it acquires a channel, - // but by the time it tries to write on it is closing - // => AllNodesFailedException wrapping IllegalStateException - if (error instanceof IllegalStateException - && "Session is closed".equals(error.getMessage())) { - gotSessionClosedError.countDown(); - } else if (error instanceof AllNodesFailedException) { - AllNodesFailedException anfe = (AllNodesFailedException) error; - // if there were 0 errors, its a NoNodeAvailableException which is - // acceptable. 
- if (anfe.getAllErrors().size() > 0) { - assertThat(anfe.getAllErrors()).hasSize(1); - error = anfe.getAllErrors().values().iterator().next().get(0); - if (!(error instanceof IllegalStateException) - && !error.getMessage().endsWith("is closing")) { - unexpectedErrors.add(error.toString()); - } - } - } else if (error != null - && !(error instanceof ClosedConnectionException)) { - unexpectedErrors.add(error.toString()); - } - }); - } - } catch (InterruptedException e) { - // return - } - }); - } - TimeUnit.MILLISECONDS.sleep(1000); - session.forceCloseAsync(); - assertThat(gotSessionClosedError.await(10, TimeUnit.SECONDS)) - .as("Expected to get the 'Session is closed' error shortly after shutting down") - .isTrue(); - requestExecutor.shutdownNow(); - - // Then - assertThat(unexpectedErrors).isEmpty(); - } - - @Test - public void should_handle_getting_closed_twice() { - CqlSession session = SessionUtils.newSession(SIMULACRON_RULE); - session.close(); - session.close(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/specex/SpeculativeExecutionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/specex/SpeculativeExecutionIT.java deleted file mode 100644 index cc13c821b9e..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/specex/SpeculativeExecutionIT.java +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.specex; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.isBootstrapping; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy; -import 
com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import java.time.Duration; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class SpeculativeExecutionIT { - - // Note: it looks like shorter delays cause precision issues with Netty timers - private static final long SPECULATIVE_DELAY = 1000; - - private static final String QUERY_STRING = "select * from foo"; - private static final SimpleStatement QUERY = SimpleStatement.newInstance(QUERY_STRING); - - // Shared across all tests methods. - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - private final QueryCounter counter = - QueryCounter.builder(SIMULACRON_RULE.cluster()) - .withFilter((l) -> l.getQuery().equals(QUERY_STRING)) - .build(); - - @Before - public void clear() { - SIMULACRON_RULE.cluster().clearPrimes(true); - } - - @Test - public void should_not_start_speculative_executions_if_not_idempotent() { - primeNode( - 0, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - - try (CqlSession session = buildSession(2, SPECULATIVE_DELAY)) { - ResultSet resultSet = session.execute(QUERY.setIdempotent(false)); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(0); - - counter.assertNodeCounts(1, 0, 0); - } - } - - @Test - public void should_complete_from_first_speculative_execution_if_faster() { - primeNode( - 0, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - primeNode(1, when(QUERY_STRING).then(noRows())); - - try (CqlSession session = buildSession(2, SPECULATIVE_DELAY)) { - ResultSet resultSet = 
session.execute(QUERY); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(1); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(1); - - counter.assertNodeCounts(1, 1, 0); - } - } - - @Test - public void should_complete_from_initial_execution_if_speculative_is_started_but_slower() { - primeNode( - 0, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - primeNode( - 1, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - - try (CqlSession session = buildSession(2, SPECULATIVE_DELAY)) { - ResultSet resultSet = session.execute(QUERY); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(1); - - counter.assertNodeCounts(1, 1, 0); - } - } - - @Test - public void should_complete_from_second_speculative_execution_if_faster() { - primeNode( - 0, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - primeNode( - 1, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - primeNode(2, when(QUERY_STRING).then(noRows())); - - try (CqlSession session = buildSession(3, SPECULATIVE_DELAY)) { - ResultSet resultSet = session.execute(QUERY); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(2); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(2); - - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test - public void should_retry_within_initial_execution() { - // This triggers a retry on the next node: - primeNode(0, when(QUERY_STRING).then(isBootstrapping())); - primeNode(1, when(QUERY_STRING).then(noRows())); - - try (CqlSession session = buildSession(3, SPECULATIVE_DELAY)) { - ResultSet resultSet = session.execute(QUERY); - - 
assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(0); - - counter.assertNodeCounts(1, 1, 0); - } - } - - @Test - public void should_retry_within_speculative_execution() { - primeNode( - 0, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - // This triggers a retry on the next node: - primeNode(1, when(QUERY_STRING).then(isBootstrapping())); - primeNode(2, when(QUERY_STRING).then(noRows())); - - try (CqlSession session = buildSession(3, SPECULATIVE_DELAY)) { - ResultSet resultSet = session.execute(QUERY); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(1); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(1); - - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test - public void should_wait_for_last_execution_to_complete() { - // Initial execution uses node0 which takes a long time to reply - primeNode( - 0, when(QUERY_STRING).then(noRows()).delay(3 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - // specex1 runs fast, but only encounters failing nodes and stops - primeNode(1, when(QUERY_STRING).then(isBootstrapping())); - primeNode(2, when(QUERY_STRING).then(isBootstrapping())); - - try (CqlSession session = buildSession(2, SPECULATIVE_DELAY)) { - ResultSet resultSet = session.execute(QUERY); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(0); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(1); - - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_fail_if_all_executions_reach_end_of_query_plan() { - // Each execution gets a BOOTSTRAPPING response, but by the time it retries, the query plan will - // be empty. 
- for (int i = 0; i < 3; i++) { - primeNode( - i, - when(QUERY_STRING) - .then(isBootstrapping()) - .delay((3 - i) * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - } - try (CqlSession session = buildSession(3, SPECULATIVE_DELAY)) { - session.execute(QUERY); - } finally { - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test - public void should_allow_zero_delay() { - // All executions start at the same time, but one of them is faster - for (int i = 0; i < 2; i++) { - primeNode( - i, when(QUERY_STRING).then(noRows()).delay(2 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - } - primeNode(2, when(QUERY_STRING).then(noRows()).delay(SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - - try (CqlSession session = buildSession(3, 0)) { - ResultSet resultSet = session.execute(QUERY); - - assertThat(resultSet.getExecutionInfo().getSuccessfulExecutionIndex()).isEqualTo(2); - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(2); - - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test - public void should_use_policy_from_request_profile() { - // each host takes same amount of time - for (int i = 0; i < 2; i++) { - primeNode( - i, when(QUERY_STRING).then(noRows()).delay(2 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - } - - // Set large delay for default so we ensure profile is used. - try (CqlSession session = buildSessionWithProfile(3, 100, 2, 0)) { - ResultSet resultSet = session.execute(QUERY.setExecutionProfileName("profile1")); - - // Expect only 1 speculative execution as that is all profile called for. - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(1); - - // Expect node 0 and 1 to be queried, but not 2. 
- counter.assertNodeCounts(1, 1, 0); - } - } - - @Test - public void should_use_policy_from_request_profile_when_not_configured_in_config() { - // each host takes same amount of time - for (int i = 0; i < 2; i++) { - primeNode( - i, when(QUERY_STRING).then(noRows()).delay(2 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - } - - // Disable in primary configuration - try (CqlSession session = buildSessionWithProfile(-1, -1, 3, 0)) { - ResultSet resultSet = session.execute(QUERY.setExecutionProfileName("profile1")); - - // Expect speculative executions on each node. - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(2); - - // Expect all nodes to be queried. - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test - public void should_use_policy_from_config_when_not_configured_in_request_profile() { - // each host takes same amount of time - for (int i = 0; i < 2; i++) { - primeNode( - i, when(QUERY_STRING).then(noRows()).delay(2 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - } - - try (CqlSession session = buildSessionWithProfile(3, 0, 3, 0)) { - // use profile where speculative execution is not configured. - ResultSet resultSet = session.execute(QUERY.setExecutionProfileName("profile2")); - - // Expect speculative executions on each node since default configuration is used. - assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(2); - - counter.assertNodeCounts(1, 1, 1); - } - } - - @Test - public void should_not_speculatively_execute_when_defined_in_profile() { - // each host takes same amount of time - for (int i = 0; i < 2; i++) { - primeNode( - i, when(QUERY_STRING).then(noRows()).delay(2 * SPECULATIVE_DELAY, TimeUnit.MILLISECONDS)); - } - - // Disable in profile - try (CqlSession session = buildSessionWithProfile(3, 100, -1, -1)) { - ResultSet resultSet = session.execute(QUERY.setExecutionProfileName("profile1")); - - // Expect no speculative executions. 
- assertThat(resultSet.getExecutionInfo().getSpeculativeExecutionCount()).isEqualTo(0); - - // Expect only node 0 to be queried since speculative execution is disabled for this profile. - counter.assertNodeCounts(1, 0, 0); - } - } - - // Build a new Cluster instance for each test, because we need different configurations - private CqlSession buildSession(int maxSpeculativeExecutions, long speculativeDelayMs) { - return SessionUtils.newSession( - SIMULACRON_RULE, - SessionUtils.configLoaderBuilder() - .withDuration( - DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(SPECULATIVE_DELAY * 10)) - .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, SortingLoadBalancingPolicy.class) - .withClass( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - ConstantSpeculativeExecutionPolicy.class) - .withInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, maxSpeculativeExecutions) - .withDuration( - DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, - Duration.ofMillis(speculativeDelayMs)) - .build()); - } - - private CqlSession buildSessionWithProfile( - int defaultMaxSpeculativeExecutions, - long defaultSpeculativeDelayMs, - int profile1MaxSpeculativeExecutions, - long profile1SpeculativeDelayMs) { - - ProgrammaticDriverConfigLoaderBuilder builder = - SessionUtils.configLoaderBuilder() - .withDuration( - DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(SPECULATIVE_DELAY * 10)) - .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, SortingLoadBalancingPolicy.class); - - if (defaultMaxSpeculativeExecutions != -1 || defaultSpeculativeDelayMs != -1) { - builder = - builder.withClass( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - ConstantSpeculativeExecutionPolicy.class); - if (defaultMaxSpeculativeExecutions != -1) { - builder = - builder.withInt( - 
DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, defaultMaxSpeculativeExecutions); - } - if (defaultSpeculativeDelayMs != -1) { - builder = - builder.withDuration( - DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, - Duration.ofMillis(defaultSpeculativeDelayMs)); - } - } else { - builder = - builder.withClass( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - NoSpeculativeExecutionPolicy.class); - } - - builder = builder.startProfile("profile1"); - if (profile1MaxSpeculativeExecutions != -1 || profile1SpeculativeDelayMs != -1) { - builder = - builder.withClass( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - ConstantSpeculativeExecutionPolicy.class); - - if (profile1MaxSpeculativeExecutions != -1) { - builder = - builder.withInt( - DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, profile1MaxSpeculativeExecutions); - } - if (profile1SpeculativeDelayMs != -1) { - builder = - builder.withDuration( - DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, - Duration.ofMillis(profile1SpeculativeDelayMs)); - } - } else { - builder = - builder.withClass( - DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, - NoSpeculativeExecutionPolicy.class); - } - - builder = - builder.startProfile("profile2").withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE"); - - CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, builder.build()); - - // validate profile data - DriverContext context = session.getContext(); - DriverConfig driverConfig = context.getConfig(); - assertThat(driverConfig.getProfiles()).containsKeys("profile1", "profile2"); - - assertThat(context.getSpeculativeExecutionPolicies()) - .hasSize(3) - .containsKeys(DriverExecutionProfile.DEFAULT_NAME, "profile1", "profile2"); - - SpeculativeExecutionPolicy defaultPolicy = - context.getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); - SpeculativeExecutionPolicy policy1 = context.getSpeculativeExecutionPolicy("profile1"); - SpeculativeExecutionPolicy policy2 = 
context.getSpeculativeExecutionPolicy("profile2"); - Class expectedDefaultPolicyClass = - defaultMaxSpeculativeExecutions != -1 || defaultSpeculativeDelayMs != -1 - ? ConstantSpeculativeExecutionPolicy.class - : NoSpeculativeExecutionPolicy.class; - assertThat(defaultPolicy).isInstanceOf(expectedDefaultPolicyClass).isSameAs(policy2); - - // If configuration was same, same policy instance should be used. - if (defaultMaxSpeculativeExecutions == profile1MaxSpeculativeExecutions - && defaultSpeculativeDelayMs == profile1SpeculativeDelayMs) { - assertThat(defaultPolicy).isSameAs(policy1); - } else { - assertThat(defaultPolicy).isNotSameAs(policy1); - } - - Class expectedProfile1PolicyClass = - profile1MaxSpeculativeExecutions != -1 || profile1SpeculativeDelayMs != -1 - ? ConstantSpeculativeExecutionPolicy.class - : NoSpeculativeExecutionPolicy.class; - assertThat(policy1).isInstanceOf(expectedProfile1PolicyClass); - - return session; - } - - private void primeNode(int id, PrimeDsl.PrimeBuilder primeBuilder) { - SIMULACRON_RULE.cluster().node(id).prime(primeBuilder); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java deleted file mode 100644 index e2e39be5cd6..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.ssl; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import org.junit.ClassRule; -import org.junit.Test; - -public class DefaultSslEngineFactoryHostnameValidationIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslLocalhostCn().build(); - - /** - * Ensures that SSL connectivity can be established with hostname validation enabled when the - * server's certificate has a common name that matches its hostname. In this case the certificate - * uses a CN of 'localhost' which is expected to work, but may not if localhost does not resolve - * to 127.0.0.1. 
- */ - @Test - public void should_connect_if_hostname_validation_enabled_and_hostname_matches() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, true) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryIT.java deleted file mode 100644 index a2afeade3ce..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryIT.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.ssl; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.assertions.Assertions; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import java.net.InetSocketAddress; -import org.junit.ClassRule; -import org.junit.Test; - -public class DefaultSslEngineFactoryIT { - - @ClassRule public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSsl().build(); - - @Test - public void should_connect_with_ssl() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_if_hostname_validation_enabled_and_hostname_does_not_match() { - // should not succeed as certificate does not have a CN that would match hostname, - // (unless hostname is node1). 
- DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_if_truststore_not_provided() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_if_not_using_ssl() { - try (CqlSession session = SessionUtils.newSession(CCM_RULE)) { - session.execute("select * from system.local"); - } - } - - public static class InstrumentedSslEngineFactory extends DefaultSslEngineFactory { - int countReverseLookups = 0; - int countNoLookups = 0; - - public InstrumentedSslEngineFactory(DriverContext driverContext) { - super(driverContext); - } - - @Override - protected String hostMaybeFromDnsReverseLookup(InetSocketAddress addr) { - countReverseLookups++; - return super.hostMaybeFromDnsReverseLookup(addr); - } - - @Override - protected String hostNoLookup(InetSocketAddress addr) { - countNoLookups++; - return super.hostNoLookup(addr); - } - }; - - @Test - public void should_respect_config_for_san_resolution() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, 
InstrumentedSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - InstrumentedSslEngineFactory ssl = - (InstrumentedSslEngineFactory) session.getContext().getSslEngineFactory().get(); - Assertions.assertThat(ssl.countReverseLookups).isGreaterThan(0); - Assertions.assertThat(ssl.countNoLookups).isEqualTo(0); - } - - loader = - SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, InstrumentedSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .withBoolean(DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, false) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - InstrumentedSslEngineFactory ssl = - (InstrumentedSslEngineFactory) session.getContext().getSslEngineFactory().get(); - Assertions.assertThat(ssl.countReverseLookups).isEqualTo(0); - Assertions.assertThat(ssl.countNoLookups).isGreaterThan(0); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java deleted file mode 100644 index fc6c67c0307..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.ssl; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(IsolatedTests.class) -public class DefaultSslEngineFactoryPropertyBasedIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslLocalhostCn().build(); - - @Test - public void should_connect_with_ssl() { - System.setProperty( - "javax.net.ssl.trustStore", CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()); - System.setProperty( - "javax.net.ssl.trustStorePassword", CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD); - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - 
.withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java deleted file mode 100644 index 43f2b9d5a99..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.ssl; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.IsolatedTests; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(IsolatedTests.class) -public class DefaultSslEngineFactoryPropertyBasedWithClientAuthIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslAuth().build(); - - @Test - public void should_connect_with_ssl_using_client_auth() { - System.setProperty( - "javax.net.ssl.keyStore", CcmBridge.DEFAULT_CLIENT_KEYSTORE_FILE.getAbsolutePath()); - System.setProperty( - "javax.net.ssl.keyStorePassword", CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD); - System.setProperty( - "javax.net.ssl.trustStore", CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()); - System.setProperty( - "javax.net.ssl.trustStorePassword", CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD); - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java deleted 
file mode 100644 index ab98dcc953d..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.ssl; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import org.junit.ClassRule; -import org.junit.Test; - -public class DefaultSslEngineFactoryWithClientAuthIT { - - @ClassRule - public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslAuth().build(); - - @Test - public void should_connect_with_ssl_using_client_auth() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .withString( - DefaultDriverOption.SSL_KEYSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_KEYSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_KEYSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_with_ssl_using_client_auth_if_keystore_not_set() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - 
.withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/ProgrammaticSslIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/ProgrammaticSslIT.java deleted file mode 100644 index 148c7c91baa..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/ProgrammaticSslIT.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.ssl; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory; -import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.security.KeyStore; -import java.security.SecureRandom; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; -import org.junit.ClassRule; -import org.junit.Test; - -public class ProgrammaticSslIT { - - @ClassRule public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSsl().build(); - - @Test - public void should_connect_with_programmatic_factory() { - SslEngineFactory factory = new ProgrammaticSslEngineFactory(createSslContext()); - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withSslEngineFactory(factory) - .build()) { - session.execute("select * from system.local"); - } - } - - @Test - public void should_connect_with_programmatic_ssl_context() { - SSLContext sslContext = createSslContext(); - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withSslContext(sslContext) - .build()) { - session.execute("select * from system.local"); - } - } - - private static SSLContext createSslContext() { - try { - SSLContext context = SSLContext.getInstance("SSL"); - TrustManagerFactory tmf = - TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - try (InputStream tsf = - Files.newInputStream( - Paths.get(CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()))) { - KeyStore ts = 
KeyStore.getInstance("JKS"); - char[] password = CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD.toCharArray(); - ts.load(tsf, password); - tmf.init(ts); - } - KeyManagerFactory kmf = - KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - try (InputStream ksf = - Files.newInputStream( - Paths.get(CcmBridge.DEFAULT_CLIENT_KEYSTORE_FILE.getAbsolutePath()))) { - KeyStore ks = KeyStore.getInstance("JKS"); - char[] password = CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD.toCharArray(); - ks.load(ksf, password); - kmf.init(ks, password); - } - context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); - return context; - } catch (Exception e) { - throw new AssertionError("Unexpected error while creating SSL context", e); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/throttling/ThrottlingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/throttling/ThrottlingIT.java deleted file mode 100644 index 6fa1a37355b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/throttling/ThrottlingIT.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.throttling; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class ThrottlingIT { - - private static final String QUERY = "select * from foo"; - private static final int maxConcurrentRequests = 10; - private static final int maxQueueSize = 10; - - @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private DriverConfigLoader loader = null; - - @Before - public void setUp() { - // Add a delay so that requests don't complete during the test - simulacron - .cluster() - .prime(PrimeDsl.when(QUERY).then(PrimeDsl.noRows()).delay(5, TimeUnit.SECONDS)); - loader = - SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.REQUEST_THROTTLER_CLASS, - ConcurrencyLimitingRequestThrottler.class) - .withInt( - DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS, - maxConcurrentRequests) - 
.withInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE, maxQueueSize) - .build(); - } - - @Test - public void should_reject_request_when_throttling_by_concurrency() { - try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { - - // Saturate the session and fill the queue - for (int i = 0; i < maxConcurrentRequests + maxQueueSize; i++) { - session.executeAsync(QUERY); - } - - // The next query should be rejected - Throwable t = catchThrowable(() -> session.execute(QUERY)); - - assertThat(t) - .isInstanceOf(RequestThrottlingException.class) - .hasMessage( - "The session has reached its maximum capacity " - + "(concurrent requests: 10, queue size: 10)"); - } - } - - @Test - public void should_propagate_cancel_to_throttler() { - try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { - - // Try to saturate the session and fill the queue - for (int i = 0; i < maxConcurrentRequests + maxQueueSize; i++) { - CompletionStage future = session.executeAsync(QUERY); - future.toCompletableFuture().cancel(true); - } - - // The next query should be successful, because the previous queries were cancelled - session.execute(QUERY); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestIdGeneratorIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestIdGeneratorIT.java deleted file mode 100644 index 516a62bb1f7..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestIdGeneratorIT.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.tracker; - -import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Map; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class RequestIdGeneratorIT { - private CcmRule ccmRule = CcmRule.getInstance(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule); - - @Test - public void should_write_uuid_to_custom_payload_with_key() { - DriverConfigLoader loader = - 
SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, "UuidRequestIdGenerator") - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - String query = "SELECT * FROM system.local"; - ResultSet rs = session.execute(query); - ByteBuffer id = rs.getExecutionInfo().getRequest().getCustomPayload().get("request-id"); - assertThat(id.remaining()).isEqualTo(73); - } - } - - @Test - public void should_write_default_request_id_to_custom_payload_with_key() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString( - DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, "W3CContextRequestIdGenerator") - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - String query = "SELECT * FROM system.local"; - ResultSet rs = session.execute(query); - ByteBuffer id = rs.getExecutionInfo().getRequest().getCustomPayload().get("request-id"); - assertThat(id.remaining()).isEqualTo(55); - } - } - - @Test - public void should_use_customized_request_id_generator() { - RequestIdGenerator myRequestIdGenerator = - new RequestIdGenerator() { - @Override - public String getSessionRequestId() { - return "123"; - } - - @Override - public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { - return "456"; - } - - @Override - public Statement getDecoratedStatement( - @NonNull Statement statement, @NonNull String requestId) { - Map customPayload = - NullAllowingImmutableMap.builder() - .putAll(statement.getCustomPayload()) - .put("trace_key", ByteBuffer.wrap(requestId.getBytes(StandardCharsets.UTF_8))) - .build(); - return statement.setCustomPayload(customPayload); - } - }; - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(ccmRule.getContactPoints()) - .withRequestIdGenerator(myRequestIdGenerator) - .build()) { - String query = "SELECT * FROM system.local"; - ResultSet rs = session.execute(query); - ByteBuffer 
id = rs.getExecutionInfo().getRequest().getCustomPayload().get("trace_key"); - assertThat(id).isEqualTo(ByteBuffer.wrap("456".getBytes(StandardCharsets.UTF_8))); - } - } - - @Test - public void should_not_write_id_to_custom_payload_when_key_is_not_set() { - DriverConfigLoader loader = SessionUtils.configLoaderBuilder().build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - String query = "SELECT * FROM system.local"; - ResultSet rs = session.execute(query); - assertThat(rs.getExecutionInfo().getRequest().getCustomPayload().get("request-id")).isNull(); - } - } - - @Test - public void should_succeed_with_null_value_in_custom_payload() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString( - DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, "W3CContextRequestIdGenerator") - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - String query = "SELECT * FROM system.local"; - Map customPayload = - new NullAllowingImmutableMap.Builder(1).put("my_key", null).build(); - SimpleStatement statement = - SimpleStatement.newInstance(query).setCustomPayload(customPayload); - assertThatStage(session.executeAsync(statement)).isSuccess(); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestLoggerIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestLoggerIT.java deleted file mode 100644 index ae2c46fe4a0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestLoggerIT.java +++ /dev/null @@ -1,407 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.tracker; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import ch.qos.logback.classic.spi.ILoggingEvent; -import ch.qos.logback.classic.spi.LoggingEvent; -import ch.qos.logback.core.Appender; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; 
-import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; -import java.util.regex.Pattern; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.internal.verification.VerificationModeFactory; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.verification.Timeout; -import org.slf4j.LoggerFactory; - -@RunWith(MockitoJUnitRunner.class) -@Category(ParallelizableTests.class) -public class RequestLoggerIT { - private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("\\[s\\d*\\|\\d*]"); - - @SuppressWarnings("UnnecessaryLambda") - private static final Predicate WITH_PER_REQUEST_PREFIX = - log -> LOG_PREFIX_PER_REQUEST.matcher(log).lookingAt(); - - private static final Pattern LOG_PREFIX_WITH_EXECUTION_NUMBER = - Pattern.compile("\\[s\\d*\\|\\d*\\|\\d*]"); - - @SuppressWarnings("UnnecessaryLambda") - private static final Predicate WITH_EXECUTION_PREFIX = - log -> LOG_PREFIX_WITH_EXECUTION_NUMBER.matcher(log).lookingAt(); - - private static final String QUERY = "SELECT release_version FROM system.local"; - - private final SimulacronRule simulacronRule = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - private final DriverConfigLoader requestLoader = - SessionUtils.configLoaderBuilder() - 
.withClassList( - DefaultDriverOption.REQUEST_TRACKER_CLASSES, - Collections.singletonList(RequestLogger.class)) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, true) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, true) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, true) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH) - .withBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, true) - .startProfile("low-threshold") - .withDuration(DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, Duration.ofNanos(1)) - .startProfile("no-logs") - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, false) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, false) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, false) - .startProfile("no-traces") - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, false) - .build(); - - private final SessionRule sessionRuleRequest = - SessionRule.builder(simulacronRule).withConfigLoader(requestLoader).build(); - - private final DriverConfigLoader nodeLoader = - SessionUtils.configLoaderBuilder() - .withClassList( - DefaultDriverOption.REQUEST_TRACKER_CLASSES, - Collections.singletonList(RequestNodeLoggerExample.class)) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, true) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, true) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, true) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, - 
RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH) - .withBoolean( - DefaultDriverOption.REQUEST_LOGGER_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH) - .withInt( - DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, - RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, true) - .startProfile("low-threshold") - .withDuration(DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, Duration.ofNanos(1)) - .startProfile("no-logs") - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, false) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, false) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, false) - .startProfile("no-traces") - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, false) - .startProfile("sorting-lbp") - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, SortingLoadBalancingPolicy.class) - .build(); - - private final SessionRule sessionRuleNode = - SessionRule.builder(simulacronRule).withConfigLoader(nodeLoader).build(); - - private final SessionRule sessionRuleDefaults = - SessionRule.builder(simulacronRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withClassList( - DefaultDriverOption.REQUEST_TRACKER_CLASSES, - Collections.singletonList(RequestLogger.class)) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, true) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, true) - .startProfile("low-threshold") - .withDuration( - DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, Duration.ofNanos(1)) - .startProfile("no-logs") - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, false) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, false) - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, false) - 
.startProfile("no-traces") - .withBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, false) - .build()) - .build(); - - @Rule - public TestRule chain = - RuleChain.outerRule(simulacronRule) - .around(sessionRuleRequest) - .around(sessionRuleNode) - .around(sessionRuleDefaults); - - @Captor private ArgumentCaptor loggingEventCaptor; - @Mock private Appender appender; - private Logger logger; - private Level oldLevel; - - @Before - public void setup() { - logger = (Logger) LoggerFactory.getLogger(RequestLogger.class); - oldLevel = logger.getLevel(); - logger.setLevel(Level.INFO); - logger.addAppender(appender); - } - - @After - public void teardown() { - logger.detachAppender(appender); - logger.setLevel(oldLevel); - } - - @Test - public void should_log_successful_request() { - // Given - simulacronRule.cluster().prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - - // When - sessionRuleRequest.session().execute(QUERY); - - // Then - verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .contains("Success", "[0 values]", QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - } - - @Test - public void should_log_successful_request_with_defaults() { - // Given - simulacronRule.cluster().prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - - // When - sessionRuleDefaults.session().execute(QUERY); - - // Then - verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .contains("Success", "[0 values]", QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - } - - @Test - public void should_log_failed_request_with_stack_trace() { - // Given - simulacronRule.cluster().prime(when(QUERY).then(serverError("test"))); - - // When - try { - sessionRuleRequest.session().execute(QUERY); - fail("Expected a ServerError"); - } catch (ServerError error) { - // expected - } - - // Then - verify(appender, 
timeout(5000)).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getFormattedMessage()) - .contains("Error", "[0 values]", QUERY) - .doesNotContain(ServerError.class.getName()) - .matches(WITH_PER_REQUEST_PREFIX); - assertThat(log.getThrowableProxy().getClassName()).isEqualTo(ServerError.class.getName()); - } - - @Test - public void should_log_failed_request_with_stack_trace_with_defaults() { - // Given - simulacronRule.cluster().prime(when(QUERY).then(serverError("test"))); - - // When - try { - sessionRuleDefaults.session().execute(QUERY); - fail("Expected a ServerError"); - } catch (ServerError error) { - // expected - } - - // Then - verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getFormattedMessage()) - .contains("Error", "[0 values]", QUERY, ServerError.class.getName()) - .matches(WITH_PER_REQUEST_PREFIX); - } - - @Test - public void should_log_failed_request_without_stack_trace() { - // Given - simulacronRule.cluster().prime(when(QUERY).then(serverError("test"))); - - // When - try { - sessionRuleRequest - .session() - .execute(SimpleStatement.builder(QUERY).setExecutionProfileName("no-traces").build()); - fail("Expected a ServerError"); - } catch (ServerError error) { - // expected - } - - // Then - verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); - ILoggingEvent log = loggingEventCaptor.getValue(); - assertThat(log.getFormattedMessage()) - .contains("Error", "[0 values]", QUERY, ServerError.class.getName()) - .matches(WITH_PER_REQUEST_PREFIX); - assertThat(log.getThrowableProxy()).isNull(); - } - - @Test - public void should_log_slow_request() { - // Given - simulacronRule.cluster().prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - - // When - sessionRuleRequest - .session() - 
.execute(SimpleStatement.builder(QUERY).setExecutionProfileName("low-threshold").build()); - - // Then - verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); - assertThat(loggingEventCaptor.getValue().getFormattedMessage()) - .contains("Slow", "[0 values]", QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - } - - @Test - public void should_not_log_when_disabled() throws InterruptedException { - // Given - simulacronRule.cluster().prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - - // When - sessionRuleRequest - .session() - .execute(SimpleStatement.builder(QUERY).setExecutionProfileName("no-logs").build()); - - // Then - // We expect no messages. The request logger is invoked asynchronously, so simply wait a bit - TimeUnit.MILLISECONDS.sleep(500); - verify(appender, never()).doAppend(any(LoggingEvent.class)); - } - - @Test - public void should_log_failed_nodes_on_successful_request() { - // Given - simulacronRule - .cluster() - .node(0) - .prime(when(QUERY).then(unavailable(ConsistencyLevel.ONE, 1, 3))); - simulacronRule - .cluster() - .node(1) - .prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - simulacronRule - .cluster() - .node(2) - .prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - - // When - sessionRuleNode - .session() - // use the sorting LBP here to ensure that node 0 is always hit first - .execute(SimpleStatement.builder(QUERY).setExecutionProfileName("sorting-lbp").build()); - - // Then - verify(appender, new Timeout(5000, VerificationModeFactory.times(3))) - .doAppend(loggingEventCaptor.capture()); - List events = loggingEventCaptor.getAllValues(); - assertThat(events.get(0).getFormattedMessage()) - .contains("Error", "[0 values]", QUERY) - .matches(WITH_EXECUTION_PREFIX); - assertThat(events.get(1).getFormattedMessage()) - .contains("Success", "[0 values]", QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - assertThat(events.get(2).getFormattedMessage()) - .contains("Success", "[0 values]", 
QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - } - - @Test - public void should_log_successful_nodes_on_successful_request() { - simulacronRule - .cluster() - .node(0) - .prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - simulacronRule - .cluster() - .node(1) - .prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - simulacronRule - .cluster() - .node(2) - .prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); - - // When - sessionRuleNode.session().execute(QUERY); - - // Then - verify(appender, new Timeout(5000, VerificationModeFactory.times(2))) - .doAppend(loggingEventCaptor.capture()); - List events = loggingEventCaptor.getAllValues(); - assertThat(events.get(0).getFormattedMessage()) - .contains("Success", "[0 values]", QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - assertThat(events.get(1).getFormattedMessage()) - .contains("Success", "[0 values]", QUERY) - .matches(WITH_PER_REQUEST_PREFIX); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestNodeLoggerExample.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestNodeLoggerExample.java deleted file mode 100644 index 8eb2fb80a73..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestNodeLoggerExample.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.tracker; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class RequestNodeLoggerExample extends RequestLogger { - - public RequestNodeLoggerExample(DriverContext context) { - super(new RequestLogFormatter(context)); - } - - @Override - public void onNodeError( - @NonNull Request request, - @NonNull Throwable error, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - if (!executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED)) { - return; - } - - int maxQueryLength = - executionProfile.getInt(DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH); - boolean showValues = executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_VALUES); - int maxValues = - showValues ? executionProfile.getInt(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES) : 0; - int maxValueLength = - showValues - ? 
executionProfile.getInt(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH) - : 0; - boolean showStackTraces = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES); - - logError( - request, - error, - latencyNanos, - node, - maxQueryLength, - showValues, - maxValues, - maxValueLength, - showStackTraces, - nodeRequestLogPrefix); - } - - @Override - public void onNodeSuccess( - @NonNull Request request, - long latencyNanos, - @NonNull DriverExecutionProfile executionProfile, - @NonNull Node node, - @NonNull String nodeRequestLogPrefix) { - boolean successEnabled = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED); - boolean slowEnabled = - executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED); - if (!successEnabled && !slowEnabled) { - return; - } - - long slowThresholdNanos = - executionProfile.isDefined(DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD) - ? executionProfile - .getDuration(DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD) - .toNanos() - : Long.MAX_VALUE; - boolean isSlow = latencyNanos > slowThresholdNanos; - if ((isSlow && !slowEnabled) || (!isSlow && !successEnabled)) { - return; - } - - int maxQueryLength = - executionProfile.getInt(DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH); - boolean showValues = executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_VALUES); - int maxValues = - showValues ? executionProfile.getInt(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES) : 0; - int maxValueLength = - showValues - ? 
executionProfile.getInt(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH) - : 0; - - logSuccess( - request, - latencyNanos, - isSlow, - node, - maxQueryLength, - showValues, - maxValues, - maxValueLength, - nodeRequestLogPrefix); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/CqlIntToStringCodec.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/CqlIntToStringCodec.java deleted file mode 100644 index f509439fe35..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/CqlIntToStringCodec.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.core.type.codec; - -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A sample user codec implementation that we use in our tests. - * - *

    It maps a CQL string to a Java string containing its textual representation. - */ -public class CqlIntToStringCodec extends MappingCodec { - - public CqlIntToStringCodec() { - super(TypeCodecs.INT, GenericType.STRING); - } - - @Nullable - @Override - protected String innerToOuter(@Nullable Integer value) { - return value == null ? null : value.toString(); - } - - @Nullable - @Override - protected Integer outerToInner(@Nullable String value) { - return value == null ? null : Integer.parseInt(value); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/ExtraTypeCodecsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/ExtraTypeCodecsIT.java deleted file mode 100644 index c5db0376efb..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/ExtraTypeCodecsIT.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import java.util.stream.Stream; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ExtraTypeCodecsIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = 
RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private enum TableField { - cql_text("text_value", "text"), - cql_int("integer_value", "int"), - cql_vector("vector_value", "vector"), - cql_list_of_text("list_of_text_value", "list"), - cql_timestamp("timestamp_value", "timestamp"), - cql_boolean("boolean_value", "boolean"), - ; - - final String name; - final String ty; - - TableField(String name, String ty) { - this.name = name; - this.ty = ty; - } - - private String definition() { - return String.format("%s %s", name, ty); - } - } - - @BeforeClass - public static void setupSchema() { - List fieldDefinitions = new ArrayList<>(); - fieldDefinitions.add("key uuid PRIMARY KEY"); - Stream.of(TableField.values()) - .forEach( - tf -> { - // TODO: Move this check to BackendRequirementRule once JAVA-3069 is resolved. - if (tf == TableField.cql_vector - && CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) < 0) { - // don't add vector type before cassandra version 5.0 - return; - } - fieldDefinitions.add(tf.definition()); - }); - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - String.format( - "CREATE TABLE IF NOT EXISTS extra_type_codecs_it (%s)", - String.join(", ", fieldDefinitions))) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - private void insertAndRead(TableField field, T value, TypeCodec codec) { - CqlSession session = SESSION_RULE.session(); - // write value under new key using provided codec - UUID key = UUID.randomUUID(); - - PreparedStatement preparedInsert = - session.prepare( - SimpleStatement.builder( - String.format( - "INSERT INTO extra_type_codecs_it (key, %s) VALUES (?, ?)", field.name)) - .build()); - BoundStatement boundInsert = - preparedInsert - .boundStatementBuilder() - .setUuid("key", key) - .set(field.name, value, codec) - .build(); - session.execute(boundInsert); - - // read value using provided codec and assert result - PreparedStatement preparedSelect = - session.prepare( - 
SimpleStatement.builder( - String.format("SELECT %s FROM extra_type_codecs_it WHERE key = ?", field.name)) - .build()); - BoundStatement boundSelect = preparedSelect.boundStatementBuilder().setUuid("key", key).build(); - assertThat(session.execute(boundSelect).one().get(field.name, codec)).isEqualTo(value); - } - - @Test - public void enum_names_of() { - insertAndRead( - TableField.cql_text, TestEnum.value1, ExtraTypeCodecs.enumNamesOf(TestEnum.class)); - } - - @Test - public void enum_ordinals_of() { - insertAndRead( - TableField.cql_int, TestEnum.value1, ExtraTypeCodecs.enumOrdinalsOf(TestEnum.class)); - } - - // Also requires -Dccm.branch=vsearch and the ability to build that branch locally - @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "5.0.0") - @Test - public void float_to_vector_array() { - // @BackRequirement on test methods that use @ClassRule to configure CcmRule require @Rule - // BackendRequirementRule included with fix JAVA-3069. Until then we will ignore this test with - // an assume. 
- Assume.assumeTrue( - "Requires Cassandra 5.0 or greater", - CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) >= 0); - insertAndRead( - TableField.cql_vector, - new float[] {1.1f, 0f, Float.NaN}, - ExtraTypeCodecs.floatVectorToArray(3)); - } - - @Test - public void json_java_class() { - insertAndRead( - TableField.cql_text, - new TestJsonAnnotatedPojo("example", Arrays.asList(1, 2, 3)), - ExtraTypeCodecs.json(TestJsonAnnotatedPojo.class)); - } - - @Test - public void json_java_class_and_object_mapper() { - insertAndRead( - TableField.cql_text, - TestPojo.create(1, "abc", "def"), - ExtraTypeCodecs.json(TestPojo.class, new ObjectMapper())); - } - - @Test - public void list_to_array_of() { - insertAndRead( - TableField.cql_list_of_text, - new String[] {"hello", "kitty"}, - ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT)); - } - - @Test - public void local_timestamp_at() { - ZoneId systemZoneId = ZoneId.systemDefault(); - insertAndRead( - TableField.cql_timestamp, - LocalDateTime.now(systemZoneId).truncatedTo(ChronoUnit.MILLIS), - ExtraTypeCodecs.localTimestampAt(systemZoneId)); - } - - @Test - public void optional_of() { - insertAndRead( - TableField.cql_boolean, Optional.empty(), ExtraTypeCodecs.optionalOf(TypeCodecs.BOOLEAN)); - insertAndRead( - TableField.cql_boolean, Optional.of(true), ExtraTypeCodecs.optionalOf(TypeCodecs.BOOLEAN)); - } - - @Test - public void timestamp_at() { - ZoneId systemZoneId = ZoneId.systemDefault(); - insertAndRead( - TableField.cql_timestamp, - Instant.now().truncatedTo(ChronoUnit.MILLIS), - ExtraTypeCodecs.timestampAt(systemZoneId)); - } - - @Test - public void timestamp_millis_at() { - ZoneId systemZoneId = ZoneId.systemDefault(); - insertAndRead( - TableField.cql_timestamp, - Instant.now().toEpochMilli(), - ExtraTypeCodecs.timestampMillisAt(systemZoneId)); - } - - @Test - public void zoned_timestamp_at() { - ZoneId systemZoneId = ZoneId.systemDefault(); - insertAndRead( - TableField.cql_timestamp, - 
ZonedDateTime.now(systemZoneId).truncatedTo(ChronoUnit.MILLIS), - ExtraTypeCodecs.zonedTimestampAt(systemZoneId)); - } - - private enum TestEnum { - value1, - value2, - value3, - } - - // Public for JSON serialization - public static final class TestJsonAnnotatedPojo { - public final String info; - public final List values; - - @JsonCreator - public TestJsonAnnotatedPojo( - @JsonProperty("info") String info, @JsonProperty("values") List values) { - this.info = info; - this.values = values; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TestJsonAnnotatedPojo testJsonAnnotatedPojo = (TestJsonAnnotatedPojo) o; - return Objects.equals(info, testJsonAnnotatedPojo.info) - && Objects.equals(values, testJsonAnnotatedPojo.values); - } - - @Override - public int hashCode() { - return Objects.hash(info, values); - } - } - - public static final class TestPojo { - public int id; - public String[] messages; - - public static TestPojo create(int id, String... 
messages) { - TestPojo obj = new TestPojo(); - obj.id = id; - obj.messages = messages; - return obj; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TestPojo testPojo = (TestPojo) o; - return id == testPojo.id && Arrays.equals(messages, testPojo.messages); - } - - @Override - public int hashCode() { - int result = Objects.hash(id); - result = 31 * result + Arrays.hashCode(messages); - return result; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/registry/CodecRegistryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/registry/CodecRegistryIT.java deleted file mode 100644 index 74472e8bab9..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/registry/CodecRegistryIT.java +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.core.type.codec.registry; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.type.codec.IntCodec; -import com.datastax.oss.driver.internal.core.type.codec.UdtCodec; -import com.datastax.oss.driver.internal.core.type.codec.extras.OptionalCodec; -import com.google.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.HashMap; 
-import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import org.assertj.core.util.Maps; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class CodecRegistryIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @Rule public TestName name = new TestName(); - - @BeforeClass - public static void createSchema() { - SchemaChangeSynchronizer.withLock( - () -> { - // table with simple primary key, single cell. - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k text primary key, v int)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - // table with map value - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test2 (k0 text, k1 int, v map, primary key (k0, k1))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - // table with UDT - SESSION_RULE - .session() - .execute( - SimpleStatement.builder("CREATE TYPE IF NOT EXISTS coordinates (x int, y int)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test3 (k0 text, k1 int, v map>, primary key (k0, k1))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - }); - } - - // A simple codec that allows float values to be used for cassandra int column type. 
- private static class FloatCIntCodec implements TypeCodec { - - private static final IntCodec intCodec = new IntCodec(); - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.of(Float.class); - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INT; - } - - @Override - public ByteBuffer encode(Float value, @NonNull ProtocolVersion protocolVersion) { - return intCodec.encode(value.intValue(), protocolVersion); - } - - @Override - public Float decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return intCodec.decode(bytes, protocolVersion).floatValue(); - } - - @NonNull - @Override - public String format(Float value) { - return intCodec.format(value.intValue()); - } - - @Override - public Float parse(String value) { - return intCodec.parse(value).floatValue(); - } - } - - @Test - public void should_throw_exception_if_no_codec_registered_for_type_set() { - PreparedStatement prepared = - SESSION_RULE.session().prepare("INSERT INTO test (k, v) values (?, ?)"); - - // float value for int column should not work since no applicable codec. 
- Throwable t = - catchThrowable( - () -> - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setFloat(1, 3.14f) - .build()); - - assertThat(t).isInstanceOf(CodecNotFoundException.class); - } - - @Test - public void should_throw_exception_if_no_codec_registered_for_type_get() { - PreparedStatement prepared = - SESSION_RULE.session().prepare("INSERT INTO test (k, v) values (?, ?)"); - - BoundStatement insert = - prepared.boundStatementBuilder().setString(0, name.getMethodName()).setInt(1, 2).build(); - SESSION_RULE.session().execute(insert); - - ResultSet result = - SESSION_RULE - .session() - .execute( - SimpleStatement.builder("SELECT v from test where k = ?") - .addPositionalValue(name.getMethodName()) - .build()); - - List rows = result.all(); - assertThat(rows).hasSize(1); - - // should not be able to access int column as float as no codec is registered to handle that. - Row row = rows.iterator().next(); - - Throwable t = catchThrowable(() -> assertThat(row.getFloat("v")).isEqualTo(3.0f)); - - assertThat(t).isInstanceOf(CodecNotFoundException.class); - } - - @Test - public void should_be_able_to_register_and_use_custom_codec() { - // create a cluster with a registered codec from Float <-> cql int. - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addTypeCodecs(new FloatCIntCodec()) - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(SESSION_RULE.keyspace()) - .build()) { - PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (?, ?)"); - - // float value for int column should work. 
- BoundStatement insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setFloat(1, 3.14f) - .build(); - session.execute(insert); - - ResultSet result = - session.execute( - SimpleStatement.builder("SELECT v from test where k = ?") - .addPositionalValue(name.getMethodName()) - .build()); - - List rows = result.all(); - assertThat(rows).hasSize(1); - - // should be able to retrieve value back as float, some precision is lost due to going from - // int -> float. - Row row = rows.iterator().next(); - assertThat(row.getFloat("v")).isEqualTo(3.0f); - assertThat(row.getFloat(0)).isEqualTo(3.0f); - } - } - - @Test - public void should_register_custom_codec_at_runtime() { - // Still create a separate session because we don't want to interfere with other tests - try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace())) { - - MutableCodecRegistry registry = - (MutableCodecRegistry) session.getContext().getCodecRegistry(); - registry.register(new FloatCIntCodec()); - - PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (?, ?)"); - - // float value for int column should work. - BoundStatement insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setFloat(1, 3.14f) - .build(); - session.execute(insert); - - ResultSet result = - session.execute( - SimpleStatement.builder("SELECT v from test where k = ?") - .addPositionalValue(name.getMethodName()) - .build()); - - List rows = result.all(); - assertThat(rows).hasSize(1); - - // should be able to retrieve value back as float, some precision is lost due to going from - // int -> float. 
- Row row = rows.iterator().next(); - assertThat(row.getFloat("v")).isEqualTo(3.0f); - assertThat(row.getFloat(0)).isEqualTo(3.0f); - } - } - - @Test - public void should_be_able_to_register_and_use_custom_codec_with_generic_type() { - // create a cluster with registered codecs using OptionalCodec - OptionalCodec> optionalMapCodec = - new OptionalCodec<>(TypeCodecs.mapOf(TypeCodecs.INT, TypeCodecs.TEXT)); - TypeCodec>> mapWithOptionalValueCodec = - TypeCodecs.mapOf(TypeCodecs.INT, new OptionalCodec<>(TypeCodecs.TEXT)); - - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addTypeCodecs(optionalMapCodec, mapWithOptionalValueCodec) - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(SESSION_RULE.keyspace()) - .build()) { - PreparedStatement prepared = - session.prepare("INSERT INTO test2 (k0, k1, v) values (?, ?, ?)"); - - // optional map should work. - Map v0 = Maps.newHashMap(0, "value"); - Optional> v0Opt = Optional.of(v0); - BoundStatement insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, 0) - .set( - 2, - v0Opt, - optionalMapCodec - .getJavaType()) // use java type so has to be looked up in registry. - .build(); - session.execute(insert); - - // optional absent map should work. - Optional> absentMap = Optional.empty(); - insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, 1) - .set(2, absentMap, optionalMapCodec.getJavaType()) - .build(); - session.execute(insert); - - // map with optional value should work - note that you can't have null values in collections, - // so this is not technically practical but want to validate that custom codec resolution - // works - // when it's composed in a collection codec. 
- Map> v2Map = Maps.newHashMap(1, Optional.of("hello")); - insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, 2) - .set(2, v2Map, mapWithOptionalValueCodec.getJavaType()) - .build(); - session.execute(insert); - - ResultSet result = - session.execute( - SimpleStatement.builder("SELECT v from test2 where k0 = ?") - .addPositionalValues(name.getMethodName()) - .build()); - - List rows = result.all(); - assertThat(rows).hasSize(3); - - Iterator iterator = rows.iterator(); - // row (at key 0) should have v0 - Row row = iterator.next(); - // should be able to retrieve value back as an optional map. - assertThat(row.get(0, optionalMapCodec.getJavaType())).isEqualTo(v0Opt); - // should be able to retrieve value back as map. - assertThat(row.getMap(0, Integer.class, String.class)).isEqualTo(v0); - - // next row (at key 1) should be absent (null value). - row = iterator.next(); - // value should be null. - assertThat(row.isNull(0)).isTrue(); - // getting with codec should return Optional.empty() - assertThat(row.get(0, optionalMapCodec.getJavaType())).isEqualTo(absentMap); - // getting with map should return an empty map. - assertThat(row.getMap(0, Integer.class, String.class)).isEmpty(); - - // next row (at key 2) should have v2 - row = iterator.next(); - // getting with codec should return with the correct type. - assertThat(row.get(0, mapWithOptionalValueCodec.getJavaType())).isEqualTo(v2Map); - // getting with map should return a map without optional value. 
- assertThat(row.getMap(0, Integer.class, String.class)).isEqualTo(Maps.newHashMap(1, "hello")); - } - } - - @Test - public void should_be_able_to_handle_empty_collections() { - try (CqlSession session = - (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(SESSION_RULE.keyspace()) - .build()) { - - // Using prepared statements (CQL type is known) - PreparedStatement prepared = - session.prepare("INSERT INTO test2 (k0, k1, v) values (?, ?, ?)"); - - BoundStatement insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, 0) - .setMap(2, new HashMap<>(), Integer.class, String.class) - .build(); - session.execute(insert); - - // Using simple statements (CQL type is unknown) - session.execute( - SimpleStatement.newInstance( - "INSERT INTO test2 (k0, k1, v) values (?, ?, ?)", - name.getMethodName(), - 1, - new HashMap<>())); - - ResultSet result = - session.execute( - SimpleStatement.builder("SELECT v from test2 where k0 = ?") - .addPositionalValues(name.getMethodName()) - .build()); - - List rows = result.all(); - assertThat(rows).hasSize(2); - - Row row1 = rows.get(0); - assertThat(row1.isNull(0)).isTrue(); - assertThat(row1.getMap(0, Integer.class, String.class)).isEmpty(); - - Row row2 = rows.get(1); - assertThat(row2.isNull(0)).isTrue(); - assertThat(row2.getMap(0, Integer.class, String.class)).isEmpty(); - } - } - - private static final class Coordinates { - - public final int x; - public final int y; - - public Coordinates(int x, int y) { - this.x = x; - this.y = y; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Coordinates that = (Coordinates) o; - return this.x == that.x && this.y == that.y; - } - - @Override - public int hashCode() { - return Objects.hash(x, y); - } - } - - private static class CoordinatesCodec extends MappingCodec { - - public 
CoordinatesCodec(@NonNull TypeCodec innerCodec) { - super(innerCodec, GenericType.of(Coordinates.class)); - } - - @NonNull - @Override - public UserDefinedType getCqlType() { - return (UserDefinedType) super.getCqlType(); - } - - @Nullable - @Override - protected Coordinates innerToOuter(@Nullable UdtValue value) { - return value == null ? null : new Coordinates(value.getInt("x"), value.getInt("y")); - } - - @Nullable - @Override - protected UdtValue outerToInner(@Nullable Coordinates value) { - return value == null - ? null - : getCqlType().newValue().setInt("x", value.x).setInt("y", value.y); - } - } - - @Test - public void should_register_and_use_custom_codec_for_user_defined_type() { - - Map coordinatesMap = ImmutableMap.of("home", new Coordinates(12, 34)); - GenericType> coordinatesMapType = - GenericType.mapOf(String.class, Coordinates.class); - - // Still create a separate session because we don't want to interfere with other tests - try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace())) { - - // register the mapping codec for UDT coordinates - UserDefinedType coordinatesUdt = - session - .getMetadata() - .getKeyspace(SESSION_RULE.keyspace()) - .flatMap(ks -> ks.getUserDefinedType("coordinates")) - .orElseThrow(IllegalStateException::new); - MutableCodecRegistry codecRegistry = - (MutableCodecRegistry) session.getContext().getCodecRegistry(); - - // Retrieve the inner codec - TypeCodec innerCodec = codecRegistry.codecFor(coordinatesUdt); - assertThat(innerCodec).isInstanceOf(UdtCodec.class); - - // Create the "outer" codec and register it - CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec); - codecRegistry.register(coordinatesCodec); - - // Test that the codec will be used to create on-the-fly codecs - assertThat(codecRegistry.codecFor(Coordinates.class)).isSameAs(coordinatesCodec); - assertThat(codecRegistry.codecFor(coordinatesMapType).accepts(coordinatesMap)).isTrue(); - - // test insertion - 
PreparedStatement prepared = - session.prepare("INSERT INTO test3 (k0, k1, v) values (?, ?, ?)"); - BoundStatement insert = - prepared - .boundStatementBuilder() - .setString(0, name.getMethodName()) - .setInt(1, 0) - .set( - 2, - coordinatesMap, - coordinatesMapType) // use java type so has to be looked up in registry. - .build(); - session.execute(insert); - - // test retrieval - ResultSet result = - session.execute( - SimpleStatement.builder("SELECT v from test3 where k0 = ? AND k1 = ?") - .addPositionalValues(name.getMethodName(), 0) - .build()); - List rows = result.all(); - assertThat(rows).hasSize(1); - Row row = rows.get(0); - assertThat(row.get(0, coordinatesMapType)).isEqualTo(coordinatesMap); - assertThat(row.getMap(0, String.class, Coordinates.class)).isEqualTo(coordinatesMap); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java deleted file mode 100644 index 082858803af..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.api; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; -import com.google.common.util.concurrent.ListenableFuture; - -public interface GuavaSession extends Session { - - GenericType> ASYNC = - new GenericType>() {}; - - GenericType> ASYNC_PREPARED = - new GenericType>() {}; - - default ListenableFuture executeAsync(Statement statement) { - return this.execute(statement, ASYNC); - } - - default ListenableFuture executeAsync(String statement) { - return this.executeAsync(SimpleStatement.newInstance(statement)); - } - - default ListenableFuture prepareAsync(SimpleStatement statement) { - return this.execute(new DefaultPrepareRequest(statement), ASYNC_PREPARED); - } - - default ListenableFuture prepareAsync(String statement) { - return this.prepareAsync(SimpleStatement.newInstance(statement)); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java deleted file mode 100644 index fe20d0fdc8a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.api; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.example.guava.internal.DefaultGuavaSession; -import com.datastax.oss.driver.example.guava.internal.GuavaDriverContext; -import edu.umd.cs.findbugs.annotations.NonNull; - -public class GuavaSessionBuilder extends SessionBuilder { - - @Override - protected DriverContext buildContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - return new GuavaDriverContext(configLoader, programmaticArguments); - } - - @Override - protected GuavaSession wrap(@NonNull CqlSession defaultSession) { - return new DefaultGuavaSession(defaultSession); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java deleted file mode 100644 index afa8439c487..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to 
the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.api; - -public class GuavaSessionUtils { - public static GuavaSessionBuilder builder() { - return new GuavaSessionBuilder(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java deleted file mode 100644 index 7418526520b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.internal; - -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.example.guava.api.GuavaSession; -import com.datastax.oss.driver.internal.core.session.SessionWrapper; - -public class DefaultGuavaSession extends SessionWrapper implements GuavaSession { - - public DefaultGuavaSession(Session delegate) { - super(delegate); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java deleted file mode 100644 index 692ad951187..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.internal; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; -import com.datastax.oss.driver.example.guava.api.GuavaSession; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor; -import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import java.util.Optional; - -/** - * A Custom {@link DefaultDriverContext} that overrides {@link #getRequestProcessorRegistry()} to - * return a {@link RequestProcessorRegistry} that includes processors for returning guava futures. - */ -public class GuavaDriverContext extends DefaultDriverContext { - - public GuavaDriverContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - super(configLoader, programmaticArguments); - } - - @Override - public RequestProcessorRegistry buildRequestProcessorRegistry() { - // Register the typical request processors, except instead of the normal async processors, - // use GuavaRequestAsyncProcessor to return ListenableFutures in async methods. 
- - CqlRequestAsyncProcessor cqlRequestAsyncProcessor = new CqlRequestAsyncProcessor(); - CqlPrepareAsyncProcessor cqlPrepareAsyncProcessor = - new CqlPrepareAsyncProcessor(Optional.of(this)); - CqlRequestSyncProcessor cqlRequestSyncProcessor = - new CqlRequestSyncProcessor(cqlRequestAsyncProcessor); - - return new RequestProcessorRegistry( - getSessionName(), - cqlRequestSyncProcessor, - new CqlPrepareSyncProcessor(cqlPrepareAsyncProcessor), - new GuavaRequestAsyncProcessor<>( - cqlRequestAsyncProcessor, Statement.class, GuavaSession.ASYNC), - new GuavaRequestAsyncProcessor<>( - cqlPrepareAsyncProcessor, PrepareRequest.class, GuavaSession.ASYNC_PREPARED), - // Register KeyRequestProcessor for handling KeyRequest and returning Integer. - new KeyRequestProcessor(cqlRequestAsyncProcessor)); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java deleted file mode 100644 index 20cb60323e9..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.internal; - -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import java.util.concurrent.CompletionStage; - -/** - * Wraps a {@link RequestProcessor} that returns {@link CompletionStage}s and converts them to a - * {@link ListenableFuture}s. - * - * @param The type of request - * @param The type of responses enclosed in the future response. - */ -public class GuavaRequestAsyncProcessor - implements RequestProcessor> { - - private final RequestProcessor> subProcessor; - - private final GenericType resultType; - - private final Class requestClass; - - GuavaRequestAsyncProcessor( - RequestProcessor> subProcessor, - Class requestClass, - GenericType resultType) { - this.subProcessor = subProcessor; - this.requestClass = requestClass; - this.resultType = resultType; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return requestClass.isInstance(request) && resultType.equals(this.resultType); - } - - @Override - public ListenableFuture process( - T request, DefaultSession session, InternalDriverContext context, String sessionLogPrefix) { - SettableFuture future = SettableFuture.create(); - subProcessor - .process(request, session, context, sessionLogPrefix) - .whenComplete( - (r, ex) -> { - if (ex != null) { - future.setException(ex); - } else { - future.set(r); - } - }); - return future; - } - - @Override - public 
ListenableFuture newFailure(RuntimeException error) { - return Futures.immediateFailedFuture(error); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java deleted file mode 100644 index ef582cce1b9..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.example.guava.internal; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.time.Duration; -import java.util.Map; - -/** A custom request that simply wraps an integer key and uses it as a parameter for a query. */ -public class KeyRequest implements Request { - - private final int key; - - public KeyRequest(int key) { - this.key = key; - } - - public int getKey() { - return key; - } - - @Override - public String getExecutionProfileName() { - return null; - } - - @Override - public DriverExecutionProfile getExecutionProfile() { - return null; - } - - @Override - public CqlIdentifier getKeyspace() { - return null; - } - - @Override - public CqlIdentifier getRoutingKeyspace() { - return null; - } - - @Override - public ByteBuffer getRoutingKey() { - return null; - } - - @Override - public Token getRoutingToken() { - return null; - } - - @NonNull - @Override - public Map getCustomPayload() { - return NullAllowingImmutableMap.of(); - } - - @Override - public Boolean isIdempotent() { - return true; - } - - @Nullable - @Override - public Duration getTimeout() { - return null; - } - - @Nullable - @Override - public Node getNode() { - return null; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java deleted file mode 100644 index 1fcfb9dd3b2..00000000000 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.example.guava.internal; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.core.session.RequestProcessorIT; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.session.DefaultSession; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; - -/** - * A request processor that takes a given {@link KeyRequest#getKey} and generates a query, delegates - * it to {@link CqlRequestAsyncProcessor} to get the integer value of a row and return it as a - * result. 
- */ -public class KeyRequestProcessor implements RequestProcessor { - - public static final GenericType INT_TYPE = GenericType.of(Integer.class); - - private final CqlRequestAsyncProcessor subProcessor; - - KeyRequestProcessor(CqlRequestAsyncProcessor subProcessor) { - this.subProcessor = subProcessor; - } - - @Override - public boolean canProcess(Request request, GenericType resultType) { - return request instanceof KeyRequest && resultType.equals(INT_TYPE); - } - - @Override - public Integer process( - KeyRequest request, - DefaultSession session, - InternalDriverContext context, - String sessionLogPrefix) { - - // Create statement from key and delegate it to CqlRequestSyncProcessor - SimpleStatement statement = - SimpleStatement.newInstance( - "select v1 from test where k = ? and v0 = ?", RequestProcessorIT.KEY, request.getKey()); - AsyncResultSet result = - CompletableFutures.getUninterruptibly( - subProcessor.process(statement, session, context, sessionLogPrefix)); - // If not exactly 1 rows were found, return Integer.MIN_VALUE, otherwise return the value. - if (result.remaining() != 1) { - return Integer.MIN_VALUE; - } else { - return result.currentPage().iterator().next().getInt("v1"); - } - } - - @Override - public Integer newFailure(RuntimeException error) { - throw error; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecIT.java deleted file mode 100644 index 804a078bbe0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class UdtCodecIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - public void should_decoding_udt_be_backward_compatible() { - CqlSession session = sessionRule.session(); - session.execute("CREATE TYPE test_type_1 (a text, b int)"); - session.execute("CREATE TABLE test_table_1 (e int primary key, f frozen)"); - // 
insert a row using version 1 of the UDT schema - session.execute("INSERT INTO test_table_1(e, f) VALUES(1, {a: 'a', b: 1})"); - UserDefinedType udt = - session - .getMetadata() - .getKeyspace(sessionRule.keyspace()) - .flatMap(ks -> ks.getUserDefinedType("test_type_1")) - .orElseThrow(IllegalStateException::new); - TypeCodec oldCodec = session.getContext().getCodecRegistry().codecFor(udt); - // update UDT schema - session.execute("ALTER TYPE test_type_1 add i text"); - // insert a row using version 2 of the UDT schema - session.execute("INSERT INTO test_table_1(e, f) VALUES(2, {a: 'b', b: 2, i: 'b'})"); - Row row = - Objects.requireNonNull(session.execute("SELECT f FROM test_table_1 WHERE e = ?", 2).one()); - // Try to read new row with old codec. Using row.getUdtValue() would not cause any issues, - // because new codec will be automatically registered (using all 3 attributes). - // If application leverages generic row.get(String, Codec) method, data reading with old codec - // should - // be backward-compatible. - UdtValue value = Objects.requireNonNull((UdtValue) row.get("f", oldCodec)); - assertThat(value.getString("a")).isEqualTo("b"); - assertThat(value.getInt("b")).isEqualTo(2); - assertThatThrownBy(() -> value.getString("i")).hasMessage("i is not a field in this UDT"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationCcmIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationCcmIT.java deleted file mode 100644 index e0f058b00d0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationCcmIT.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; -import com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.IsolatedTests; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.blockhound.BlockHound; -import reactor.core.publisher.Flux; -import reactor.core.scheduler.Schedulers; -import reactor.test.StepVerifier; - -/** - * This test exercises the driver with BlockHound installed and tests that the rules defined in - * {@link DriverBlockHoundIntegration} are being applied, and especially when continuous paging is - * used. - */ -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.1.0", - description = "Continuous paging is only available from 5.1.0 onwards") -@Category(IsolatedTests.class) -public class DriverBlockHoundIntegrationCcmIT extends ContinuousPagingITBase { - - private static final Logger LOGGER = - LoggerFactory.getLogger(DriverBlockHoundIntegrationCcmIT.class); - - private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); - - // Note: Insights monitoring will be detected by BlockHound, but the error is swallowed and - // logged by DefaultSession.SingleThreaded.notifyListeners, so it's not necessary to explicitly - // disable Insights here. 
- private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - @BeforeClass - public static void setUp() { - try { - BlockHound.install(); - } catch (Throwable t) { - LOGGER.error("BlockHound could not be installed", t); - fail("BlockHound could not be installed", t); - } - initialize(SESSION_RULE.session(), SESSION_RULE.slowProfile()); - } - - @Test - public void should_not_detect_blocking_call_with_continuous_paging() { - CqlSession session = SESSION_RULE.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - Flux rows = - Flux.range(0, 10) - .flatMap( - i -> - Flux.fromIterable(session.executeContinuously(statement)) - .subscribeOn(Schedulers.parallel())); - StepVerifier.create(rows).expectNextCount(1000).expectComplete().verify(); - } - - /** Copied from com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingIT. */ - @Test - public void should_not_detect_blocking_call_with_continuous_paging_when_timeout() - throws Exception { - CqlSession session = SESSION_RULE.session(); - SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); - // Throttle server at a page per second and set client timeout much lower so that the client - // will experience a timeout. - // Note that this might not be perfect if there are pauses in the JVM and the timeout - // doesn't fire soon enough. 
- DriverExecutionProfile profile = - session - .getContext() - .getConfig() - .getDefaultProfile() - .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) - .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1) - .withDuration( - DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofMillis(100)); - CompletionStage future = - session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); - ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); - try { - pagingResult.fetchNextPage().toCompletableFuture().get(); - fail("Expected a timeout"); - } catch (ExecutionException e) { - assertThat(e.getCause()) - .isInstanceOf(DriverTimeoutException.class) - .hasMessageContaining("Timed out waiting for page 2"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationIT.java deleted file mode 100644 index 278e3081ea1..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationIT.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.concurrent; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Fail.fail; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.IsolatedTests; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import java.util.UUID; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.blockhound.BlockHound; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; -import reactor.test.StepVerifier; - -/** - * This test exercises the driver with BlockHound installed and tests that the rules defined in - * {@link DriverBlockHoundIntegration} are being applied. 
- */ -@Category(IsolatedTests.class) -public class DriverBlockHoundIntegrationIT { - - private static final Logger LOGGER = LoggerFactory.getLogger(DriverBlockHoundIntegrationIT.class); - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - @BeforeClass - public static void setUp() { - try { - BlockHound.install(); - } catch (Throwable t) { - LOGGER.error("BlockHound could not be installed", t); - fail("BlockHound could not be installed", t); - } - } - - @Before - public void setup() { - SIMULACRON_RULE.cluster().prime(when("SELECT c1, c2 FROM ks.t1").then(rows().row("foo", 42))); - } - - @Test - @SuppressWarnings("BlockingMethodInNonBlockingContext") - public void should_detect_blocking_call() { - // this is just to make sure the detection mechanism is properly installed - Mono blockingPublisher = - Mono.fromCallable( - () -> { - Thread.sleep(1); - return 0; - }) - .subscribeOn(Schedulers.parallel()); - StepVerifier.create(blockingPublisher) - .expectErrorMatches(e -> e instanceof Error && e.getMessage().contains("Blocking call!")) - .verify(); - } - - @Test - public void should_not_detect_blocking_call_on_asynchronous_execution() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - Flux rows = - Flux.range(0, 1000) - .flatMap( - i -> - Flux.from(session.executeReactive("SELECT c1, c2 FROM ks.t1")) - .subscribeOn(Schedulers.parallel())); - StepVerifier.create(rows).expectNextCount(1000).expectComplete().verify(); - } - } - - @Test - public void should_not_detect_blocking_call_on_asynchronous_execution_prepared() { - try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { - Flux rows = - Mono.fromCompletionStage(() -> session.prepareAsync("SELECT c1, c2 FROM ks.t1")) - .flatMapMany( - ps -> - Flux.range(0, 1000) - .map(i -> ps.bind()) - .flatMap( - bs -> - Flux.from(session.executeReactive(bs)) - .subscribeOn(Schedulers.parallel()))); - 
StepVerifier.create(rows).expectNextCount(1000).expectComplete().verify(); - } - } - - @Test - public void should_not_detect_blocking_call_on_random_uuid_generation() { - Flux uuids = - Flux.create( - sink -> { - for (int i = 0; i < 1_000_000; ++i) { - sink.next(Uuids.random()); - } - sink.complete(); - }) - .subscribeOn(Schedulers.parallel()); - StepVerifier.create(uuids).expectNextCount(1_000_000).expectComplete().verify(); - } - - @Test - public void should_not_detect_blocking_call_on_time_based_uuid_generation() { - Flux uuids = - Flux.create( - sink -> { - for (int i = 0; i < 1_000_000; ++i) { - sink.next(Uuids.timeBased()); - } - sink.complete(); - }) - .subscribeOn(Schedulers.parallel()); - StepVerifier.create(uuids).expectNextCount(1_000_000).expectComplete().verify(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java deleted file mode 100644 index 5ef66f15bfb..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; -import static org.assertj.core.data.Offset.offset; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Computed; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import 
com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ComputedIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestMapper mapper; - - private static AtomicInteger keyProvider = new AtomicInteger(); - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : - ImmutableList.of( - "CREATE TABLE computed_entity(id int, c_id int, v int, primary key (id, c_id))")) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - mapper = new ComputedIT_TestMapperBuilder(session).build(); - } - - @Test - public void should_not_include_computed_values_in_insert() { - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - ComputedEntity entity = new ComputedEntity(key, 1, 2); - computedDao.save(entity); - - ComputedEntity retrievedValue = computedDao.findById(key, 1); - assertThat(retrievedValue.getId()).isEqualTo(key); - assertThat(retrievedValue.getcId()).isEqualTo(1); - assertThat(retrievedValue.getV()).isEqualTo(2); - } - - @Test - public void should_return_computed_values_in_select() { - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - long time = System.currentTimeMillis() - 1000; - ComputedEntity entity = new 
ComputedEntity(key, 1, 2); - computedDao.saveWithTime(entity, 3600, time); - - ComputedEntity retrievedValue = computedDao.findById(key, 1); - assertThat(retrievedValue.getId()).isEqualTo(key); - assertThat(retrievedValue.getcId()).isEqualTo(1); - assertThat(retrievedValue.getV()).isEqualTo(2); - assertThat(retrievedValue.getTtl()).isCloseTo(3600, offset(10)); - assertThat(retrievedValue.getWritetime()).isEqualTo(time); - } - - @Test - public void should_not_include_computed_values_in_delete() { - // should not be the case since delete operates on primary key.. - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - ComputedEntity entity = new ComputedEntity(key, 1, 2); - computedDao.save(entity); - - // retrieve values so computed values are present. - ComputedEntity retrievedValue = computedDao.findById(key, 1); - - computedDao.delete(retrievedValue); - - assertThat(computedDao.findById(key, 1)).isNull(); - } - - @Test - public void should_not_include_computed_values_in_SetEntity() { - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - CqlSession session = SESSION_RULE.session(); - PreparedStatement preparedStatement = - session.prepare("INSERT INTO computed_entity (id, c_id, v) VALUES (?, ?, ?)"); - BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); - ComputedEntity entity = new ComputedEntity(key, 1, 2); - BoundStatement statement = computedDao.set(builder, entity).build(); - session.execute(statement); - - // retrieve values to ensure was successful. 
- ComputedEntity retrievedValue = computedDao.findById(key, 1); - - assertThat(retrievedValue.getId()).isEqualTo(key); - assertThat(retrievedValue.getcId()).isEqualTo(1); - assertThat(retrievedValue.getV()).isEqualTo(2); - } - - @Test - public void should_return_computed_values_in_GetEntity() { - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - long time = System.currentTimeMillis() - 1000; - ComputedEntity entity = new ComputedEntity(key, 1, 2); - computedDao.saveWithTime(entity, 3600, time); - - CqlSession session = SESSION_RULE.session(); - - /* - * Query with the computed values included. - * - * Since the mapper expects the result name to match the property name, we used aliasing - * here. - * - * In the case of ttl(v), since we annotated ttl as @CqlName("myttl") we expect myttl to - * be the alias. - */ - ResultSet result = - session.execute( - SimpleStatement.newInstance( - "select id, c_id, v, writetime(v) as writetime, ttl(v) as myttl from " - + "computed_entity where " - + "id=? and " - + "c_id=? 
limit 1", - key, - 1)); - assertThat(result.getAvailableWithoutFetching()).isEqualTo(1); - - ComputedEntity retrievedValue = computedDao.get(result.one()); - assertThat(retrievedValue.getId()).isEqualTo(key); - assertThat(retrievedValue.getcId()).isEqualTo(1); - assertThat(retrievedValue.getV()).isEqualTo(2); - - // these should be set - assertThat(retrievedValue.getTtl()).isCloseTo(3600, offset(10)); - assertThat(retrievedValue.getWritetime()).isEqualTo(time); - } - - @Test - public void should_fail_if_alias_does_not_match_cqlName() { - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - long time = System.currentTimeMillis() - 1000; - ComputedEntity entity = new ComputedEntity(key, 1, 2); - computedDao.saveWithTime(entity, 3600, time); - - CqlSession session = SESSION_RULE.session(); - - /* - * Query with the computed values included. - * - * Since the mapper expects the result name to match the property name, we used aliasing - * here and used the wrong name for the alias 'notwritetime' which does not map to the cqlName. - */ - ResultSet result = - session.execute( - SimpleStatement.newInstance( - "select id, c_id, v, writetime(v) as notwritetime, ttl(v) as myttl from " - + "computed_entity where " - + "id=? and " - + "c_id=? limit 1", - key, - 1)); - - // should raise an exception as 'writetime' is not found in result set. 
- Throwable t = catchThrowable(() -> computedDao.get(result.one())); - - assertThat(t) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("writetime is not a column in this row"); - } - - @Test - public void should_return_computed_values_in_query() { - ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); - int key = keyProvider.incrementAndGet(); - - long time = System.currentTimeMillis() - 1000; - ComputedEntity entity = new ComputedEntity(key, 1, 2); - computedDao.saveWithTime(entity, 3600, time); - - ComputedEntity retrievedValue = computedDao.findByIdQuery(key, 1); - assertThat(retrievedValue.getId()).isEqualTo(key); - assertThat(retrievedValue.getcId()).isEqualTo(1); - assertThat(retrievedValue.getV()).isEqualTo(2); - - // these should be set - assertThat(retrievedValue.getTtl()).isCloseTo(3600, offset(10)); - assertThat(retrievedValue.getWritetime()).isEqualTo(time); - } - - @Entity - public static class ComputedEntity { - - @PartitionKey private int id; - - @ClusteringColumn private int cId; - - private int v; - - @Computed("writetime(v)") - private long writetime; - - // use CqlName to ensure it is used for the alias. 
- @CqlName("myttl") - @Computed("ttl(v)") - private int ttl; - - public ComputedEntity() {} - - public ComputedEntity(int id, int cId, int v) { - this.id = id; - this.cId = cId; - this.v = v; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getcId() { - return cId; - } - - public void setcId(int cId) { - this.cId = cId; - } - - public int getV() { - return v; - } - - public void setV(int v) { - this.v = v; - } - - public long getWritetime() { - return writetime; - } - - public void setWritetime(long writetime) { - this.writetime = writetime; - } - - public int getTtl() { - return ttl; - } - - public void setTtl(int ttl) { - this.ttl = ttl; - } - } - - @Dao - public interface ComputedDao { - @Select - ComputedEntity findById(int id, int cId); - - @Insert(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void save(ComputedEntity entity); - - @Insert( - ttl = ":ttl", - timestamp = ":writeTime", - nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void saveWithTime(ComputedEntity entity, int ttl, long writeTime); - - @Delete - void delete(ComputedEntity entity); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - BoundStatementBuilder set(BoundStatementBuilder builder, ComputedEntity computedEntity); - - @GetEntity - ComputedEntity get(Row row); - - @Query( - value = - "select id, c_id, v, ttl(v) as myttl, writetime(v) as writetime from " - + "${qualifiedTableId} WHERE id = :id and " - + "c_id = :cId", - nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - ComputedEntity findByIdQuery(int id, int cId); - - @Update(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void update(ComputedEntity entity); - } - - @Mapper - public interface TestMapper { - @DaoFactory - ComputedDao computedDao(@DaoKeyspace CqlIdentifier keyspace); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/CustomResultTypeIT.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/CustomResultTypeIT.java deleted file mode 100644 index c218dcfcc86..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/CustomResultTypeIT.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ListenableFuture; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class CustomResultTypeIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = 
RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper mapper = InventoryMapper.builder(SESSION_RULE.session()).build(); - dao = mapper.productDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_use_custom_result_for_insert_method() - throws ExecutionException, InterruptedException { - - ListenableFuture insertFuture = dao.insert(FLAMETHROWER); - insertFuture.get(); - - Row row = SESSION_RULE.session().execute("SELECT id FROM product").one(); - UUID insertedId = row.getUuid(0); - assertThat(insertedId).isEqualTo(FLAMETHROWER.getId()); - } - - @Test - public void should_use_custom_result_for_select_method() - throws ExecutionException, InterruptedException { - - dao.insert(FLAMETHROWER).get(); - - ListenableFuture selectFuture = dao.select(FLAMETHROWER.getId()); - Product selectedProduct = selectFuture.get(); - assertThat(selectedProduct).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_use_custom_result_for_update_method() - throws ExecutionException, InterruptedException { - - dao.insert(FLAMETHROWER).get(); - - Product productToUpdate = dao.select(FLAMETHROWER.getId()).get(); - productToUpdate.setDescription("changed description"); - ListenableFuture updateFuture = dao.update(productToUpdate); - updateFuture.get(); - - Product selectedProduct = dao.select(FLAMETHROWER.getId()).get(); - assertThat(selectedProduct.getDescription()).isEqualTo("changed description"); - } - - @Test - public void should_use_custom_result_for_delete_method() - throws ExecutionException, InterruptedException { - dao.insert(FLAMETHROWER).get(); - - ListenableFuture deleteFuture = dao.delete(FLAMETHROWER); - deleteFuture.get(); - - Product selectedProduct = 
dao.select(FLAMETHROWER.getId()).get(); - assertThat(selectedProduct).isNull(); - } - - @Test - public void should_use_custom_result_for_query_method() - throws ExecutionException, InterruptedException { - dao.insert(FLAMETHROWER).get(); - - ListenableFuture deleteFuture = dao.deleteById(FLAMETHROWER.getId()); - deleteFuture.get(); - - Product selectedProduct = dao.select(FLAMETHROWER.getId()).get(); - assertThat(selectedProduct).isNull(); - } - - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ListenableFutureDao { - - @Select - ListenableFuture select(UUID id); - - @Update - ListenableFuture update(EntityT entity); - - @Insert - ListenableFuture insert(EntityT entity); - - @Delete - ListenableFuture delete(EntityT entity); - } - - @Dao - public interface ProductDao extends ListenableFutureDao { - - // We could do this easier with @Delete, but the goal here is to test @Query - @Query("DELETE FROM ${keyspaceId}.product WHERE id = :id") - ListenableFuture deleteById(UUID id); - } - - @Mapper - public interface InventoryMapper { - - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - - static MapperBuilder builder(CqlSession session) { - return new CustomResultTypeIT_InventoryMapperBuilder(session); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java deleted file mode 100644 index 30a808e87a9..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import 
com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class DefaultKeyspaceIT { - private static final String DEFAULT_KEYSPACE = "default_keyspace"; - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - private static final SessionRule SESSION_WITH_NO_KEYSPACE_RULE = - SessionRule.builder(CCM_RULE).withKeyspace(false).build(); - - @ClassRule - public static final TestRule chain = - RuleChain.outerRule(CCM_RULE).around(SESSION_RULE).around(SESSION_WITH_NO_KEYSPACE_RULE); - - private static InventoryMapper mapper; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder( - String.format( - "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - DEFAULT_KEYSPACE)) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - session.execute( - SimpleStatement.builder( - String.format( - "CREATE TABLE %s.product_simple_default_ks(id uuid PRIMARY KEY, description text)", - DEFAULT_KEYSPACE)) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - session.execute( - SimpleStatement.builder( - "CREATE TABLE product_simple_without_ks(id uuid PRIMARY KEY, description text)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - 
session.execute( - SimpleStatement.builder( - "CREATE TABLE product_simple_default_ks(id uuid PRIMARY KEY, description text)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - mapper = new DefaultKeyspaceIT_InventoryMapperBuilder(session).build(); - } - - @Test - public void should_insert_using_default_keyspace_on_entity_level() { - // Given - ProductSimpleDefaultKs product = new ProductSimpleDefaultKs(UUID.randomUUID(), "desc_1"); - ProductSimpleDaoDefaultKs dao = mapper.productDaoDefaultKs(); - assertThat(dao.findById(product.id)).isNull(); - - // When - dao.update(product); - - // Then - assertThat(dao.findById(product.id)).isEqualTo(product); - } - - @Test - public void should_fail_to_insert_if_default_ks_and_dao_ks_not_provided() { - // Given - assertThatThrownBy( - () -> { - InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder(SESSION_RULE.session()) - .withCustomState(MapperBuilder.SCHEMA_VALIDATION_ENABLED_SETTING, false) - .build(); - mapper.productDaoDefaultKsNotSet(); - }) - .isInstanceOf(InvalidQueryException.class); - // don't check the error message, as it's not consistent across Cassandra/DSE versions - } - - @Test - public void should_insert_without_ks_if_table_is_created_for_session_default_ks() { - // Given - ProductSimpleWithoutKs product = new ProductSimpleWithoutKs(UUID.randomUUID(), "desc_1"); - ProductSimpleDaoWithoutKs dao = mapper.productDaoWithoutKs(); - assertThat(dao.findById(product.id)).isNull(); - - // When - dao.update(product); - - // Then - assertThat(dao.findById(product.id)).isEqualTo(product); - } - - @Test - public void should_insert_preferring_dao_factory_ks_over_entity_default_ks() { - // Given - ProductSimpleDefaultKs product = new ProductSimpleDefaultKs(UUID.randomUUID(), "desc_1"); - ProductSimpleDaoDefaultKs dao = - mapper.productDaoEntityDefaultOverridden(SESSION_RULE.keyspace()); - assertThat(dao.findById(product.id)).isNull(); - - // When - dao.update(product); - - 
// Then - assertThat(dao.findById(product.id)).isEqualTo(product); - } - - @Test - public void should_fail_dao_initialization_if_keyspace_not_specified() { - // Given - assertThatThrownBy( - () -> { - // session has no keyspace - // dao has no keyspace - // entity has no keyspace - InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder( - SESSION_WITH_NO_KEYSPACE_RULE.session()) - .build(); - mapper.productDaoDefaultKsNotSet(); - }) - .isInstanceOf(MapperException.class) - .hasMessage( - "Missing keyspace. Suggestions: use SessionBuilder.withKeyspace() " - + "when creating your session, specify a default keyspace on " - + "ProductSimpleDefaultKsNotSet with @Entity(defaultKeyspace), or use a " - + "@DaoFactory method with a @DaoKeyspace parameter"); - } - - @Test - public void should_initialize_dao_if_keyspace_not_specified_but_not_needed() { - // session has no keyspace - // dao has no keyspace - // entity has no keyspace - // but dao methods don't require keyspace (GetEntity, SetEntity) - InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder( - SESSION_WITH_NO_KEYSPACE_RULE.session()) - .build(); - mapper.productDaoGetAndSetOnly(); - } - - @Test - public void should_initialize_dao_if_default_ks_provided() { - InventoryMapper mapper = - new DefaultKeyspaceIT_InventoryMapperBuilder(SESSION_WITH_NO_KEYSPACE_RULE.session()) - .build(); - // session has no keyspace, but entity does - mapper.productDaoDefaultKs(); - mapper.productDaoEntityDefaultOverridden(SESSION_RULE.keyspace()); - } - - @Test - public void should_initialize_dao_if_dao_ks_provided() { - InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder( - SESSION_WITH_NO_KEYSPACE_RULE.session()) - .build(); - // session has no keyspace, but dao has parameter - mapper.productDaoDefaultKsNotSetOverridden( - SESSION_RULE.keyspace(), CqlIdentifier.fromCql("product_simple_default_ks")); - } - - @Mapper - public 
interface InventoryMapper { - @DaoFactory - ProductSimpleDaoDefaultKs productDaoDefaultKs(); - - @DaoFactory - ProductSimpleDaoWithoutKs productDaoWithoutKs(); - - @DaoFactory - ProductSimpleDaoDefaultKs productDaoEntityDefaultOverridden( - @DaoKeyspace CqlIdentifier keyspace); - } - - @Mapper - public interface InventoryMapperKsNotSet { - - @DaoFactory - ProductSimpleDaoDefaultKsNotSet productDaoDefaultKsNotSet(); - - @DaoFactory - ProductSimpleDaoDefaultKsNotSet productDaoDefaultKsNotSetOverridden( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - ProductSimpleDaoDefaultKsNotSetGetAndSetOnly productDaoGetAndSetOnly(); - } - - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface BaseDao { - @Update - void update(T product); - - @Select - T findById(UUID productId); - } - - @Dao - public interface ProductSimpleDaoDefaultKs extends BaseDao {} - - @Dao - public interface ProductSimpleDaoWithoutKs extends BaseDao {} - - @Dao - public interface ProductSimpleDaoDefaultKsNotSet extends BaseDao {} - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductSimpleDaoDefaultKsNotSetGetAndSetOnly { - @SetEntity - void set(BoundStatementBuilder builder, ProductSimpleDefaultKsNotSet product); - - @GetEntity - ProductSimpleDefaultKsNotSet get(Row row); - } - - @Entity(defaultKeyspace = DEFAULT_KEYSPACE) - public static class ProductSimpleDefaultKs { - @PartitionKey private UUID id; - private String description; - - public ProductSimpleDefaultKs() {} - - public ProductSimpleDefaultKs(UUID id, String description) { - this.id = id; - this.description = description; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public boolean equals(Object other) { - if (this == 
other) { - return true; - } else if (other instanceof ProductSimpleDefaultKs) { - ProductSimpleDefaultKs that = (ProductSimpleDefaultKs) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.description, that.description); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description); - } - - @Override - public String toString() { - return "ProductSimple{" + "id=" + id + ", description='" + description + '\'' + '}'; - } - } - - @Entity - public static class ProductSimpleDefaultKsNotSet { - @PartitionKey private UUID id; - private String description; - - public ProductSimpleDefaultKsNotSet() {} - - public ProductSimpleDefaultKsNotSet(UUID id, String description) { - this.id = id; - this.description = description; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof ProductSimpleDefaultKsNotSet) { - ProductSimpleDefaultKsNotSet that = (ProductSimpleDefaultKsNotSet) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.description, that.description); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description); - } - - @Override - public String toString() { - return "ProductSimple{" + "id=" + id + ", description='" + description + '\'' + '}'; - } - } - - @Entity - public static class ProductSimpleWithoutKs { - @PartitionKey private UUID id; - private String description; - - public ProductSimpleWithoutKs() {} - - public ProductSimpleWithoutKs(UUID id, String description) { - this.id = id; - this.description = description; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id 
= id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof ProductSimpleWithoutKs) { - ProductSimpleWithoutKs that = (ProductSimpleWithoutKs) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.description, that.description); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description); - } - - @Override - public String toString() { - return "ProductSimple{" + "id=" + id + ", description='" + description + '\'' + '}'; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java deleted file mode 100644 index fc88b5bbf7f..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.DO_NOT_SET; -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.SET_TO_NULL; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.function.BiConsumer; -import java.util.function.Consumer; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * Covers null saving strategy 
interaction between DAO method annotations and {@link - * DefaultNullSavingStrategy} annotation. - */ -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.2", - description = "support for unset values") -public class DefaultNullSavingStrategyIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestMapper mapper; - private static PreparedStatement prepared; - - @BeforeClass - public static void createSchema() { - CqlSession session = SESSION_RULE.session(); - - session.execute( - SimpleStatement.builder("CREATE TABLE foo(k int PRIMARY KEY, v int)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - mapper = new DefaultNullSavingStrategyIT_TestMapperBuilder(session).build(); - prepared = SESSION_RULE.session().prepare("INSERT INTO foo (k, v) values (:k, :v)"); - } - - @Test - public void should_respect_strategy_inheritance_rules() { - DaoWithNoStrategy daoWithNoStrategy = mapper.daoWithNoStrategy(); - DaoWithDoNotSet daoWithDoNotSet = mapper.daoWithDoNotSet(); - DaoWithSetToNull daoWithSetToNull = mapper.daoWithSetToNull(); - - assertStrategy(daoWithNoStrategy::queryWithNoStrategy, DO_NOT_SET); - assertStrategy(daoWithNoStrategy::queryWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithNoStrategy::queryWithSetToNull, SET_TO_NULL); - assertStrategy(daoWithNoStrategy::insertWithNoStrategy, DO_NOT_SET); - assertStrategy(daoWithNoStrategy::insertWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithNoStrategy::insertWithSetToNull, SET_TO_NULL); - assertStrategy(daoWithNoStrategy::updateWithNoStrategy, DO_NOT_SET); - assertStrategy(daoWithNoStrategy::updateWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithNoStrategy::updateWithSetToNull, SET_TO_NULL); - 
assertSetEntityStrategy(daoWithNoStrategy::setWithNoStrategy, DO_NOT_SET); - assertSetEntityStrategy(daoWithNoStrategy::setWithDoNotSet, DO_NOT_SET); - assertSetEntityStrategy(daoWithNoStrategy::setWithSetToNull, SET_TO_NULL); - - assertStrategy(daoWithDoNotSet::queryWithNoStrategy, DO_NOT_SET); - assertStrategy(daoWithDoNotSet::queryWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithDoNotSet::queryWithSetToNull, SET_TO_NULL); - assertStrategy(daoWithDoNotSet::insertWithNoStrategy, DO_NOT_SET); - assertStrategy(daoWithDoNotSet::insertWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithDoNotSet::insertWithSetToNull, SET_TO_NULL); - assertStrategy(daoWithDoNotSet::updateWithNoStrategy, DO_NOT_SET); - assertStrategy(daoWithDoNotSet::updateWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithDoNotSet::updateWithSetToNull, SET_TO_NULL); - assertSetEntityStrategy(daoWithDoNotSet::setWithNoStrategy, DO_NOT_SET); - assertSetEntityStrategy(daoWithDoNotSet::setWithDoNotSet, DO_NOT_SET); - assertSetEntityStrategy(daoWithDoNotSet::setWithSetToNull, SET_TO_NULL); - - assertStrategy(daoWithSetToNull::queryWithNoStrategy, SET_TO_NULL); - assertStrategy(daoWithSetToNull::queryWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithSetToNull::queryWithSetToNull, SET_TO_NULL); - assertStrategy(daoWithSetToNull::insertWithNoStrategy, SET_TO_NULL); - assertStrategy(daoWithSetToNull::insertWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithSetToNull::insertWithSetToNull, SET_TO_NULL); - assertStrategy(daoWithSetToNull::updateWithNoStrategy, SET_TO_NULL); - assertStrategy(daoWithSetToNull::updateWithDoNotSet, DO_NOT_SET); - assertStrategy(daoWithSetToNull::updateWithSetToNull, SET_TO_NULL); - assertSetEntityStrategy(daoWithSetToNull::setWithNoStrategy, SET_TO_NULL); - assertSetEntityStrategy(daoWithSetToNull::setWithDoNotSet, DO_NOT_SET); - assertSetEntityStrategy(daoWithSetToNull::setWithSetToNull, SET_TO_NULL); - } - - private void assertStrategy( - BiConsumer daoMethod, NullSavingStrategy 
expectedStrategy) { - reset(); - daoMethod.accept(1, null); - validateData(expectedStrategy); - } - - private void assertStrategy(Consumer daoMethod, NullSavingStrategy expectedStrategy) { - reset(); - Foo foo = new Foo(1, null); - daoMethod.accept(foo); - validateData(expectedStrategy); - } - - private void assertSetEntityStrategy( - BiConsumer daoMethod, NullSavingStrategy expectedStrategy) { - reset(); - Foo foo = new Foo(1, null); - BoundStatementBuilder builder = prepared.boundStatementBuilder(); - daoMethod.accept(builder, foo); - SESSION_RULE.session().execute(builder.build()); - validateData(expectedStrategy); - } - - private void reset() { - CqlSession session = SESSION_RULE.session(); - session.execute("INSERT INTO foo (k, v) VALUES (1, 1)"); - } - - private void validateData(NullSavingStrategy expectedStrategy) { - CqlSession session = SESSION_RULE.session(); - Row row = session.execute("SELECT v FROM foo WHERE k = 1").one(); - switch (expectedStrategy) { - case DO_NOT_SET: - assertThat(row.getInt("v")).isEqualTo(1); - break; - case SET_TO_NULL: - assertThat(row.isNull("v")).isTrue(); - break; - default: - throw new AssertionError("unhandled strategy " + expectedStrategy); - } - } - - @Dao - public interface DaoWithNoStrategy { - @Query("INSERT INTO foo (k, v) values (:k, :v)") - void queryWithNoStrategy(Integer k, Integer v); - - @Query(value = "INSERT INTO foo (k, v) values (:k, :v)", nullSavingStrategy = SET_TO_NULL) - void queryWithSetToNull(Integer k, Integer v); - - @Query(value = "INSERT INTO foo (k, v) values (:k, :v)", nullSavingStrategy = DO_NOT_SET) - void queryWithDoNotSet(Integer k, Integer v); - - @Insert - void insertWithNoStrategy(Foo foo); - - @Insert(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void insertWithSetToNull(Foo foo); - - @Insert(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void insertWithDoNotSet(Foo foo); - - @Update - void updateWithNoStrategy(Foo foo); - - @Update(nullSavingStrategy = 
NullSavingStrategy.SET_TO_NULL) - void updateWithSetToNull(Foo foo); - - @Update(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void updateWithDoNotSet(Foo foo); - - @SetEntity - void setWithNoStrategy(BoundStatementBuilder builder, Foo foo); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void setWithSetToNull(BoundStatementBuilder builder, Foo foo); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void setWithDoNotSet(BoundStatementBuilder builder, Foo foo); - } - - @Dao - @DefaultNullSavingStrategy(SET_TO_NULL) - public interface DaoWithSetToNull { - @Query("INSERT INTO foo (k, v) values (:k, :v)") - void queryWithNoStrategy(Integer k, Integer v); - - @Query(value = "INSERT INTO foo (k, v) values (:k, :v)", nullSavingStrategy = SET_TO_NULL) - void queryWithSetToNull(Integer k, Integer v); - - @Query(value = "INSERT INTO foo (k, v) values (:k, :v)", nullSavingStrategy = DO_NOT_SET) - void queryWithDoNotSet(Integer k, Integer v); - - @Insert - void insertWithNoStrategy(Foo foo); - - @Insert(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void insertWithSetToNull(Foo foo); - - @Insert(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void insertWithDoNotSet(Foo foo); - - @Update - void updateWithNoStrategy(Foo foo); - - @Update(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void updateWithSetToNull(Foo foo); - - @Update(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void updateWithDoNotSet(Foo foo); - - @SetEntity - void setWithNoStrategy(BoundStatementBuilder builder, Foo foo); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void setWithSetToNull(BoundStatementBuilder builder, Foo foo); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void setWithDoNotSet(BoundStatementBuilder builder, Foo foo); - } - - @Dao - @DefaultNullSavingStrategy(DO_NOT_SET) - public interface DaoWithDoNotSet { - @Query("INSERT INTO foo (k, v) values (:k, :v)") - void 
queryWithNoStrategy(Integer k, Integer v); - - @Query(value = "INSERT INTO foo (k, v) values (:k, :v)", nullSavingStrategy = SET_TO_NULL) - void queryWithSetToNull(Integer k, Integer v); - - @Query(value = "INSERT INTO foo (k, v) values (:k, :v)", nullSavingStrategy = DO_NOT_SET) - void queryWithDoNotSet(Integer k, Integer v); - - @Insert - void insertWithNoStrategy(Foo foo); - - @Insert(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void insertWithSetToNull(Foo foo); - - @Insert(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void insertWithDoNotSet(Foo foo); - - @Update - void updateWithNoStrategy(Foo foo); - - @Update(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void updateWithSetToNull(Foo foo); - - @Update(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void updateWithDoNotSet(Foo foo); - - @SetEntity - void setWithNoStrategy(BoundStatementBuilder builder, Foo foo); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void setWithSetToNull(BoundStatementBuilder builder, Foo foo); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void setWithDoNotSet(BoundStatementBuilder builder, Foo foo); - } - - @Mapper - public interface TestMapper { - @DaoFactory - DaoWithNoStrategy daoWithNoStrategy(); - - @DaoFactory - DaoWithSetToNull daoWithSetToNull(); - - @DaoFactory - DaoWithDoNotSet daoWithDoNotSet(); - } - - @Entity - public static class Foo { - @PartitionKey private int k; - private Integer v; - - public Foo() { - this.k = 0; - this.v = null; - } - - public Foo(int k, Integer v) { - this.k = k; - this.v = v; - } - - public int getK() { - return k; - } - - public void setK(int k) { - this.k = k; - } - - public Integer getV() { - return v; - } - - public void setV(Integer v) { - this.v = v; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java deleted file mode 100644 index 
03e3597501c..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java +++ /dev/null @@ -1,520 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import 
com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -// Do not run LWT tests in parallel because they may interfere. Tests operate on the same row. 
-@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.0", - description = ">= in WHERE clause not supported in legacy versions") -public class DeleteIT extends InventoryITBase { - - private static CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - - private static ProductSaleDao saleDao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper inventoryMapper = new DeleteIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - saleDao = inventoryMapper.productSaleDao(SESSION_RULE.keyspace()); - } - - @Before - public void insertFixtures() { - dao.save(FLAMETHROWER); - - saleDao.save(FLAMETHROWER_SALE_1); - saleDao.save(FLAMETHROWER_SALE_2); - saleDao.save(FLAMETHROWER_SALE_3); - saleDao.save(FLAMETHROWER_SALE_4); - saleDao.save(FLAMETHROWER_SALE_5); - saleDao.save(MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_entity() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - dao.delete(FLAMETHROWER); - assertThat(dao.findById(id)).isNull(); - } - - @Test - public void should_delete_entity_asynchronously() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - CompletableFutures.getUninterruptibly(dao.deleteAsync(FLAMETHROWER)); - assertThat(dao.findById(id)).isNull(); - } - - @Test - public void should_delete_by_id() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - dao.deleteById(id); - assertThat(dao.findById(id)).isNull(); - - // 
Non-existing id should be silently ignored - dao.deleteById(id); - } - - @Test - public void should_delete_by_id_asynchronously() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - CompletableFutures.getUninterruptibly(dao.deleteAsyncById(id)); - assertThat(dao.findById(id)).isNull(); - - // Non-existing id should be silently ignored - CompletableFutures.getUninterruptibly(dao.deleteAsyncById(id)); - } - - @Test - public void should_delete_if_exists() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - assertThat(dao.deleteIfExists(FLAMETHROWER)).isTrue(); - assertThat(dao.findById(id)).isNull(); - - assertThat(dao.deleteIfExists(FLAMETHROWER)).isFalse(); - } - - @Test - public void should_delete_if_exists_asynchronously() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - assertThat(CompletableFutures.getUninterruptibly(dao.deleteAsyncIfExists(FLAMETHROWER))) - .isTrue(); - assertThat(dao.findById(id)).isNull(); - - assertThat(CompletableFutures.getUninterruptibly(dao.deleteAsyncIfExists(FLAMETHROWER))) - .isFalse(); - } - - @Test - public void should_delete_with_condition() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - ResultSet rs = dao.deleteIfDescriptionMatches(id, "foo"); - assertThat(rs.wasApplied()).isFalse(); - assertThat(rs.one().getString("description")).isEqualTo(FLAMETHROWER.getDescription()); - - rs = dao.deleteIfDescriptionMatches(id, FLAMETHROWER.getDescription()); - assertThat(rs.wasApplied()).isTrue(); - assertThat(dao.findById(id)).isNull(); - } - - @Test - public void should_delete_with_condition_statement() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - BoundStatement bs = dao.deleteIfDescriptionMatchesStatement(id, "foo"); - ResultSet rs = SESSION_RULE.session().execute(bs); - assertThat(rs.wasApplied()).isFalse(); - 
assertThat(rs.one().getString("description")).isEqualTo(FLAMETHROWER.getDescription()); - - rs = dao.deleteIfDescriptionMatches(id, FLAMETHROWER.getDescription()); - assertThat(rs.wasApplied()).isTrue(); - assertThat(dao.findById(id)).isNull(); - } - - @Test - public void should_delete_with_condition_asynchronously() { - UUID id = FLAMETHROWER.getId(); - assertThat(dao.findById(id)).isNotNull(); - - AsyncResultSet rs = - CompletableFutures.getUninterruptibly(dao.deleteAsyncIfDescriptionMatches(id, "foo")); - assertThat(rs.wasApplied()).isFalse(); - assertThat(rs.one().getString("description")).isEqualTo(FLAMETHROWER.getDescription()); - - rs = - CompletableFutures.getUninterruptibly( - dao.deleteAsyncIfDescriptionMatches(id, FLAMETHROWER.getDescription())); - assertThat(rs.wasApplied()).isTrue(); - assertThat(dao.findById(id)).isNull(); - } - - @Test - public void should_delete_by_partition_key() { - // should delete FLAMETHROWER_SALE_[1-4] - saleDao.deleteByIdForDay(FLAMETHROWER.getId(), DATE_1); - assertThat(saleDao.all().all()).containsOnly(FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_by_partition_key_statement() { - // should delete FLAMETHROWER_SALE_[1-4] - SESSION_RULE.session().execute(saleDao.deleteByIdForDayStatement(FLAMETHROWER.getId(), DATE_1)); - assertThat(saleDao.all().all()).containsOnly(FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_by_partition_key_and_partial_clustering() { - // should delete FLAMETHROWER_SALE_{1,3,4] - saleDao.deleteByIdForCustomer(FLAMETHROWER.getId(), DATE_1, 1); - assertThat(saleDao.all().all()) - .containsOnly(FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_by_partition_key_and_partial_clustering_statement() { - // should delete FLAMETHROWER_SALE_{1,3,4] - SESSION_RULE - .session() - .execute(saleDao.deleteByIdForCustomerStatement(FLAMETHROWER.getId(), DATE_1, 1)); - 
assertThat(saleDao.all().all()) - .containsOnly(FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_by_primary_key_sales() { - // should delete FLAMETHROWER_SALE_2 - saleDao.deleteByIdForCustomerAtTime( - FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs()); - assertThat(saleDao.all().all()) - .containsOnly( - FLAMETHROWER_SALE_1, - FLAMETHROWER_SALE_3, - FLAMETHROWER_SALE_4, - FLAMETHROWER_SALE_5, - MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_by_primary_key_sales_statement() { - // should delete FLAMETHROWER_SALE_2 - SESSION_RULE - .session() - .execute( - saleDao.deleteByIdForCustomerAtTimeStatement( - FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs())); - assertThat(saleDao.all().all()) - .containsOnly( - FLAMETHROWER_SALE_1, - FLAMETHROWER_SALE_3, - FLAMETHROWER_SALE_4, - FLAMETHROWER_SALE_5, - MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_if_price_matches() { - ResultSet result = - saleDao.deleteIfPriceMatches( - FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs(), 250.0); - - assertThat(result.wasApplied()).isFalse(); - Row row = result.one(); - assertThat(row).isNotNull(); - assertThat(row.getDouble("price")).isEqualTo(500.0); - - result = - saleDao.deleteIfPriceMatches( - FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs(), 500.0); - - assertThat(result.wasApplied()).isTrue(); - } - - @Test - public void should_delete_if_price_matchesStatement() { - BoundStatement bs = - saleDao.deleteIfPriceMatchesStatement( - FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs(), 250.0); - ResultSet result = SESSION_RULE.session().execute(bs); - - assertThat(result.wasApplied()).isFalse(); - Row row = result.one(); - assertThat(row).isNotNull(); - assertThat(row.getDouble("price")).isEqualTo(500.0); - - bs = - saleDao.deleteIfPriceMatchesStatement( - FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs(), 500.0); - result = 
SESSION_RULE.session().execute(bs); - - assertThat(result.wasApplied()).isTrue(); - } - - @Test - public void should_delete_if_exists_sales() { - assertThat(saleDao.deleteIfExists(FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs())) - .isTrue(); - - assertThat(saleDao.deleteIfExists(FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs())) - .isFalse(); - } - - @Test - public void should_delete_within_time_range() { - // should delete FLAMETHROWER_SALE_{1,3}, but not 4 because range ends before - saleDao.deleteInTimeRange( - FLAMETHROWER.getId(), - DATE_1, - 1, - FLAMETHROWER_SALE_1.getTs(), - Uuids.startOf(Uuids.unixTimestamp(FLAMETHROWER_SALE_4.getTs()) - 1000)); - - assertThat(saleDao.all().all()) - .containsOnly( - FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_within_time_range_statement() { - // should delete FLAMETHROWER_SALE_{1,3}, but not 4 because range ends before - SESSION_RULE - .session() - .execute( - saleDao.deleteInTimeRangeStatement( - FLAMETHROWER.getId(), - DATE_1, - 1, - FLAMETHROWER_SALE_1.getTs(), - Uuids.startOf(Uuids.unixTimestamp(FLAMETHROWER_SALE_4.getTs()) - 1000))); - - assertThat(saleDao.all().all()) - .containsOnly( - FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_delete_if_price_matches_custom_where() { - ResultSet result = - saleDao.deleteCustomWhereCustomIf( - 2, FLAMETHROWER.getId(), DATE_1, FLAMETHROWER_SALE_2.getTs(), 250.0); - - assertThat(result.wasApplied()).isFalse(); - Row row = result.one(); - assertThat(row).isNotNull(); - assertThat(row.getDouble("price")).isEqualTo(500.0); - - result = - saleDao.deleteCustomWhereCustomIf( - 2, FLAMETHROWER.getId(), DATE_1, FLAMETHROWER_SALE_2.getTs(), 500.0); - - assertThat(result.wasApplied()).isTrue(); - } - - @Test - public void should_delete_if_price_matches_custom_where_statement() { - BoundStatement bs = - 
saleDao.deleteCustomWhereCustomIfStatement( - 2, FLAMETHROWER.getId(), DATE_1, FLAMETHROWER_SALE_2.getTs(), 250.0); - ResultSet result = SESSION_RULE.session().execute(bs); - - assertThat(result.wasApplied()).isFalse(); - Row row = result.one(); - assertThat(row).isNotNull(); - assertThat(row.getDouble("price")).isEqualTo(500.0); - - bs = - saleDao.deleteCustomWhereCustomIfStatement( - 2, FLAMETHROWER.getId(), DATE_1, FLAMETHROWER_SALE_2.getTs(), 500.0); - result = SESSION_RULE.session().execute(bs); - - assertThat(result.wasApplied()).isTrue(); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSaleDao productSaleDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductDao { - - @Delete - void delete(Product product); - - @Delete(entityClass = Product.class) - void deleteById(UUID productId); - - @Delete(ifExists = true) - boolean deleteIfExists(Product product); - - @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") - ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription); - - @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") - BoundStatement deleteIfDescriptionMatchesStatement(UUID productId, String expectedDescription); - - @Delete - CompletionStage deleteAsync(Product product); - - @Delete(entityClass = Product.class) - CompletableFuture deleteAsyncById(UUID productId); - - @Delete(ifExists = true) - CompletableFuture deleteAsyncIfExists(Product product); - - @Delete(entityClass = Product.class, customIfClause = "description = :\"ExpectedDescription\"") - CompletableFuture deleteAsyncIfDescriptionMatches( - UUID productId, @CqlName("\"ExpectedDescription\"") String expectedDescription); - - @Select - Product findById(UUID productId); - - @Insert - void save(Product 
product); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductSaleDao { - @Delete - void delete(ProductSale product); - - // delete all rows in partition - @Delete(entityClass = ProductSale.class) - ResultSet deleteByIdForDay(UUID id, String day); - - // delete all rows in partition - @Delete(entityClass = ProductSale.class) - BoundStatement deleteByIdForDayStatement(UUID id, String day); - - // delete by partition key and partial clustering key - @Delete(entityClass = ProductSale.class) - ResultSet deleteByIdForCustomer(UUID id, String day, int customerId); - - // delete by partition key and partial clustering key - @Delete(entityClass = ProductSale.class) - BoundStatement deleteByIdForCustomerStatement(UUID id, String day, int customerId); - - // delete row (full primary key) - @Delete(entityClass = ProductSale.class) - ResultSet deleteByIdForCustomerAtTime(UUID id, String day, int customerId, UUID ts); - - // delete row (full primary key) - @Delete(entityClass = ProductSale.class) - BoundStatement deleteByIdForCustomerAtTimeStatement( - UUID id, String day, int customerId, UUID ts); - - @Delete(entityClass = ProductSale.class, customIfClause = "price = :expectedPrice") - ResultSet deleteIfPriceMatches( - UUID id, String day, int customerId, UUID ts, double expectedPrice); - - @Delete(entityClass = ProductSale.class, customIfClause = "price = :expectedPrice") - BoundStatement deleteIfPriceMatchesStatement( - UUID id, String day, int customerId, UUID ts, double expectedPrice); - - @Delete( - entityClass = ProductSale.class, - customWhereClause = - "id = :id and day = :day and customer_id = :customerId and ts >= :startTs and ts < " - + ":endTs") - ResultSet deleteInTimeRange(UUID id, String day, int customerId, UUID startTs, UUID endTs); - - @Delete( - entityClass = ProductSale.class, - customWhereClause = - "id = :id and day = :day and customer_id = :customerId and ts >= :startTs and ts < " - + ":endTs") - 
BoundStatement deleteInTimeRangeStatement( - UUID id, String day, int customerId, UUID startTs, UUID endTs); - - // transpose order of parameters so doesn't match primary key to ensure that works. - @Delete( - entityClass = ProductSale.class, - customWhereClause = "id = :id and day = :day and customer_id = :customerId and ts = :ts", - customIfClause = "price = :expectedPrice") - ResultSet deleteCustomWhereCustomIf( - int customerId, UUID id, String day, UUID ts, double expectedPrice); - - // transpose order of parameters so doesn't match primary key to ensure that works. - @Delete( - entityClass = ProductSale.class, - customWhereClause = "id = :id and day = :day and customer_id = :customerId and ts = :ts", - customIfClause = "price = :expectedPrice") - BoundStatement deleteCustomWhereCustomIfStatement( - int customerId, UUID id, String day, UUID ts, double expectedPrice); - - @Delete(entityClass = ProductSale.class, ifExists = true) - boolean deleteIfExists(UUID id, String day, int customerId, UUID ts); - - @Select - PagingIterable all(); - - @Insert - void save(ProductSale sale); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteReactiveIT.java deleted file mode 100644 index 2eb898021ba..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteReactiveIT.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import io.reactivex.Flowable; -import java.util.UUID; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.Test; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -// Do not run LWT tests in parallel because they may interfere. Tests operate on the same row. -public class DeleteReactiveIT extends InventoryITBase { - - private static CustomCcmRule ccmRule = configureCcm(CustomCcmRule.builder()).build(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - private static CustomCcmRule.Builder configureCcm(CustomCcmRule.Builder builder) { - if (!CcmBridge.isDistributionOf( - BackendType.DSE, (dist, cass) -> cass.nextStable().compareTo(Version.V4_0_0) >= 0)) { - builder.withCassandraConfiguration("enable_sasi_indexes", true); - } - return builder; - } - - private static DseProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = sessionRule.session(); - - for (String query : createStatements(ccmRule)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } - - DseInventoryMapper inventoryMapper = - new DeleteReactiveIT_DseInventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); - } - - @Before - public void insertFixtures() { - Flowable.fromPublisher(dao.saveReactive(FLAMETHROWER)).blockingSubscribe(); - } - - @Test - public void should_delete_entity_reactive() { - UUID id = FLAMETHROWER.getId(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); - - ReactiveResultSet rs = dao.deleteEntityReactive(FLAMETHROWER); - ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); - - assertThat(row).isNull(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) - .isNull(); - } - - @Test - public void should_delete_by_id_reactive() { - UUID id = FLAMETHROWER.getId(); - 
assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); - - ReactiveResultSet rs = dao.deleteByIdReactive(id); - ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); - - assertThat(row).isNull(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) - .isNull(); - - // Non-existing id should be silently ignored - rs = dao.deleteByIdReactive(id); - row = Flowable.fromPublisher(rs).singleElement().blockingGet(); - - assertThat(row).isNull(); - } - - @Test - public void should_delete_if_exists_reactive() { - UUID id = FLAMETHROWER.getId(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); - { - ReactiveResultSet rs = dao.deleteIfExistsReactive(FLAMETHROWER); - ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); - assertThat(row.wasApplied()).isTrue(); - assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isTrue(); - } - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) - .isNull(); - { - ReactiveResultSet rs = dao.deleteIfExistsReactive(FLAMETHROWER); - ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); - assertThat(row.wasApplied()).isFalse(); - assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isFalse(); - } - } - - @Test - public void should_delete_with_condition_reactive() { - UUID id = FLAMETHROWER.getId(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); - { - ReactiveResultSet rs = dao.deleteIfDescriptionMatchesReactive(id, "foo"); - ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); - assertThat(row.wasApplied()).isFalse(); - assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isFalse(); - assertThat(row.getString("description")).isEqualTo(FLAMETHROWER.getDescription()); - } - { - ReactiveResultSet rs = - 
dao.deleteIfDescriptionMatchesReactive(id, FLAMETHROWER.getDescription()); - ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); - assertThat(row.wasApplied()).isTrue(); - assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isTrue(); - } - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) - .isNull(); - } - - @Mapper - public interface DseInventoryMapper { - @DaoFactory - DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DseProductDao { - - @Delete - ReactiveResultSet deleteEntityReactive(Product product); - - @Delete(entityClass = Product.class) - ReactiveResultSet deleteByIdReactive(UUID productId); - - @Delete(ifExists = true) - ReactiveResultSet deleteIfExistsReactive(Product product); - - @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") - ReactiveResultSet deleteIfDescriptionMatchesReactive( - UUID productId, String expectedDescription); - - @Select - MappedReactiveResultSet findByIdReactive(UUID productId); - - @Insert - ReactiveResultSet saveReactive(Product product); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java deleted file mode 100644 index 3e532e97c00..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java +++ /dev/null @@ -1,1019 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Computed; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.HierarchyScanStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import 
com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Transient; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.function.Consumer; -import java.util.function.Function; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.junit.runner.RunWith; - -@Category(ParallelizableTests.class) -@RunWith(DataProviderRunner.class) -public class EntityPolymorphismIT { - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestMapper mapper; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - SchemaChangeSynchronizer.withLock( - () -> { - for (String query : - ImmutableList.of( - "CREATE TYPE point2d (\"X\" int, \"Y\" int)", - 
"CREATE TYPE point3d (\"X\" int, \"Y\" int, \"Z\" int)", - "CREATE TABLE circles (circle_id uuid PRIMARY KEY, center2d frozen, radius " - + "double, tags set)", - "CREATE TABLE rectangles (rect_id uuid PRIMARY KEY, bottom_left frozen, top_right frozen, tags set)", - "CREATE TABLE squares (square_id uuid PRIMARY KEY, bottom_left frozen, top_right frozen, tags set)", - "CREATE TABLE spheres (sphere_id uuid PRIMARY KEY, center3d frozen, radius " - + "double, tags set)", - "CREATE TABLE devices (device_id uuid PRIMARY KEY, name text)", - "CREATE TABLE tracked_devices (device_id uuid PRIMARY KEY, name text, location text)", - "CREATE TABLE simple_devices (id uuid PRIMARY KEY, in_use boolean)")) { - session.execute( - SimpleStatement.builder(query) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - }); - mapper = new EntityPolymorphismIT_TestMapperBuilder(session).build(); - } - - // define parent interface with dao methods. - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - interface BaseDao { - @Insert - void save(T t); - - @Select - T findById(UUID id); - - @Delete - void delete(T t); - - @SetEntity - void bind(T t, BoundStatementBuilder builder); - - @GetEntity - T one(ResultSet result); - - @Update - void update(T t); - } - - // another parent interface with dao methods - interface WriteTimeDao extends BaseDao { - @Insert(timestamp = ":writeTime") - void saveWithTime(Y y, long writeTime); - } - - @Dao - interface RectangleDao extends BaseDao {} - - // Define an intermediate interface with same type variable name to ensure - // this doesn't cause any issue in code generation. 
- interface ArbitraryInterface { - default long increment(Y input) { - return input.longValue() + 1; - } - } - - @Dao - interface SquareDao extends WriteTimeDao, ArbitraryInterface {} - - @Dao - interface CircleDao extends WriteTimeDao {} - - @Dao - interface SphereDao extends WriteTimeDao {} - - interface NamedDeviceDao extends BaseDao { - @Query("UPDATE ${qualifiedTableId} SET name = :name WHERE device_id = :id") - void updateName(String name, UUID id); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE device_id = :id") - CompletableFuture findByIdQueryAsync(UUID id); - } - - @Dao - interface DeviceDao extends NamedDeviceDao {} - - @Dao - interface TrackedDeviceDao extends NamedDeviceDao {} - - @Dao - interface SimpleDeviceDao extends BaseDao {} - - @Mapper - public interface TestMapper { - @DaoFactory - CircleDao circleDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - RectangleDao rectangleDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - SquareDao squareDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - SphereDao sphereDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - DeviceDao deviceDao(@DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - TrackedDeviceDao trackedDeviceDao( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - SimpleDeviceDao simpleDeviceDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @DataProvider - public static Object[][] setAndGetProvider() { - Function circleDao = keyspace -> mapper.circleDao(keyspace); - Function rectangleDao = keyspace -> mapper.rectangleDao(keyspace); - Function squareDao = keyspace -> mapper.squareDao(keyspace); - Function sphereDao = keyspace -> mapper.sphereDao(keyspace); - return new Object[][] { - { - new Rectangle(new Point2D(20, 30), new Point2D(50, 60)), - rectangleDao, - (Consumer) (Rectangle r) -> r.setTopRight(new Point2D(21, 31)), - SimpleStatement.newInstance( - "insert into rectangles (rect_id, 
bottom_left, top_right, " - + "tags) values (?, ?, ?, ?)"), - SimpleStatement.newInstance("select * from rectangles where rect_id = :id limit 1") - }, - { - new Circle(new Point2D(11, 22), 12.34), - circleDao, - (Consumer) (Circle c) -> c.setRadius(13.33), - SimpleStatement.newInstance( - "insert into circles (circle_id, center2d, radius, tags) " + "values (?, ?, ?, ?)"), - SimpleStatement.newInstance( - "select circle_id, center2d, radius, tags, writetime(radius) " - + "as write_time from circles where circle_id = :id limit 1") - }, - { - new Square(new Point2D(20, 30), new Point2D(50, 60)), - squareDao, - (Consumer) (Square s) -> s.setBottomLeft(new Point2D(10, 20)), - SimpleStatement.newInstance( - "insert into squares (square_id, bottom_left, top_right, " - + "tags) values (?, ?, ?, ?)"), - SimpleStatement.newInstance( - "select square_id, bottom_left, top_right, tags, writetime" - + "(bottom_left) as write_time from squares where square_id = :id limit 1") - }, - { - new Sphere(new Point3D(11, 22, 33), 34.56), - sphereDao, - (Consumer) (Sphere s) -> s.setCenter(new Point3D(10, 20, 30)), - SimpleStatement.newInstance( - "insert into spheres (sphere_id, center3d, radius, tags) " + "values (?, ?, ?, ?)"), - SimpleStatement.newInstance( - "select sphere_id, center3d, radius, tags, writetime(radius) " - + "as write_time from spheres where sphere_id = :id limit 1") - }, - }; - } - - @UseDataProvider("setAndGetProvider") - @Test - public void should_set_and_get_entity_then_update_then_delete( - T t, - Function> daoProvider, - Consumer updater, - SimpleStatement insertStatement, - SimpleStatement selectStatement) { - BaseDao dao = daoProvider.apply(SESSION_RULE.keyspace()); - CqlSession session = SESSION_RULE.session(); - PreparedStatement prepared = session.prepare(insertStatement); - - BoundStatementBuilder bs = prepared.boundStatementBuilder(); - dao.bind(t, bs); - - session.execute(bs.build()); - - PreparedStatement selectPrepared = 
session.prepare(selectStatement); - BoundStatement selectBs = selectPrepared.bind(t.getId()); - T retrieved = dao.one(session.execute(selectBs)); - assertThat(retrieved).isEqualTo(t); - - // update value - updater.accept(t); - dao.update(t); - - // retrieve value to ensure update worked correctly. - assertThat(dao.one(session.execute(selectBs))).isEqualTo(t); - - // delete value and ensure it's gone - dao.delete(t); - assertThat(dao.one(session.execute(selectBs))).isNull(); - } - - @Test - public void should_save_and_retrieve_circle() { - // verifies the inheritance behavior around Circle. - // * CqlName("circle_id") on getId renames id property to circle_id - // * annotations, but these are primarily used for - // verifying inheritance behavior in Sphere. - // * verifies writeTime is set. - CircleDao dao = mapper.circleDao(SESSION_RULE.keyspace()); - - long writeTime = System.currentTimeMillis() - 1000; - Circle circle = new Circle(new Point2D(11, 22), 12.34); - dao.saveWithTime(circle, writeTime); - - Circle retrievedCircle = dao.findById(circle.getId()); - assertThat(retrievedCircle).isEqualTo(circle); - assertThat(retrievedCircle.getWriteTime()).isEqualTo(writeTime); - } - - @Test - public void should_save_and_retrieve_rectangle() { - // verifies the inheritance behavior around Rectangle: - // * CqlName("rect_id") on getId renames id property to rect_id - // * annotations work, but these are primarily used for - // verifying inheritance behavior in Square. 
- RectangleDao dao = mapper.rectangleDao(SESSION_RULE.keyspace()); - - Rectangle rectangle = new Rectangle(new Point2D(20, 30), new Point2D(50, 60)); - dao.save(rectangle); - - assertThat(dao.findById(rectangle.getId())).isEqualTo(rectangle); - } - - @Test - public void should_save_and_retrieve_square() { - // verifies the inheritance behavior around Square: - // * CqlName("square_id") on getId renames id property to square_id - // * height remains transient even though we define field/getter/setter - // * getBottomLeft() retains CqlName from parent. - // * verifies writeTime is set. - SquareDao dao = mapper.squareDao(SESSION_RULE.keyspace()); - - long writeTime = System.currentTimeMillis() - 1000; - Square square = new Square(new Point2D(20, 30), new Point2D(50, 60)); - dao.saveWithTime(square, writeTime); - - Square retrievedSquare = dao.findById(square.getId()); - assertThat(retrievedSquare).isEqualTo(square); - assertThat(retrievedSquare.getWriteTime()).isEqualTo(writeTime); - } - - @Test - public void should_save_and_retrieve_sphere() { - // verifies the inheritance behavior around Circle: - // * @CqlName("sphere_id") on getId renames id property to sphere_id - // * @CqlName("center3d") on getCenter renames center property from center2d - // which was renamed on the parent field for Circle - // * That getCenter returns Point3D influences get/set behavior to use Point3D - // * Override setRadius to return Sphere causes no issues. - // * Interface method getVolume() is skipped because no field exists. - // * WriteTime is inherited, so queried and set. 
- SphereDao dao = mapper.sphereDao(SESSION_RULE.keyspace()); - - long writeTime = System.currentTimeMillis() - 1000; - Sphere sphere = new Sphere(new Point3D(11, 22, 33), 34.56); - dao.saveWithTime(sphere, writeTime); - - Sphere retrievedSphere = dao.findById(sphere.getId()); - assertThat(retrievedSphere).isEqualTo(sphere); - assertThat(retrievedSphere.getWriteTime()).isEqualTo(writeTime); - } - - @Test - public void should_save_and_retrieve_device() throws Exception { - // verifies the hierarchy scanner behavior around Device: - // * by virtue of Assert setting highestAncestor to Asset.class, location property from - // LocatableItem should not be included - DeviceDao dao = mapper.deviceDao(SESSION_RULE.keyspace(), CqlIdentifier.fromCql("devices")); - - // save should be successful as location property omitted. - Device device = new Device("my device", "New York"); - dao.save(device); - - Device retrievedDevice = dao.findById(device.getId()); - assertThat(retrievedDevice.getId()).isEqualTo(device.getId()); - assertThat(retrievedDevice.getName()).isEqualTo(device.getName()); - // location should be null. - assertThat(retrievedDevice.getLocation()).isNull(); - - // should be able to use @Query with update - String name = "my new name"; - dao.updateName(name, device.getId()); - - // should be able to use @Query returning Entity and name should be applied. - retrievedDevice = dao.findByIdQueryAsync(device.getId()).get(); - assertThat(retrievedDevice.getName()).isEqualTo(name); - } - - @Test - public void should_save_and_retrieve_tracked_device() throws Exception { - // verifies the hierarchy scanner behavior around TrackedDevice: - // * Since TrackedDevice defines a default @HierarchyScanStrategy it should - // include LocatableItem's location property, even though Asset defines - // a strategy that excludes it. 
- TrackedDeviceDao dao = - mapper.trackedDeviceDao(SESSION_RULE.keyspace(), CqlIdentifier.fromCql("tracked_devices")); - - TrackedDevice device = new TrackedDevice("my device", "New York"); - dao.save(device); - - // location property should be present, thus should equal saved item. - TrackedDevice retrievedDevice = dao.findById(device.getId()); - assertThat(retrievedDevice).isEqualTo(device); - - // should be able to use @Query with update - String name = "my new name"; - dao.updateName(name, device.getId()); - - // should be able to use @Query returning Entity and name should be applied. - retrievedDevice = dao.findByIdQueryAsync(device.getId()).get(); - assertThat(retrievedDevice.getName()).isEqualTo(name); - } - - @Test - public void should_save_and_retrieve_simple_device() { - // verifies the hierarchy scanner behavior around SimpleDevice: - // * Since SimpleDevice defines a @HierarchyScanStrategy that prevents - // scanning of ancestors, only its properties (id, inUse) should be included. 
- SimpleDeviceDao dao = mapper.simpleDeviceDao(SESSION_RULE.keyspace()); - - SimpleDevice device = new SimpleDevice(true); - dao.save(device); - - SimpleDevice retrievedDevice = dao.findById(device.getId()); - assertThat(retrievedDevice.getId()).isEqualTo(device.getId()); - assertThat(retrievedDevice.getInUse()).isEqualTo(device.getInUse()); - // location and name should be null - assertThat(retrievedDevice.getLocation()).isNull(); - assertThat(retrievedDevice.getName()).isNull(); - } - - @Entity - static class Point2D { - private int x; - private int y; - - public Point2D() {} - - public Point2D(int x, int y) { - this.x = x; - this.y = y; - } - - @CqlName("\"X\"") - public int getX() { - return x; - } - - public void setX(int x) { - this.x = x; - } - - @CqlName("\"Y\"") - public int getY() { - return y; - } - - public void setY(int y) { - this.y = y; - } - - @Override - public boolean equals(Object other) { - if (this == other) return true; - else if (other instanceof Point2D) { - Point2D that = (Point2D) other; - return this.x == that.x && this.y == that.y; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(x, y); - } - } - - @Entity - static class Point3D extends Point2D { - private int z; - - public Point3D() {} - - public Point3D(int x, int y, int z) { - super(x, y); - this.z = z; - } - - @CqlName("\"Z\"") - public int getZ() { - return z; - } - - public void setZ(int z) { - this.z = z; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Point3D) { - Point3D that = (Point3D) other; - return super.equals(that) && this.z == that.z; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), z); - } - } - - interface Shape2D { - Set getTags(); - - // test annotation on interface method, should get inherited everywhere - @Transient - double getArea(); - } - - interface Shape3D { - double 
getVolume(); - } - - abstract static class Shape implements Shape2D { - - @PartitionKey // annotated field on superclass; annotation will get inherited in all subclasses - protected UUID id; - - protected Set tags; - - protected String location; - - public Shape() { - this.id = UUID.randomUUID(); - this.tags = Sets.newHashSet("cool", "awesome"); - } - - @CqlName("wrong") - public abstract UUID getId(); - - public void setId(UUID id) { - this.id = id; - } - - @Override - public Set getTags() { - return tags; - } - - public void setTags(Set tags) { - this.tags = tags; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Shape) { - Shape that = (Shape) other; - return Objects.equals(id, that.id) && Objects.equals(tags, that.tags); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, tags); - } - } - - interface WriteTimeProvider { - long getWriteTime(); - } - - @CqlName("circles") - @Entity - static class Circle extends Shape implements WriteTimeProvider { - - @CqlName("center2d") - protected Point2D center; - - protected double radius; - - private long writeTime; - - public Circle() {} - - public Circle(Point2D center, double radius) { - super(); - this.center = center; - this.radius = radius; - } - - @Override - @CqlName("circle_id") - public UUID getId() { - return id; - } - - @Override - public double getArea() { - return Math.PI * Math.pow(getRadius(), 2); - } - - public double getRadius() { - return this.radius; - } - - public Circle setRadius(double radius) { - this.radius = radius; - return this; - } - - @Computed("writetime(radius)") - @Override - public long getWriteTime() { - return writeTime; - } - - public void setWriteTime(long writeTime) { - this.writeTime = writeTime; - } - - public Point2D getCenter() { - return center; - } - - public void setCenter(Point2D center) { - this.center = center; - } - - @Override - public boolean 
equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Circle) { - Circle that = (Circle) other; - return super.equals(that) - && Double.compare(that.radius, radius) == 0 - && center.equals(that.center); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), center, radius); - } - } - - @CqlName("rectangles") - @Entity - static class Rectangle extends Shape { - private Point2D bottomLeft; - private Point2D topRight; - - public Rectangle() {} - - public Rectangle(Point2D bottomLeft, Point2D topRight) { - super(); - this.bottomLeft = bottomLeft; - this.topRight = topRight; - } - - @CqlName("rect_id") - @Override - public UUID getId() { - return id; - } - - @CqlName("bottom_left") - public Point2D getBottomLeft() { - return bottomLeft; - } - - public void setBottomLeft(Point2D bottomLeft) { - this.bottomLeft = bottomLeft; - } - - @CqlName("top_right") - public Point2D getTopRight() { - return topRight; - } - - public void setTopRight(Point2D topRight) { - this.topRight = topRight; - } - - // test annotation in class method - @Transient - public double getWidth() { - return Math.abs(topRight.getX() - bottomLeft.getX()); - } - - @Transient - public double getHeight() { - return Math.abs(topRight.getY() - bottomLeft.getY()); - } - - @Override - public double getArea() { - return getWidth() * getHeight(); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Rectangle) { - Rectangle that = (Rectangle) other; - return super.equals(that) - && bottomLeft.equals(that.bottomLeft) - && topRight.equals(that.topRight); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), bottomLeft, topRight); - } - } - - @CqlName("squares") - @Entity - static class Square extends Rectangle implements WriteTimeProvider { - - @Computed("writetime(bottom_left)") - private 
long writeTime; - - public Square() {} - - public Square(Point2D bottomLeft, Point2D topRight) { - super(bottomLeft, topRight); - assert getHeight() == getWidth(); - } - - @CqlName("square_id") - @Override - public UUID getId() { - return id; - } - - @Override - // inherits @CqlName - public Point2D getBottomLeft() { - return super.getBottomLeft(); - } - - @Override - // inherits @Transient - public double getHeight() { - return getWidth(); - } - - public void setHeight(Point2D height) { - throw new IllegalArgumentException("This method should never be called"); - } - - @Override - public long getWriteTime() { - return writeTime; - } - - public void setWriteTime(long writeTime) { - this.writeTime = writeTime; - } - } - - @CqlName("spheres") - @Entity - static class Sphere extends Circle implements Shape3D { - - // ignored field - private long writeTime; - - public Sphere() {} - - public Sphere(Point3D center, double radius) { - super(center, radius); - } - - @CqlName("sphere_id") - @Override - public UUID getId() { - return id; - } - - // overrides field annotation in Circle, - // note that the property type is narrowed down to Point3D - @CqlName("center3d") - @Override - public Point3D getCenter() { - return (Point3D) center; - } - - @Override - public void setCenter(Point2D center) { - assert center instanceof Point3D; - this.center = center; - } - - // overridden builder-style setter - @Override - public Sphere setRadius(double radius) { - super.setRadius(radius); - return this; - } - - @Override - public double getVolume() { - return 4d / 3d * Math.PI * Math.pow(getRadius(), 3); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Sphere) { - Sphere that = (Sphere) other; - return super.equals(that) && writeTime == that.writeTime; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), writeTime); - } - } - - abstract static class 
LocatableItem { - - protected String location; - - LocatableItem() {} - - LocatableItem(String location) { - this.location = location; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof LocatableItem) { - LocatableItem that = (LocatableItem) other; - return Objects.equals(this.location, that.location); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(location); - } - } - - // define strategy that stops scanning at Asset, meaning LocatableItem's location will - // not be considered a property. - @HierarchyScanStrategy(highestAncestor = Asset.class, includeHighestAncestor = true) - abstract static class Asset extends LocatableItem { - protected String name; - - Asset() {} - - Asset(String name, String location) { - super(location); - this.name = name; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Asset) { - Asset that = (Asset) other; - return super.equals(that) && Objects.equals(this.name, that.name); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), name); - } - } - - // should inherit scan strategy from Asset, thus location will not be present. - @Entity - @CqlName("devices") - static class Device extends Asset { - - @PartitionKey protected UUID id; - - Device() {} - - Device(String name, String location) { - super(name, location); - this.id = UUID.randomUUID(); - } - - // rename to device_id, if Device not included in scanning, this won't be used. 
- @CqlName("device_id") - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Device) { - Device that = (Device) other; - return super.equals(that) && this.id.equals(that.id); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), id); - } - } - - // use default strategy, which should override Assert's strategy, and thus location - // should be considered a property. - @HierarchyScanStrategy - @Entity - @CqlName("tracked_devices") - static class TrackedDevice extends Device { - TrackedDevice() {} - - TrackedDevice(String name, String location) { - super(name, location); - } - } - - // do not scan ancestors, so only id and inUse should be considered properties. - @HierarchyScanStrategy(scanAncestors = false) - @Entity - @CqlName("simple_devices") - static class SimpleDevice extends Device { - boolean inUse; - - // suppress error prone warning as we are doing this with intent - @SuppressWarnings("HidingField") - @PartitionKey - private UUID id; - - SimpleDevice() {} - - SimpleDevice(boolean inUse) { - super(null, null); - this.id = UUID.randomUUID(); - this.inUse = inUse; - } - - public boolean getInUse() { - return inUse; - } - - public void setInUse(boolean inUse) { - this.inUse = inUse; - } - - @Override - public UUID getId() { - return id; - } - - @Override - public void setId(UUID id) { - this.id = id; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof SimpleDevice) { - SimpleDevice that = (SimpleDevice) other; - return super.equals(that) && this.inUse == that.inUse && this.id.equals(that.id); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), inUse, id); - } - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/FluentEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/FluentEntityIT.java deleted file mode 100644 index fa93e4e768b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/FluentEntityIT.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle.FLUENT; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class FluentEntityIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = 
SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static FluentProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper mapper = InventoryMapper.builder(session).build(); - dao = mapper.immutableProductDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_insert_and_retrieve_immutable_entities() { - FluentProduct originalProduct = - new FluentProduct() - .id(UUID.randomUUID()) - .description("mock description") - .dimensions(new Dimensions(1, 2, 3)); - dao.save(originalProduct); - - FluentProduct retrievedProduct = dao.findById(originalProduct.id()); - assertThat(retrievedProduct).isEqualTo(originalProduct); - } - - @Entity - @CqlName("product") - @PropertyStrategy(getterStyle = FLUENT, setterStyle = SetterStyle.FLUENT) - public static class FluentProduct { - @PartitionKey private UUID id; - private String description; - private Dimensions dimensions; - - public UUID id() { - return id; - } - - public FluentProduct id(UUID id) { - this.id = id; - return this; - } - - public String description() { - return description; - } - - public FluentProduct description(String description) { - this.description = description; - return this; - } - - public Dimensions dimensions() { - return dimensions; - } - - public FluentProduct dimensions(Dimensions dimensions) { - this.dimensions = dimensions; - return this; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof FluentProduct) { - FluentProduct that = (FluentProduct) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.description, that.description) - && 
Objects.equals(this.dimensions, that.dimensions); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - } - - @Mapper - public interface InventoryMapper { - static MapperBuilder builder(CqlSession session) { - return new FluentEntityIT_InventoryMapperBuilder(session); - } - - @DaoFactory - FluentProductDao immutableProductDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface FluentProductDao { - @Select - FluentProduct findById(UUID productId); - - @Insert - void save(FluentProduct product); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java deleted file mode 100644 index d3f3eec93ae..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.util.UUID; -import java.util.stream.Stream; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class GetEntityIT extends InventoryITBase { - - 
private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final UUID PRODUCT_2D_ID = UUID.randomUUID(); - - private static ProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - UserDefinedType dimensions2d = - session - .getKeyspace() - .flatMap(ks -> session.getMetadata().getKeyspace(ks)) - .flatMap(ks -> ks.getUserDefinedType("dimensions2d")) - .orElseThrow(AssertionError::new); - session.execute( - "INSERT INTO product2d (id, description, dimensions) VALUES (?, ?, ?)", - PRODUCT_2D_ID, - "2D product", - dimensions2d.newValue(12, 34)); - - InventoryMapper inventoryMapper = new GetEntityIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - - dao.save(FLAMETHROWER); - dao.save(MP3_DOWNLOAD); - } - - @Test - public void should_get_entity_from_complete_row() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT id, description, dimensions, now() FROM product WHERE id = ?", - FLAMETHROWER.getId())); - Row row = rs.one(); - assertThat(row).isNotNull(); - - Product product = dao.get(row); - assertThat(product).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_not_get_entity_from_partial_row_when_not_lenient() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT id, description, now() FROM product WHERE id = ?", FLAMETHROWER.getId())); - Row row = rs.one(); - assertThat(row).isNotNull(); - - Throwable error = 
catchThrowable(() -> dao.get(row)); - assertThat(error).hasMessage("dimensions is not a column in this row"); - } - - @Test - public void should_get_entity_from_partial_row_when_lenient() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT id, dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID)); - Row row = rs.one(); - assertThat(row).isNotNull(); - - Product product = dao.getLenient(row); - assertThat(product.getId()).isEqualTo(PRODUCT_2D_ID); - assertThat(product.getDescription()).isNull(); - assertThat(product.getDimensions()).isNotNull(); - assertThat(product.getDimensions().getWidth()).isEqualTo(12); - assertThat(product.getDimensions().getHeight()).isEqualTo(34); - assertThat(product.getDimensions().getLength()).isZero(); - } - - @Test - public void should_get_entity_from_complete_udt_value() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT dimensions FROM product WHERE id = ?", FLAMETHROWER.getId())); - Row row = rs.one(); - assertThat(row).isNotNull(); - - Dimensions dimensions = dao.get(row.getUdtValue(0)); - assertThat(dimensions).isEqualTo(FLAMETHROWER.getDimensions()); - } - - @Test - public void should_not_get_entity_from_partial_udt_value_when_not_lenient() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID)); - Row row = rs.one(); - assertThat(row).isNotNull(); - - Throwable error = catchThrowable(() -> dao.get(row.getUdtValue(0))); - assertThat(error).hasMessage("length is not a field in this UDT"); - } - - @Test - public void should_get_entity_from_partial_udt_value_when_lenient() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID)); - Row 
row = rs.one(); - assertThat(row).isNotNull(); - - Dimensions dimensions = dao.getLenient(row.getUdtValue(0)); - assertThat(dimensions).isNotNull(); - assertThat(dimensions.getWidth()).isEqualTo(12); - assertThat(dimensions.getHeight()).isEqualTo(34); - assertThat(dimensions.getLength()).isZero(); - } - - @Test - public void should_get_entity_from_first_row_of_result_set() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = session.execute("SELECT * FROM product"); - - Product product = dao.getOne(rs); - // The order depends on the IDs, which are generated dynamically. This is good enough: - assertThat(product.equals(FLAMETHROWER) || product.equals(MP3_DOWNLOAD)).isTrue(); - } - - @Test - public void should_get_entity_from_first_row_of_async_result_set() { - CqlSession session = SESSION_RULE.session(); - AsyncResultSet rs = - CompletableFutures.getUninterruptibly(session.executeAsync("SELECT * FROM product")); - - Product product = dao.getOne(rs); - // The order depends on the IDs, which are generated dynamically. 
This is good enough: - assertThat(product.equals(FLAMETHROWER) || product.equals(MP3_DOWNLOAD)).isTrue(); - } - - @Test - public void should_get_iterable_from_result_set() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = session.execute("SELECT * FROM product"); - PagingIterable products = dao.get(rs); - assertThat(Sets.newHashSet(products)).containsOnly(FLAMETHROWER, MP3_DOWNLOAD); - } - - @Test - public void should_get_stream_from_result_set() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = session.execute("SELECT * FROM product"); - Stream products = dao.getAsStream(rs); - assertThat(products).containsOnly(FLAMETHROWER, MP3_DOWNLOAD); - } - - @Test - public void should_get_async_iterable_from_async_result_set() { - CqlSession session = SESSION_RULE.session(); - AsyncResultSet rs = - CompletableFutures.getUninterruptibly(session.executeAsync("SELECT * FROM product")); - MappedAsyncPagingIterable products = dao.get(rs); - assertThat(Sets.newHashSet(products.currentPage())).containsOnly(FLAMETHROWER, MP3_DOWNLOAD); - assertThat(products.hasMorePages()).isFalse(); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductDao { - - @GetEntity - Product get(Row row); - - @GetEntity(lenient = true) - Product getLenient(Row row); - - @GetEntity - Dimensions get(UdtValue row); - - @GetEntity(lenient = true) - Dimensions getLenient(UdtValue row); - - @GetEntity - PagingIterable get(ResultSet resultSet); - - @GetEntity - Stream getAsStream(ResultSet resultSet); - - @GetEntity - MappedAsyncPagingIterable get(AsyncResultSet resultSet); - - @GetEntity - Product getOne(ResultSet resultSet); - - @GetEntity - Product getOne(AsyncResultSet resultSet); - - @Insert - void save(Product product); - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GuavaFutureProducerService.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GuavaFutureProducerService.java deleted file mode 100644 index 759b01a4e20..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GuavaFutureProducerService.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Futures; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ListenableFuture; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.SettableFuture; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public class GuavaFutureProducerService implements MapperResultProducerService { - - @Override - public Iterable getProducers() { - return ImmutableList.of( - // Note that order matters, both producers operate on ListenableFuture, - // the most specific must come first. 
- new VoidListenableFutureProducer(), new SingleEntityListenableFutureProducer()); - } - - public abstract static class ListenableFutureProducer implements MapperResultProducer { - - @Nullable - @Override - public ListenableFuture execute( - @NonNull Statement statement, - @NonNull MapperContext context, - @Nullable EntityHelper entityHelper) { - SettableFuture result = SettableFuture.create(); - context - .getSession() - .executeAsync(statement) - .whenComplete( - (resultSet, error) -> { - if (error != null) { - result.setException(error); - } else { - result.set(convert(resultSet, entityHelper)); - } - }); - return result; - } - - @Nullable - protected abstract Object convert( - @NonNull AsyncResultSet resultSet, @Nullable EntityHelper entityHelper); - - @Nullable - @Override - public ListenableFuture wrapError(@NonNull Exception e) { - return Futures.immediateFailedFuture(e); - } - } - - public static class VoidListenableFutureProducer extends ListenableFutureProducer { - - private static final GenericType> PRODUCED_TYPE = - new GenericType>() {}; - - @Override - public boolean canProduce(@NonNull GenericType resultType) { - return resultType.equals(PRODUCED_TYPE); - } - - @Nullable - @Override - protected Object convert( - @NonNull AsyncResultSet resultSet, @Nullable EntityHelper entityHelper) { - // ignore results - return null; - } - } - - public static class SingleEntityListenableFutureProducer extends ListenableFutureProducer { - - @Override - public boolean canProduce(@NonNull GenericType resultType) { - return resultType.getRawType().equals(ListenableFuture.class); - } - - @Nullable - @Override - protected Object convert( - @NonNull AsyncResultSet resultSet, @Nullable EntityHelper entityHelper) { - assert entityHelper != null; - Row row = resultSet.one(); - return (row == null) ? 
null : entityHelper.get(row, false); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ImmutableEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ImmutableEntityIT.java deleted file mode 100644 index bdfe92a23f9..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ImmutableEntityIT.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle.FLUENT; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.Computed; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import 
org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ImmutableEntityIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final UUID PRODUCT_2D_ID = UUID.randomUUID(); - - private static ImmutableProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - SchemaChangeSynchronizer.withLock( - () -> { - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - }); - - UserDefinedType dimensions2d = - session - .getKeyspace() - .flatMap(ks -> session.getMetadata().getKeyspace(ks)) - .flatMap(ks -> ks.getUserDefinedType("dimensions2d")) - .orElseThrow(AssertionError::new); - session.execute( - "INSERT INTO product2d (id, description, dimensions) VALUES (?, ?, ?)", - PRODUCT_2D_ID, - "2D product", - dimensions2d.newValue(12, 34)); - - InventoryMapper mapper = InventoryMapper.builder(session).build(); - dao = mapper.immutableProductDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_insert_and_retrieve_immutable_entities() { - ImmutableProduct originalProduct = - new ImmutableProduct( - UUID.randomUUID(), "mock description", new ImmutableDimensions(1, 2, 3), -1); - dao.save(originalProduct); - - ImmutableProduct retrievedProduct = dao.findById(originalProduct.id()); - assertThat(retrievedProduct).isEqualTo(originalProduct); - } - - @Test - public void should_map_immutable_entity_from_complete_row() { - ImmutableProduct originalProduct = - new ImmutableProduct( - UUID.randomUUID(), "mock description", new ImmutableDimensions(1, 2, 3), -1); - dao.save(originalProduct); - Row row = - 
SESSION_RULE - .session() - .execute( - "SELECT id, description, dimensions, writetime(description) AS writetime, now() " - + "FROM product WHERE id = ?", - originalProduct.id()) - .one(); - ImmutableProduct retrievedProduct = dao.mapStrict(row); - assertThat(retrievedProduct.id()).isEqualTo(originalProduct.id()); - assertThat(retrievedProduct.description()).isEqualTo(originalProduct.description()); - assertThat(retrievedProduct.dimensions()).isEqualTo(originalProduct.dimensions()); - assertThat(retrievedProduct.writetime()).isGreaterThan(0); - } - - @Test - public void should_map_immutable_entity_from_partial_row_when_lenient() { - Row row = - SESSION_RULE - .session() - .execute("SELECT id, dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID) - .one(); - ImmutableProduct retrievedProduct = dao.mapLenient(row); - assertThat(retrievedProduct.id()).isEqualTo(PRODUCT_2D_ID); - assertThat(retrievedProduct.dimensions()).isEqualTo(new ImmutableDimensions(0, 12, 34)); - assertThat(retrievedProduct.description()).isNull(); - assertThat(retrievedProduct.writetime()).isZero(); - } - - @Test - public void should_map_immutable_entity_from_complete_udt() { - ImmutableProduct originalProduct = - new ImmutableProduct( - UUID.randomUUID(), "mock description", new ImmutableDimensions(1, 2, 3), -1); - dao.save(originalProduct); - Row row = - SESSION_RULE - .session() - .execute("SELECT dimensions FROM product WHERE id = ?", originalProduct.id()) - .one(); - assertThat(row).isNotNull(); - ImmutableDimensions retrievedDimensions = dao.mapStrict(row.getUdtValue(0)); - assertThat(retrievedDimensions).isEqualTo(originalProduct.dimensions()); - } - - @Test - public void should_map_immutable_entity_from_partial_udt_when_lenient() { - Row row = - SESSION_RULE - .session() - .execute("SELECT dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID) - .one(); - assertThat(row).isNotNull(); - ImmutableDimensions retrievedDimensions = dao.mapLenient(row.getUdtValue(0)); - 
assertThat(retrievedDimensions).isEqualTo(new ImmutableDimensions(0, 12, 34)); - } - - @Entity - @CqlName("product") - @PropertyStrategy(getterStyle = FLUENT, mutable = false) - public static class ImmutableProduct { - @PartitionKey private final UUID id; - private final String description; - private final ImmutableDimensions dimensions; - - @Computed("writetime(description)") - private final long writetime; - - public ImmutableProduct( - UUID id, String description, ImmutableDimensions dimensions, long writetime) { - this.id = id; - this.description = description; - this.dimensions = dimensions; - this.writetime = writetime; - } - - public UUID id() { - return id; - } - - public String description() { - return description; - } - - public ImmutableDimensions dimensions() { - return dimensions; - } - - public long writetime() { - return writetime; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ImmutableProduct) { - ImmutableProduct that = (ImmutableProduct) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.description, that.description) - && Objects.equals(this.dimensions, that.dimensions); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - } - - @Entity - @PropertyStrategy(mutable = false) - public static class ImmutableDimensions { - - private final int length; - private final int width; - private final int height; - - public ImmutableDimensions(int length, int width, int height) { - this.length = length; - this.width = width; - this.height = height; - } - - public int getLength() { - return length; - } - - public int getWidth() { - return width; - } - - public int getHeight() { - return height; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof ImmutableDimensions) { - ImmutableDimensions that = 
(ImmutableDimensions) other; - return this.length == that.length && this.width == that.width && this.height == that.height; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(length, width, height); - } - - @Override - public String toString() { - return "Dimensions{length=" + length + ", width=" + width + ", height=" + height + '}'; - } - } - - @Mapper - public interface InventoryMapper { - static MapperBuilder builder(CqlSession session) { - return new ImmutableEntityIT_InventoryMapperBuilder(session); - } - - @DaoFactory - ImmutableProductDao immutableProductDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ImmutableProductDao { - @Select - ImmutableProduct findById(UUID productId); - - @Insert - void save(ImmutableProduct product); - - @GetEntity - ImmutableProduct mapStrict(Row row); - - @GetEntity(lenient = true) - ImmutableProduct mapLenient(Row row); - - @GetEntity - ImmutableDimensions mapStrict(UdtValue udt); - - @GetEntity(lenient = true) - ImmutableDimensions mapLenient(UdtValue udt); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementIT.java deleted file mode 100644 index 16b6668ea56..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementIT.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Increment; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class IncrementIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule 
SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductRatingDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - session.execute( - SimpleStatement.builder( - "CREATE TABLE product_rating(product_id uuid PRIMARY KEY, " - + "one_star counter, two_star counter, three_star counter, " - + "four_star counter, five_star counter)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - InventoryMapper inventoryMapper = new IncrementIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productRatingDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_increment_counters() { - UUID productId1 = UUID.randomUUID(); - UUID productId2 = UUID.randomUUID(); - - dao.incrementFiveStar(productId1, 1); - dao.incrementFiveStar(productId1, 1); - dao.incrementFourStar(productId1, 1); - - dao.incrementTwoStar(productId2, 1); - dao.incrementThreeStar(productId2, 1); - dao.incrementOneStar(productId2, 1); - - ProductRating product1Totals = dao.get(productId1); - assertThat(product1Totals.getFiveStar()).isEqualTo(2); - assertThat(product1Totals.getFourStar()).isEqualTo(1); - assertThat(product1Totals.getThreeStar()).isEqualTo(0); - assertThat(product1Totals.getTwoStar()).isEqualTo(0); - assertThat(product1Totals.getOneStar()).isEqualTo(0); - - ProductRating product2Totals = dao.get(productId2); - assertThat(product2Totals.getFiveStar()).isEqualTo(0); - assertThat(product2Totals.getFourStar()).isEqualTo(0); - assertThat(product2Totals.getThreeStar()).isEqualTo(1); - assertThat(product2Totals.getTwoStar()).isEqualTo(1); - assertThat(product2Totals.getOneStar()).isEqualTo(1); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductRatingDao productRatingDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - 
@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductRatingDao { - @Select - ProductRating get(UUID productId); - - @Increment(entityClass = ProductRating.class) - void incrementOneStar(UUID productId, long oneStar); - - @Increment(entityClass = ProductRating.class) - void incrementTwoStar(UUID productId, long twoStar); - - @Increment(entityClass = ProductRating.class) - void incrementThreeStar(UUID productId, long threeStar); - - @Increment(entityClass = ProductRating.class) - void incrementFourStar(UUID productId, long fourStar); - - @Increment(entityClass = ProductRating.class) - void incrementFiveStar(UUID productId, long fiveStar); - } - - @Entity - public static class ProductRating { - - @PartitionKey private UUID productId; - private long oneStar; - private long twoStar; - private long threeStar; - private long fourStar; - private long fiveStar; - - public ProductRating() {} - - public UUID getProductId() { - return productId; - } - - public void setProductId(UUID productId) { - this.productId = productId; - } - - public long getOneStar() { - return oneStar; - } - - public void setOneStar(long oneStar) { - this.oneStar = oneStar; - } - - public long getTwoStar() { - return twoStar; - } - - public void setTwoStar(long twoStar) { - this.twoStar = twoStar; - } - - public long getThreeStar() { - return threeStar; - } - - public void setThreeStar(long threeStar) { - this.threeStar = threeStar; - } - - public long getFourStar() { - return fourStar; - } - - public void setFourStar(long fourStar) { - this.fourStar = fourStar; - } - - public long getFiveStar() { - return fiveStar; - } - - public void setFiveStar(long fiveStar) { - this.fiveStar = fiveStar; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ProductRating) { - ProductRating that = (ProductRating) other; - return Objects.equals(this.productId, that.productId) - && this.oneStar == that.oneStar - && 
this.twoStar == that.twoStar - && this.threeStar == that.threeStar - && this.fourStar == that.fourStar - && this.fiveStar == that.fiveStar; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(productId, oneStar, twoStar, threeStar, fourStar, fiveStar); - } - - @Override - public String toString() { - return String.format( - "ProductRating(id=%s, 1*=%d, 2*=%d, 3*=%d, 4*=%d, 5*=%d)", - productId, oneStar, twoStar, threeStar, fourStar, fiveStar); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementWithNullsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementWithNullsIT.java deleted file mode 100644 index 9020a80afed..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementWithNullsIT.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Increment; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.mapper.IncrementIT.ProductRating; -import java.util.UUID; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") -public class IncrementWithNullsIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductRatingDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = 
SESSION_RULE.session(); - - session.execute( - SimpleStatement.builder( - "CREATE TABLE product_rating(product_id uuid PRIMARY KEY, " - + "one_star counter, two_star counter, three_star counter, " - + "four_star counter, five_star counter)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - InventoryMapper inventoryMapper = - new IncrementWithNullsIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productRatingDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_increment_counters() { - UUID productId1 = UUID.randomUUID(); - UUID productId2 = UUID.randomUUID(); - - dao.increment(productId1, null, null, null, null, 1L); - dao.increment(productId1, null, null, null, null, 1L); - dao.increment(productId1, null, null, null, 1L, null); - - dao.increment(productId2, null, 1L, null, null, null); - dao.increment(productId2, null, null, 1L, null, null); - dao.increment(productId2, 1L, null, null, null, null); - - ProductRating product1Totals = dao.get(productId1); - assertThat(product1Totals.getFiveStar()).isEqualTo(2); - assertThat(product1Totals.getFourStar()).isEqualTo(1); - assertThat(product1Totals.getThreeStar()).isEqualTo(0); - assertThat(product1Totals.getTwoStar()).isEqualTo(0); - assertThat(product1Totals.getOneStar()).isEqualTo(0); - - ProductRating product2Totals = dao.get(productId2); - assertThat(product2Totals.getFiveStar()).isEqualTo(0); - assertThat(product2Totals.getFourStar()).isEqualTo(0); - assertThat(product2Totals.getThreeStar()).isEqualTo(1); - assertThat(product2Totals.getTwoStar()).isEqualTo(1); - assertThat(product2Totals.getOneStar()).isEqualTo(1); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductRatingDao productRatingDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.DO_NOT_SET) - public interface ProductRatingDao { - @Select - ProductRating get(UUID productId); - - @Increment(entityClass = ProductRating.class) - void 
increment( - UUID productId, Long oneStar, Long twoStar, Long threeStar, Long fourStar, Long fiveStar); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java deleted file mode 100644 index 5c7530bf69e..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class InsertIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static 
final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper inventoryMapper = new InsertIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - } - - @Before - public void clearProductData() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - @Test - public void should_insert_entity() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - dao.save(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_insert_entity_returning_result_set() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - ResultSet rs = dao.saveReturningResultSet(FLAMETHROWER); - assertThat(rs.getAvailableWithoutFetching()).isZero(); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_return_bound_statement_to_execute() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - BoundStatement bs = dao.saveReturningBoundStatement(FLAMETHROWER); - ResultSet rs = SESSION_RULE.session().execute(bs); - assertThat(rs.getAvailableWithoutFetching()).isZero(); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_insert_entity_asynchronously() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - CompletableFutures.getUninterruptibly(dao.saveAsync(FLAMETHROWER)); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void 
should_insert_entity_asynchronously_returning_result_set() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - AsyncResultSet rs = - CompletableFutures.getUninterruptibly(dao.saveAsyncReturningAsyncResultSet(FLAMETHROWER)); - assertThat(rs.currentPage().iterator()).isExhausted(); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_insert_entity_with_bound_timestamp() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - long timestamp = 1234; - dao.saveWithBoundTimestamp(FLAMETHROWER, timestamp); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT WRITETIME(description) FROM product WHERE id = ?", - FLAMETHROWER.getId())) - .one(); - long writeTime = row.getLong(0); - assertThat(writeTime).isEqualTo(timestamp); - } - - @Test - public void should_insert_entity_with_literal_timestamp() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - dao.saveWithLiteralTimestamp(FLAMETHROWER); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT WRITETIME(description) FROM product WHERE id = ?", - FLAMETHROWER.getId())) - .one(); - long writeTime = row.getLong(0); - assertThat(writeTime).isEqualTo(1234); - } - - @Test - public void should_insert_entity_with_bound_ttl() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - int insertedTtl = 86400; - dao.saveWithBoundTtl(FLAMETHROWER, insertedTtl); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT TTL(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) - .one(); - Integer retrievedTtl = row.get(0, Integer.class); - assertThat(retrievedTtl).isNotNull().isLessThanOrEqualTo(insertedTtl); - } - - @Test - public void should_insert_entity_with_literal_ttl() { - 
assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - dao.saveWithLiteralTtl(FLAMETHROWER); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT TTL(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) - .one(); - Integer retrievedTtl = row.get(0, Integer.class); - assertThat(retrievedTtl).isNotNull().isLessThanOrEqualTo(86400); - } - - @Test - public void should_insert_entity_with_bound_timestamp_asynchronously() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - long timestamp = 1234; - CompletableFutures.getUninterruptibly(dao.saveAsyncWithBoundTimestamp(FLAMETHROWER, timestamp)); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT WRITETIME(description) FROM product WHERE id = ?", - FLAMETHROWER.getId())) - .one(); - long writeTime = row.getLong(0); - assertThat(writeTime).isEqualTo(timestamp); - } - - @Test - public void should_insert_entity_if_not_exists() { - assertThat(dao.saveIfNotExists(FLAMETHROWER)).isNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.saveIfNotExists(otherProduct)).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_insert_entity_if_not_exists_returning_boolean() { - assertThat(dao.saveIfNotExistsReturningBoolean(FLAMETHROWER)).isTrue(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.saveIfNotExistsReturningBoolean(otherProduct)).isFalse(); - } - - @Test - public void should_insert_entity_if_not_exists_asynchronously() { - assertThat(CompletableFutures.getUninterruptibly(dao.saveAsyncIfNotExists(FLAMETHROWER))) - .isNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - 
assertThat(CompletableFutures.getUninterruptibly(dao.saveAsyncIfNotExists(otherProduct))) - .isEqualTo(FLAMETHROWER); - } - - @Test - public void should_insert_entity_if_not_exists_asynchronously_returning_boolean() { - assertThat( - CompletableFutures.getUninterruptibly( - dao.saveAsyncIfNotExistsReturningBoolean(FLAMETHROWER))) - .isTrue(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - CompletableFutures.getUninterruptibly( - dao.saveAsyncIfNotExistsReturningBoolean(otherProduct))) - .isFalse(); - } - - @Test - public void should_insert_entity_if_not_exists_returning_optional() { - assertThat(dao.saveIfNotExistsOptional(FLAMETHROWER)).isEmpty(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.saveIfNotExistsOptional(otherProduct)).contains(FLAMETHROWER); - } - - @Test - public void should_insert_entity_if_not_exists_returning_optional_asynchronously() { - assertThat( - CompletableFutures.getUninterruptibly(dao.saveAsyncIfNotExistsOptional(FLAMETHROWER))) - .isEmpty(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - CompletableFutures.getUninterruptibly(dao.saveAsyncIfNotExistsOptional(otherProduct))) - .contains(FLAMETHROWER); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductDao { - - @Insert - void save(Product product); - - @Insert - ResultSet saveReturningResultSet(Product product); - - @Insert - BoundStatement saveReturningBoundStatement(Product product); - - @Insert(timestamp = ":timestamp") - void saveWithBoundTimestamp(Product product, long timestamp); - - @Insert(timestamp = "1234") - void saveWithLiteralTimestamp(Product product); - - 
@Insert(ttl = ":ttl") - void saveWithBoundTtl(Product product, int ttl); - - @Insert(ttl = "86400") - void saveWithLiteralTtl(Product product); - - @Insert(ifNotExists = true) - Product saveIfNotExists(Product product); - - @Insert(ifNotExists = true) - boolean saveIfNotExistsReturningBoolean(Product product); - - @Insert(ifNotExists = true) - Optional saveIfNotExistsOptional(Product product); - - @Insert - CompletableFuture saveAsync(Product product); - - @Insert - CompletableFuture saveAsyncReturningAsyncResultSet(Product product); - - @Insert(timestamp = ":\"TIMESTAMP\"") - CompletableFuture saveAsyncWithBoundTimestamp( - Product product, @CqlName("\"TIMESTAMP\"") long timestamp); - - @Insert(ifNotExists = true) - CompletableFuture saveAsyncIfNotExists(Product product); - - @Insert(ifNotExists = true) - CompletableFuture saveAsyncIfNotExistsReturningBoolean(Product product); - - @Insert(ifNotExists = true) - CompletableFuture> saveAsyncIfNotExistsOptional(Product product); - - @Select - Product findById(UUID productId); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertReactiveIT.java deleted file mode 100644 index e9b9879fcb8..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertReactiveIT.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import io.reactivex.Flowable; -import java.util.UUID; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class 
InsertReactiveIT extends InventoryITBase { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - private static DseProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = sessionRule.session(); - - for (String query : createStatements(ccmRule)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } - - DseInventoryMapper dseInventoryMapper = - new InsertReactiveIT_DseInventoryMapperBuilder(session).build(); - dao = dseInventoryMapper.productDao(sessionRule.keyspace()); - } - - @Before - public void clearProductData() { - CqlSession session = sessionRule.session(); - session.execute( - SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - - @Test - public void should_insert_entity_returning_reactive_result_set() { - assertThat( - Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())) - .singleElement() - .blockingGet()) - .isNull(); - assertThat(Flowable.fromPublisher(dao.saveReactive(FLAMETHROWER)).singleElement().blockingGet()) - .isNull(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) - .isEqualTo(FLAMETHROWER); - } - - @Test - public void should_insert_entity_if_not_exists_reactive() { - UUID id = FLAMETHROWER.getId(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) - .isNull(); - { - ReactiveResultSet rs = dao.saveIfNotExistsReactive(FLAMETHROWER); - ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); - assertThat(row.wasApplied()).isTrue(); - assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isTrue(); - } - assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()) - 
.isNotNull() - .isEqualTo(FLAMETHROWER); - { - ReactiveResultSet rs = dao.saveIfNotExistsReactive(FLAMETHROWER); - ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); - assertThat(row.wasApplied()).isFalse(); - assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isFalse(); - } - } - - @Mapper - public interface DseInventoryMapper { - - @DaoFactory - DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DseProductDao { - - @Insert - ReactiveResultSet saveReactive(Product product); - - @Insert(ifNotExists = true) - ReactiveResultSet saveIfNotExistsReactive(Product product); - - @Select - MappedReactiveResultSet findByIdReactive(UUID productId); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java deleted file mode 100644 index 1bd899e4541..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.testinfra.ccm.BaseCcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import java.util.Objects; -import java.util.UUID; - -/** Factors common code for mapper tests that rely on a simple inventory model. */ -public abstract class InventoryITBase { - - protected static Product FLAMETHROWER = - new Product(UUID.randomUUID(), "Flamethrower", new InventoryITBase.Dimensions(30, 10, 8)); - protected static Product MP3_DOWNLOAD = new Product(UUID.randomUUID(), "MP3 download", null); - - protected static String DATE_1 = "2019-06-27"; - protected static String DATE_2 = "2019-06-28"; - protected static String DATE_3 = "2019-01-01"; - - protected static ProductSale FLAMETHROWER_SALE_1 = - new ProductSale(FLAMETHROWER.getId(), DATE_1, 1, Uuids.startOf(1561643130), 500.00, 5); - - protected static ProductSale FLAMETHROWER_SALE_2 = - new ProductSale(FLAMETHROWER.getId(), DATE_1, 2, Uuids.startOf(1561645130), 500.00, 1); - - protected static ProductSale FLAMETHROWER_SALE_3 = - new ProductSale(FLAMETHROWER.getId(), DATE_1, 1, Uuids.startOf(1561653130), 500.00, 2); - - protected static ProductSale FLAMETHROWER_SALE_4 = - new ProductSale(FLAMETHROWER.getId(), DATE_1, 1, Uuids.startOf(1561657504), 702.00, 3); - - protected static ProductSale FLAMETHROWER_SALE_5 = - new ProductSale(FLAMETHROWER.getId(), DATE_2, 1, Uuids.startOf(1561729530), 500.00, 23); - - protected static ProductSale MP3_DOWNLOAD_SALE_1 = - new ProductSale(MP3_DOWNLOAD.getId(), DATE_3, 7, 
Uuids.startOf(915192000), 0.99, 12); - - protected static List createStatements(BaseCcmRule ccmRule) { - return createStatements(ccmRule, false); - } - - protected static List createStatements(BaseCcmRule ccmRule, boolean requiresSasiIndex) { - ImmutableList.Builder builder = - ImmutableList.builder() - .add( - "CREATE TYPE dimensions(length int, width int, height int)", - "CREATE TABLE product(id uuid PRIMARY KEY, description text, dimensions frozen)", - "CREATE TYPE dimensions2d(width int, height int)", - "CREATE TABLE product2d(id uuid PRIMARY KEY, description text, dimensions frozen)", - "CREATE TABLE product_without_id(id uuid, clustering int, description text, " - + "PRIMARY KEY((id), clustering))", - "CREATE TABLE product_sale(id uuid, day text, ts uuid, customer_id int, price " - + "double, count int, PRIMARY KEY ((id, day), customer_id, ts))"); - - if (requiresSasiIndex && supportsSASI(ccmRule) && !isSasiBroken(ccmRule)) { - builder.add( - "CREATE CUSTOM INDEX product_description ON product(description) " - + "USING 'org.apache.cassandra.index.sasi.SASIIndex' " - + "WITH OPTIONS = {" - + "'mode': 'CONTAINS'," - + "'analyzer_class': 'org.apache.cassandra.index.sasi.analyzer.StandardAnalyzer'," - + "'tokenization_enable_stemming': 'true'," - + "'tokenization_locale': 'en'," - + "'tokenization_skip_stop_words': 'true'," - + "'analyzed': 'true'," - + "'tokenization_normalize_lowercase': 'true'" - + "}"); - } - - return builder.build(); - } - - private static final Version MINIMUM_SASI_VERSION = - Objects.requireNonNull(Version.parse("3.4.0")); - private static final Version BROKEN_SASI_VERSION = Objects.requireNonNull(Version.parse("6.8.0")); - - protected static boolean isSasiBroken(BaseCcmRule ccmRule) { - // creating SASI indexes is broken in DSE 6.8.0 - return ccmRule.isDistributionOf( - BackendType.DSE, (dist, cass) -> dist.compareTo(BROKEN_SASI_VERSION) == 0); - } - - protected static boolean supportsSASI(BaseCcmRule ccmRule) { - return 
ccmRule.getCassandraVersion().compareTo(MINIMUM_SASI_VERSION) >= 0; - } - - @Entity - public static class Product { - - @PartitionKey private UUID id; - private String description; - private Dimensions dimensions; - - public Product() {} - - public Product(UUID id, String description, Dimensions dimensions) { - this.id = id; - this.description = description; - this.dimensions = dimensions; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Dimensions getDimensions() { - return dimensions; - } - - public void setDimensions(Dimensions dimensions) { - this.dimensions = dimensions; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Product) { - Product that = (Product) other; - return Objects.equals(id, that.id) - && Objects.equals(description, that.description) - && Objects.equals(dimensions, that.dimensions); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - - @Override - public String toString() { - return "Product{" - + "id=" - + id - + ", description='" - + description - + '\'' - + ", dimensions=" - + dimensions - + '}'; - } - } - - @Entity - public static class ProductWithoutId { - private String description; - - public ProductWithoutId() {} - - public ProductWithoutId(String description) { - this.description = description; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof ProductWithoutId) { - ProductWithoutId that = (ProductWithoutId) other; - return 
Objects.equals(description, that.description); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(description); - } - - @Override - public String toString() { - return "ProductWithoutId{" + "description='" + description + '\'' + '}'; - } - } - - @Entity - public static class Dimensions { - - private int length; - private int width; - private int height; - - public Dimensions() {} - - public Dimensions(int length, int width, int height) { - this.length = length; - this.width = width; - this.height = height; - } - - public int getLength() { - return length; - } - - public void setLength(int length) { - this.length = length; - } - - public int getWidth() { - return width; - } - - public void setWidth(int width) { - this.width = width; - } - - public int getHeight() { - return height; - } - - public void setHeight(int height) { - this.height = height; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof Dimensions) { - Dimensions that = (Dimensions) other; - return this.length == that.length && this.width == that.width && this.height == that.height; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(length, width, height); - } - - @Override - public String toString() { - return "Dimensions{" + "length=" + length + ", width=" + width + ", height=" + height + '}'; - } - } - - @Entity - public static class OnlyPK { - @PartitionKey private UUID id; - - public OnlyPK() {} - - public OnlyPK(UUID id) { - this.id = id; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof OnlyPK) { - OnlyPK that = (OnlyPK) other; - return Objects.equals(this.id, that.id); - } else { - return false; - } - } - - @Override - public int hashCode() { - return 
Objects.hash(id); - } - - @Override - public String toString() { - return "OnlyPK{" + "id=" + id + '}'; - } - } - - @Entity - public static class ProductSale { - @PartitionKey private UUID id; - - @PartitionKey(1) - private String day; - - @ClusteringColumn private int customerId; - - @ClusteringColumn(1) - private UUID ts; - - private double price; - - private int count; - - public ProductSale() {} - - public ProductSale(UUID id, String day, int customerId, UUID ts, double price, int count) { - this.id = id; - this.day = day; - this.customerId = customerId; - this.ts = ts; - this.price = price; - this.count = count; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDay() { - return day; - } - - public void setDay(String day) { - this.day = day; - } - - public UUID getTs() { - return ts; - } - - public void setTs(UUID ts) { - this.ts = ts; - } - - public int getCustomerId() { - return customerId; - } - - public void setCustomerId(int customerId) { - this.customerId = customerId; - } - - public double getPrice() { - return price; - } - - public void setPrice(double price) { - this.price = price; - } - - public int getCount() { - return count; - } - - public void setCount(int count) { - this.count = count; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof ProductSale) { - ProductSale that = (ProductSale) other; - return Double.compare(this.price, that.price) == 0 - && this.count == that.count - && this.id.equals(that.id) - && this.day.equals(that.day) - && this.ts.equals(that.ts) - && this.customerId == that.customerId; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, day, ts, customerId, price, count); - } - - @Override - public String toString() { - return "ProductSale{" - + "id=" - + id - + ", day='" - + day - + '\'' - + ", customerId=" - + customerId - + ", ts=" - + 
ts - + ", price=" - + price - + ", count=" - + count - + '}'; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java deleted file mode 100644 index 974e4bad7c3..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.UPPER_SNAKE_CASE; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * Runs simple queries for entities with various naming strategies: - * - *
      - *
    • {@link DefaultStrategyEntity default} - *
    • {@link UpperSnakeCaseEntity non-default built-in convention} - *
    • {@link NameConverterEntity custom name converter class} - *
    • {@link CustomNamesEntity custom names provided through annotations} - *
    - * - *

    See each entity's corresponding table schema in {@link #setup()}. - */ -@Category(ParallelizableTests.class) -public class NamingStrategyIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestMapper mapper; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : - ImmutableList.of( - "CREATE TABLE default_strategy_entity(entity_id int primary key)", - "CREATE TABLE \"UPPER_SNAKE_CASE_ENTITY\"(\"ENTITY_ID\" int primary key)", - "CREATE TABLE test_NameConverterEntity(test_entityId int primary key)", - "CREATE TABLE custom_entity(custom_id int primary key)")) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - mapper = new NamingStrategyIT_TestMapperBuilder(session).build(); - } - - @Test - public void should_map_entity_with_default_naming_strategy() { - DefaultStrategyEntityDao dao = mapper.defaultStrategyEntityDao(SESSION_RULE.keyspace()); - DefaultStrategyEntity entity = new DefaultStrategyEntity(1); - - dao.save(entity); - DefaultStrategyEntity retrievedEntity = dao.findById(1); - assertThat(retrievedEntity.getEntityId()).isEqualTo(1); - } - - @Test - public void should_map_entity_with_non_default_convention() { - UpperSnakeCaseEntityDao dao = mapper.upperSnakeCaseEntityDao(SESSION_RULE.keyspace()); - UpperSnakeCaseEntity entity = new UpperSnakeCaseEntity(1); - - dao.save(entity); - UpperSnakeCaseEntity retrievedEntity = dao.findById(1); - assertThat(retrievedEntity.getEntityId()).isEqualTo(1); - } - - @Test - public void should_map_entity_with_name_converter() { - NameConverterEntityDao dao = mapper.nameConverterEntityDao(SESSION_RULE.keyspace()); - NameConverterEntity entity = new 
NameConverterEntity(1); - - dao.save(entity); - NameConverterEntity retrievedEntity = dao.findById(1); - assertThat(retrievedEntity.getEntityId()).isEqualTo(1); - } - - @Test - public void should_map_entity_with_custom_names() { - CustomNamesEntityDao dao = mapper.customNamesEntityDao(SESSION_RULE.keyspace()); - CustomNamesEntity entity = new CustomNamesEntity(1); - - dao.save(entity); - CustomNamesEntity retrievedEntity = dao.findById(1); - assertThat(retrievedEntity.getEntityId()).isEqualTo(1); - } - - @Entity - public static class DefaultStrategyEntity { - @PartitionKey private int entityId; - - public DefaultStrategyEntity() {} - - public DefaultStrategyEntity(int entityId) { - this.entityId = entityId; - } - - public int getEntityId() { - return entityId; - } - - public void setEntityId(int entityId) { - this.entityId = entityId; - } - } - - @Entity - @NamingStrategy(convention = UPPER_SNAKE_CASE) - public static class UpperSnakeCaseEntity { - - @PartitionKey private int entityId; - - public UpperSnakeCaseEntity() {} - - public UpperSnakeCaseEntity(int entityId) { - this.entityId = entityId; - } - - public int getEntityId() { - return entityId; - } - - public void setEntityId(int entityId) { - this.entityId = entityId; - } - } - - @Entity - @NamingStrategy(customConverterClass = TestNameConverter.class) - public static class NameConverterEntity { - - @PartitionKey private int entityId; - - public NameConverterEntity() {} - - public NameConverterEntity(int entityId) { - this.entityId = entityId; - } - - public int getEntityId() { - return entityId; - } - - public void setEntityId(int entityId) { - this.entityId = entityId; - } - } - - public static class TestNameConverter implements NameConverter { - - @Override - @NonNull - public String toCassandraName(@NonNull String javaName) { - // Pretty silly but we don't need this to be realistic - return "test_" + javaName; - } - } - - @Entity - @CqlName("custom_entity") - public static class CustomNamesEntity { - - 
@PartitionKey - @CqlName("custom_id") - private int entityId; - - public CustomNamesEntity() {} - - public CustomNamesEntity(int entityId) { - this.entityId = entityId; - } - - public int getEntityId() { - return entityId; - } - - public void setEntityId(int entityId) { - this.entityId = entityId; - } - } - - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - interface BaseDao { - @Select - T findById(int id); - - @Insert - void save(T entity); - } - - @Dao - public interface DefaultStrategyEntityDao extends BaseDao {} - - @Dao - public interface UpperSnakeCaseEntityDao extends BaseDao {} - - @Dao - public interface NameConverterEntityDao extends BaseDao {} - - @Dao - public interface CustomNamesEntityDao extends BaseDao {} - - @Mapper - public interface TestMapper { - @DaoFactory - DefaultStrategyEntityDao defaultStrategyEntityDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - UpperSnakeCaseEntityDao upperSnakeCaseEntityDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - NameConverterEntityDao nameConverterEntityDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - CustomNamesEntityDao customNamesEntityDao(@DaoKeyspace CqlIdentifier keyspace); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java deleted file mode 100644 index d61b6f6e628..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java +++ /dev/null @@ -1,490 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import 
com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import org.assertj.core.util.Lists; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** Tests that entities with UDTs nested at various levels are properly mapped. */ -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.2", - description = "support for unset values") -public class NestedUdtIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final UUID CONTAINER_ID = UUID.randomUUID(); - - private static final Container SAMPLE_CONTAINER = - new Container( - CONTAINER_ID, - ImmutableList.of(new Type1("a1", "a2"), new Type1("b1", "b2")), - ImmutableMap.of( - "cd", - ImmutableList.of(new Type1("c1", "c2"), new Type1("d1", "d2")), - "ef", - ImmutableList.of(new Type1("e1", "e2"), new Type1("f1", "f2"))), - ImmutableMap.of( - new Type1("12", "34"), - ImmutableSet.of( - ImmutableList.of(new Type2(1, 2)), ImmutableList.of(new Type2(3, 4)))), - ImmutableMap.of( - new Type1("12", "34"), - 
ImmutableMap.of("12", ImmutableSet.of(new Type2(1, 2), new Type2(3, 4))))); - - private static final Container SAMPLE_CONTAINER_NULL_LIST = - new Container( - CONTAINER_ID, - null, - ImmutableMap.of( - "cd", - ImmutableList.of(new Type1("c1", "c2"), new Type1("d1", "d2")), - "ef", - ImmutableList.of(new Type1("e1", "e2"), new Type1("f1", "f2"))), - ImmutableMap.of( - new Type1("12", "34"), - ImmutableSet.of( - ImmutableList.of(new Type2(1, 2)), ImmutableList.of(new Type2(3, 4)))), - ImmutableMap.of( - new Type1("12", "34"), - ImmutableMap.of("12", ImmutableSet.of(new Type2(1, 2), new Type2(3, 4))))); - - private static ContainerDao containerDao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - SchemaChangeSynchronizer.withLock( - () -> { - for (String query : - ImmutableList.of( - "CREATE TYPE type1(s1 text, s2 text)", - "CREATE TYPE type2(i1 int, i2 int)", - "CREATE TYPE type1_partial(s1 text)", - "CREATE TYPE type2_partial(i1 int)", - "CREATE TABLE container(id uuid PRIMARY KEY, " - + "list frozen>, " - + "map1 frozen>>, " - + "map2 frozen>>>," - + "map3 frozen>>>" - + ")", - "CREATE TABLE container_partial(id uuid PRIMARY KEY, " - + "list frozen>, " - + "map1 frozen>>, " - + "map2 frozen>>>," - + "map3 frozen>>>" - + ")")) { - session.execute( - SimpleStatement.builder(query) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - }); - - UserDefinedType type1Partial = - session - .getKeyspace() - .flatMap(ks -> session.getMetadata().getKeyspace(ks)) - .flatMap(ks -> ks.getUserDefinedType("type1_partial")) - .orElseThrow(AssertionError::new); - - session.execute( - SimpleStatement.newInstance( - "INSERT INTO container_partial (id, list) VALUES (?, ?)", - SAMPLE_CONTAINER.getId(), - Lists.newArrayList(type1Partial.newValue("a"), type1Partial.newValue("b")))); - - UdtsMapper udtsMapper = new NestedUdtIT_UdtsMapperBuilder(session).build(); - containerDao = 
udtsMapper.containerDao(SESSION_RULE.keyspace()); - } - - @Before - public void clearContainerData() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder("TRUNCATE container") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - @Test - public void should_insert_and_retrieve_entity_with_nested_udts() { - // When - containerDao.save(SAMPLE_CONTAINER); - Container retrievedEntity = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); - - // Then - assertThat(retrievedEntity).isEqualTo(SAMPLE_CONTAINER); - } - - @Test - public void should_insert_do_not_set_to_null_udts() { - // Given - containerDao.save(SAMPLE_CONTAINER); - Container retrievedEntity = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); - - assertThat(retrievedEntity.list).isNotNull(); - - // When - containerDao.saveDoNotSetNull(SAMPLE_CONTAINER_NULL_LIST); - Container retrievedEntitySecond = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); - assertThat(retrievedEntitySecond.list).isNotNull(); - } - - @Test - public void should_insert_set_to_null_udts() { - // Given - containerDao.save(SAMPLE_CONTAINER); - Container retrievedEntity = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); - - assertThat(retrievedEntity.list).isNotNull(); - - // When - containerDao.saveSetToNull(SAMPLE_CONTAINER_NULL_LIST); - Container retrievedEntitySecond = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); - assertThat(retrievedEntitySecond.list).isEmpty(); - } - - @Test - public void should_get_entity_from_complete_row() { - CqlSession session = SESSION_RULE.session(); - containerDao.save(SAMPLE_CONTAINER); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT * FROM container WHERE id = ?", SAMPLE_CONTAINER.getId())); - Row row = rs.one(); - assertThat(row).isNotNull(); - Container actual = containerDao.get(row); - assertThat(actual).isEqualTo(SAMPLE_CONTAINER); - } - - @Test - public void should_not_get_entity_from_partial_row_when_not_lenient() { 
- CqlSession session = SESSION_RULE.session(); - containerDao.save(SAMPLE_CONTAINER); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT id FROM container WHERE id = ?", SAMPLE_CONTAINER.getId())); - Row row = rs.one(); - assertThat(row).isNotNull(); - Throwable error = catchThrowable(() -> containerDao.get(row)); - assertThat(error).hasMessage("list is not a column in this row"); - } - - @Test - public void should_get_entity_from_partial_row_when_lenient() { - CqlSession session = SESSION_RULE.session(); - ResultSet rs = - session.execute( - SimpleStatement.newInstance( - "SELECT id, list FROM container_partial WHERE id = ?", SAMPLE_CONTAINER.getId())); - Row row = rs.one(); - assertThat(row).isNotNull(); - Container actual = containerDao.getLenient(row); - assertThat(actual.getId()).isEqualTo(SAMPLE_CONTAINER.getId()); - assertThat(actual.getList()).containsExactly(new Type1("a", null), new Type1("b", null)); - assertThat(actual.getMap1()).isNull(); - assertThat(actual.getMap2()).isNull(); - assertThat(actual.getMap3()).isNull(); - } - - @Test - public void should_set_entity_on_partial_statement_builder_when_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = - session.prepare("INSERT INTO container_partial (id, list) VALUES (?, ?)"); - BoundStatementBuilder builder = ps.boundStatementBuilder(); - containerDao.setLenient(SAMPLE_CONTAINER, builder); - assertThat(builder.getUuid(0)).isEqualTo(SAMPLE_CONTAINER.getId()); - assertThat(builder.getList(1, UdtValue.class)).hasSize(2); - } - - @Test - public void should_not_set_entity_on_partial_statement_builder_when_not_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO container (id, list) VALUES (?, ?)"); - Throwable error = - catchThrowable(() -> containerDao.set(SAMPLE_CONTAINER, ps.boundStatementBuilder())); - assertThat(error).hasMessage("map1 is not a variable in this bound statement"); - } - - 
@Mapper - public interface UdtsMapper { - @DaoFactory - ContainerDao containerDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - public interface ContainerDao { - - @Select - Container loadByPk(UUID id); - - @Insert - void save(Container container); - - @Insert(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void saveDoNotSetNull(Container container); - - @Insert(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void saveSetToNull(Container container); - - @GetEntity - Container get(Row source); - - @GetEntity(lenient = true) - Container getLenient(Row source); - - @SetEntity - void set(Container container, BoundStatementBuilder target); - - @SetEntity(lenient = true) - void setLenient(Container container, BoundStatementBuilder target); - } - - @Entity - public static class Container { - - @PartitionKey private UUID id; - private List list; - private Map> map1; - private Map>> map2; - private Map>> map3; - - public Container() {} - - public Container( - UUID id, - List list, - Map> map1, - Map>> map2, - Map>> map3) { - this.id = id; - this.list = list; - this.map1 = map1; - this.map2 = map2; - this.map3 = map3; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public List getList() { - return list; - } - - public void setList(List list) { - this.list = list; - } - - public Map> getMap1() { - return map1; - } - - public void setMap1(Map> map1) { - this.map1 = map1; - } - - public Map>> getMap2() { - return map2; - } - - public void setMap2(Map>> map2) { - this.map2 = map2; - } - - public Map>> getMap3() { - return map3; - } - - public void setMap3(Map>> map3) { - this.map3 = map3; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Container) { - Container that = (Container) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.list, that.list) - && Objects.equals(this.map1, that.map1) - && 
Objects.equals(this.map2, that.map2) - && Objects.equals(this.map3, that.map3); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, list, map1, map2, map3); - } - } - - @Entity - public static class Type1 { - private String s1; - private String s2; - - public Type1() {} - - public Type1(String s1, String s2) { - this.s1 = s1; - this.s2 = s2; - } - - public String getS1() { - return s1; - } - - public void setS1(String s1) { - this.s1 = s1; - } - - public String getS2() { - return s2; - } - - public void setS2(String s2) { - this.s2 = s2; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof Type1)) { - return false; - } - Type1 type1 = (Type1) o; - return Objects.equals(s1, type1.s1) && Objects.equals(s2, type1.s2); - } - - @Override - public int hashCode() { - return Objects.hash(s1, s2); - } - } - - @Entity - public static class Type2 { - private int i1; - private int i2; - - public Type2() {} - - public Type2(int i1, int i2) { - this.i1 = i1; - this.i2 = i2; - } - - public int getI1() { - return i1; - } - - public void setI1(int i1) { - this.i1 = i1; - } - - public int getI2() { - return i2; - } - - public void setI2(int i2) { - this.i2 = i2; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof Type2)) { - return false; - } - Type2 type2 = (Type2) o; - return i1 == type2.i1 && i2 == type2.i2; - } - - @Override - public int hashCode() { - return Objects.hash(i1, i2); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java deleted file mode 100644 index ea96c12e57b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation 
(ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThatCode; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import 
com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import java.util.UUID; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class NullSavingStrategyIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - // JAVA-3076: V3 protocol calls that could trigger cassandra to issue client warnings appear to be - // inherently unstable when used at the same time as V4+ protocol clients (common since this is - // part of the parallelizable test suite). - // - // For this test we'll use latest protocol version for SessionRule set-up, which creates the - // keyspace and could potentially result in warning about too many keyspaces, and then create a - // new client for the tests to use, which they access via the static InventoryMapper instance - // `mapper`. - // - // This additional client is created in the @BeforeClass method #setup() and guaranteed to be - // closed in @AfterClass method #teardown(). - // - // Note: The standard junit runner executes rules before class/test setup so the order of - // execution will be CcmRule#before > SessionRule#before > NullSavingStrategyIT#setup, meaning - // CCM_RULE/SESSION_RULE should be fully initialized by the time #setup() is invoked. 
- private static CqlSession v3Session; - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static InventoryMapper mapper; - - @BeforeClass - public static void setup() { - // setup table for use in tests, this can use the default session - SESSION_RULE - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE product_simple(id uuid PRIMARY KEY, description text)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - // Create V3 protocol session for use in tests, will be closed in #teardown() - v3Session = - SessionUtils.newSession( - CCM_RULE, - SESSION_RULE.keyspace(), - DriverConfigLoader.programmaticBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") - .build()); - - // Hand V3 session to InventoryMapper which the tests will use to perform db calls - mapper = new NullSavingStrategyIT_InventoryMapperBuilder(v3Session).build(); - } - - @AfterClass - public static void teardown() { - // Close V3 session (SESSION_RULE will be closed separately by @ClassRule handling) - if (v3Session != null) { - v3Session.close(); - } - } - - @Test - public void should_throw_when_try_to_construct_dao_with_DO_NOT_SET_strategy_for_V3_protocol() { - assertThatThrownBy(() -> mapper.productDao(SESSION_RULE.keyspace())) - .isInstanceOf(MapperException.class) - .hasMessage("You cannot use NullSavingStrategy.DO_NOT_SET for protocol version V3."); - } - - @Test - public void - should_throw_when_try_to_construct_dao_with_DO_NOT_SET_implicit_strategy_for_V3_protocol() { - assertThatThrownBy(() -> mapper.productDaoImplicit(SESSION_RULE.keyspace())) - .isInstanceOf(MapperException.class) - .hasMessage("You cannot use NullSavingStrategy.DO_NOT_SET for protocol version V3."); - } - - @Test - public void - should_throw_when_try_to_construct_dao_with_DO_NOT_SET_strategy_set_globally_for_V3_protocol() { - assertThatThrownBy(() -> mapper.productDaoDefault(SESSION_RULE.keyspace())) - 
.isInstanceOf(MapperException.class) - .hasMessage("You cannot use NullSavingStrategy.DO_NOT_SET for protocol version V3."); - } - - @Test - public void should_do_not_throw_when_construct_dao_with_global_level_SET_TO_NULL() { - assertThatCode(() -> mapper.productDaoGlobalLevelSetToNull(SESSION_RULE.keyspace())) - .doesNotThrowAnyException(); - } - - @Test - public void should_do_not_throw_when_construct_dao_with_parent_interface_SET_TO_NULL() { - assertThatCode(() -> mapper.productDaoSetToNullFromParentInterface(SESSION_RULE.keyspace())) - .doesNotThrowAnyException(); - } - - @Test - public void - should_do_not_throw_when_construct_dao_with_global_level_DO_NOT_SET_and_local_override_to_SET_TO_NULL() { - assertThatCode(() -> mapper.productDaoLocalOverride(SESSION_RULE.keyspace())) - .doesNotThrowAnyException(); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductSimpleDao productDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleDaoDefault productDaoDefault(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleDaoImplicit productDaoImplicit(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleDaoGlobalLevelSetToNull productDaoGlobalLevelSetToNull( - @DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleDaoSetToNullFromParentInterface productDaoSetToNullFromParentInterface( - @DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleDaoGlobalLevelDoNotSetOverrideSetToNull productDaoLocalOverride( - @DaoKeyspace CqlIdentifier keyspace); - } - - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface SetToNull {} - - @Dao - public interface ProductSimpleDao { - - @Update(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void update(ProductSimple product); - - @Select - ProductSimple findById(UUID productId); - } - - @Dao - public interface ProductSimpleDaoImplicit { - - @Update - void update(ProductSimple product); - - @Select - ProductSimple 
findById(UUID productId); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.DO_NOT_SET) - public interface ProductSimpleDaoDefault extends SetToNull { - @Update - void update(ProductSimple product); - - @Select - ProductSimple findById(UUID productId); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductSimpleDaoGlobalLevelSetToNull { - - @Update - void update(ProductSimple product); - - @Select - ProductSimple findById(UUID productId); - } - - @Dao - public interface ProductSimpleDaoSetToNullFromParentInterface - extends ProductSimpleDaoImplicit, SetToNull {} - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.DO_NOT_SET) - public interface ProductSimpleDaoGlobalLevelDoNotSetOverrideSetToNull { - - @Update(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void update(ProductSimple product); - - @Select - ProductSimple findById(UUID productId); - } - - @Entity - public static class ProductSimple { - @PartitionKey private UUID id; - private String description; - - public ProductSimple() {} - - public ProductSimple(UUID id, String description) { - this.id = id; - this.description = description; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public boolean equals(Object other) { - - if (this == other) { - return true; - } else if (other instanceof ProductSimple) { - ProductSimple that = (ProductSimple) other; - return Objects.equals(this.id, that.id) - && Objects.equals(this.description, that.description); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, description); - } - - @Override - public String toString() { - return "ProductSimple{" + "id=" + id + ", description='" + description + '\'' + '}'; - } - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java deleted file mode 100644 index d63e3834188..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.2", - description = "smallint is a reserved keyword in 2.1") -public class PrimitivesIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestMapper mapper; - - @BeforeClass - public static 
void setup() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder( - "CREATE TABLE primitives_entity(" - + "id int PRIMARY KEY, " - + "boolean_col boolean, " - + "byte_col tinyint, " - + "short_col smallint, " - + "long_col bigint, " - + "float_col float," - + "double_col double)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - mapper = new PrimitivesIT_TestMapperBuilder(session).build(); - } - - @Test - public void should_not_include_computed_values_in_insert() { - PrimitivesDao primitivesDao = mapper.primitivesDao(SESSION_RULE.keyspace()); - - PrimitivesEntity expected = new PrimitivesEntity(0, true, (byte) 2, (short) 3, 4L, 5.0f, 6.0d); - primitivesDao.save(expected); - - PrimitivesEntity actual = primitivesDao.findById(0); - assertThat(actual).isEqualTo(expected); - } - - @Entity - public static class PrimitivesEntity { - - @PartitionKey private int id; - - private boolean booleanCol; - - private byte byteCol; - - private short shortCol; - - private long longCol; - - private float floatCol; - - private double doubleCol; - - public PrimitivesEntity() {} - - public PrimitivesEntity( - int id, - boolean booleanCol, - byte byteCol, - short shortCol, - long longCol, - float floatCol, - double doubleCol) { - this.id = id; - this.booleanCol = booleanCol; - this.byteCol = byteCol; - this.shortCol = shortCol; - this.longCol = longCol; - this.floatCol = floatCol; - this.doubleCol = doubleCol; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public boolean isBooleanCol() { - return booleanCol; - } - - public void setBooleanCol(boolean booleanCol) { - this.booleanCol = booleanCol; - } - - public byte getByteCol() { - return byteCol; - } - - public void setByteCol(byte byteCol) { - this.byteCol = byteCol; - } - - public short getShortCol() { - return shortCol; - } - - public void setShortCol(short shortCol) { - this.shortCol = shortCol; - } - - public long 
getLongCol() { - return longCol; - } - - public void setLongCol(long longCol) { - this.longCol = longCol; - } - - public float getFloatCol() { - return floatCol; - } - - public void setFloatCol(float floatCol) { - this.floatCol = floatCol; - } - - public double getDoubleCol() { - return doubleCol; - } - - public void setDoubleCol(double doubleCol) { - this.doubleCol = doubleCol; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof PrimitivesEntity)) { - return false; - } - PrimitivesEntity that = (PrimitivesEntity) o; - return this.id == that.id - && this.booleanCol == that.booleanCol - && this.byteCol == that.byteCol - && this.shortCol == that.shortCol - && this.longCol == that.longCol - && Float.compare(this.floatCol, that.floatCol) == 0 - && Double.compare(this.doubleCol, that.doubleCol) == 0; - } - - @Override - public int hashCode() { - return Objects.hash(id, booleanCol, byteCol, shortCol, longCol, floatCol, doubleCol); - } - } - - @Dao - public interface PrimitivesDao { - - @Select - PrimitivesEntity findById(int id); - - @Insert(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void save(PrimitivesEntity entity); - } - - @Mapper - public interface TestMapper { - @DaoFactory - PrimitivesDao primitivesDao(@DaoKeyspace CqlIdentifier keyspace); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ProfileIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ProfileIT.java deleted file mode 100644 index 3ed2a48cced..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ProfileIT.java +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.query; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoProfile; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import 
com.datastax.oss.driver.api.mapper.annotations.StatementAttributes; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.function.UnaryOperator; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class ProfileIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .startProfile("cl_one") - .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE") - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static final Simple SAMPLE_ENTITY = new Simple(UUID.randomUUID(), "DATA"); - - private static DriverExecutionProfile clTwoProfile; - private MapperBuilder mapperBuilder; - - @BeforeClass - public static void setupClass() 
{ - primeDeleteQuery(); - primeInsertQuery(); - primeSelectQuery(); - primeCountQuery(); - primeUpdateQuery(); - - // Deliberately based on the default profile, so that we can assert that a dynamically-set - // option is correctly taken into account - clTwoProfile = - SESSION_RULE - .session() - .getContext() - .getConfig() - .getDefaultProfile() - .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "TWO"); - } - - @Before - public void setup() { - SIMULACRON_RULE.cluster().clearLogs(); - mapperBuilder = SimpleMapper.builder(SESSION_RULE.session()); - } - - @Test - public void should_build_dao_with_profile_name() { - SimpleMapper mapper = mapperBuilder.build(); - SimpleDao dao = mapper.simpleDao("cl_one"); - assertClForAllQueries(dao, ConsistencyLevel.ONE); - } - - @Test - public void should_build_dao_with_profile() { - SimpleMapper mapper = mapperBuilder.build(); - SimpleDao dao = mapper.simpleDao(clTwoProfile); - assertClForAllQueries(dao, ConsistencyLevel.TWO); - } - - @Test - public void should_inherit_mapper_profile_name() { - SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfileName("cl_one").build(); - SimpleDao dao = mapper.simpleDao(); - assertClForAllQueries(dao, ConsistencyLevel.ONE); - } - - @Test - public void should_inherit_mapper_profile() { - SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfile(clTwoProfile).build(); - SimpleDao dao = mapper.simpleDao(); - assertClForAllQueries(dao, ConsistencyLevel.TWO); - } - - @Test - public void should_override_mapper_profile_name() { - SimpleMapper mapper = - mapperBuilder - .withDefaultExecutionProfileName("defaultProfile") // doesn't need to exist - .build(); - SimpleDao dao = mapper.simpleDao("cl_one"); - assertClForAllQueries(dao, ConsistencyLevel.ONE); - } - - @Test - public void should_override_mapper_profile() { - DriverExecutionProfile clThreeProfile = - SESSION_RULE - .session() - .getContext() - .getConfig() - .getDefaultProfile() - 
.withString(DefaultDriverOption.REQUEST_CONSISTENCY, "THREE"); - SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfile(clThreeProfile).build(); - SimpleDao dao = mapper.simpleDao(clTwoProfile); - assertClForAllQueries(dao, ConsistencyLevel.TWO); - } - - @Test - public void should_override_mapper_profile_name_with_a_profile() { - SimpleMapper mapper = - mapperBuilder - .withDefaultExecutionProfileName("defaultProfile") // doesn't need to exist - .build(); - SimpleDao dao = mapper.simpleDao(clTwoProfile); - assertClForAllQueries(dao, ConsistencyLevel.TWO); - } - - @Test - public void should_override_mapper_profile_with_a_name() { - SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfile(clTwoProfile).build(); - SimpleDao dao = mapper.simpleDao("cl_one"); - assertClForAllQueries(dao, ConsistencyLevel.ONE); - } - - @Test - public void should_use_default_when_no_profile() { - SimpleMapper mapper = mapperBuilder.build(); - SimpleDao dao = mapper.simpleDao(); - // Default CL inherited from reference.conf - assertClForAllQueries(dao, ConsistencyLevel.LOCAL_ONE); - } - - private void assertClForAllQueries(SimpleDao dao, ConsistencyLevel expectedLevel) { - dao.save(SAMPLE_ENTITY); - assertServerSideCl(expectedLevel); - dao.delete(SAMPLE_ENTITY); - assertServerSideCl(expectedLevel); - dao.update(SAMPLE_ENTITY); - assertServerSideCl(expectedLevel); - dao.findByPk(SAMPLE_ENTITY.pk); - assertServerSideCl(expectedLevel); - - // Special cases: profile defined at the method level with statement attributes, should override - // dao-level profile. 
- dao.saveWithClOne(SAMPLE_ENTITY); - assertServerSideCl(ConsistencyLevel.ONE); - dao.saveWithCustomAttributes(SAMPLE_ENTITY, bs -> bs.setExecutionProfileName("cl_one")); - assertServerSideCl(ConsistencyLevel.ONE); - } - - private void assertServerSideCl(ConsistencyLevel expectedCl) { - List queryLogs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs(); - QueryLog lastLog = queryLogs.get(queryLogs.size() - 1); - Message message = lastLog.getFrame().message; - assertThat(message).isInstanceOf(Execute.class); - Execute queryExecute = (Execute) message; - assertThat(queryExecute.options.consistency).isEqualTo(expectedCl.getProtocolCode()); - } - - private static void primeInsertQuery() { - LinkedHashMap params = - new LinkedHashMap<>( - ImmutableMap.of("pk", SAMPLE_ENTITY.getPk(), "data", SAMPLE_ENTITY.getData())); - LinkedHashMap paramTypes = - new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "INSERT INTO ks.simple (pk,data) VALUES (:pk,:data)", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO), - params, - paramTypes)) - .then(noRows())); - } - - private static void primeDeleteQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", SAMPLE_ENTITY.getPk())); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "DELETE FROM ks.simple WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO), - params, - paramTypes)) - .then(noRows()) - .delay(1, TimeUnit.MILLISECONDS)); - } - - private static void primeSelectQuery() { - LinkedHashMap params 
= - new LinkedHashMap<>(ImmutableMap.of("pk", SAMPLE_ENTITY.getPk())); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "SELECT pk,data FROM ks.simple WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO), - params, - paramTypes)) - .then(noRows()) - .delay(1, TimeUnit.MILLISECONDS)); - } - - private static void primeCountQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", SAMPLE_ENTITY.getPk())); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "SELECT count(*) FROM ks.simple WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO), - params, - paramTypes)) - .then(PrimeDsl.rows().row("count", 1L).columnTypes("count", "bigint").build()) - .delay(1, TimeUnit.MILLISECONDS)); - } - - private static void primeUpdateQuery() { - LinkedHashMap params = - new LinkedHashMap<>( - ImmutableMap.of("pk", SAMPLE_ENTITY.getPk(), "data", SAMPLE_ENTITY.getData())); - LinkedHashMap paramTypes = - new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "UPDATE ks.simple SET data=:data WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), - params, - paramTypes)) - .then(noRows())); - } - - @Mapper - public interface SimpleMapper { - @DaoFactory - SimpleDao simpleDao(); - - @DaoFactory - SimpleDao simpleDao(@DaoProfile String executionProfile); 
- - @DaoFactory - SimpleDao simpleDao(@DaoProfile DriverExecutionProfile executionProfile); - - static MapperBuilder builder(CqlSession session) { - return new ProfileIT_SimpleMapperBuilder(session); - } - } - - @Dao - public interface SimpleDao { - @Insert - void save(Simple simple); - - @Delete - void delete(Simple simple); - - @Select - @SuppressWarnings("UnusedReturnValue") - Simple findByPk(UUID pk); - - @Query("SELECT count(*) FROM ks.simple WHERE pk=:pk") - long count(UUID pk); - - @Update - void update(Simple simple); - - @Insert - @StatementAttributes(executionProfileName = "cl_one") - void saveWithClOne(Simple simple); - - @Insert - void saveWithCustomAttributes(Simple simple, UnaryOperator attributes); - } - - @Entity(defaultKeyspace = "ks") - public static class Simple { - @PartitionKey private UUID pk; - private String data; - - public Simple() {} - - public Simple(UUID pk, String data) { - this.pk = pk; - this.data = data; - } - - public UUID getPk() { - return pk; - } - - public String getData() { - return data; - } - - public void setPk(UUID pk) { - - this.pk = pk; - } - - public void setData(String data) { - this.data = data; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ProfileIT.Simple)) { - return false; - } - ProfileIT.Simple simple = (ProfileIT.Simple) o; - return Objects.equals(pk, simple.pk) && Objects.equals(data, simple.data); - } - - @Override - public int hashCode() { - - return Objects.hash(pk, data); - } - - @Override - public String toString() { - return "Simple{" + "pk=" + pk + ", data='" + data + '\'' + '}'; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java deleted file mode 100644 index 9391c0363f8..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java 
+++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import 
com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** Covers the keyspace and table placeholders in {@link Query} methods. */ -@Category(ParallelizableTests.class) -public class QueryKeyspaceAndTableIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final CqlIdentifier FOO_TABLE_ID = CqlIdentifier.fromCql("foo"); - private static final CqlIdentifier OTHER_KEYSPACE = - CqlIdentifier.fromCql(QueryKeyspaceAndTableIT.class.getSimpleName() + "_alt"); - - private static TestMapper mapper; - - @BeforeClass - public static void createSchema() { - CqlSession session = SESSION_RULE.session(); - - for (String query : - ImmutableList.of( - "CREATE TABLE foo(k int PRIMARY KEY)", - String.format( - "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - OTHER_KEYSPACE.asCql(false)), - String.format("CREATE TABLE %s.foo(k int PRIMARY KEY)", OTHER_KEYSPACE.asCql(false)))) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - session.execute("INSERT INTO foo (k) VALUES (1)"); - session.execute( - String.format("INSERT INTO %s.foo (k) VALUES (1)", OTHER_KEYSPACE.asCql(false))); - session.execute( - String.format("INSERT INTO %s.foo (k) VALUES (2)", OTHER_KEYSPACE.asCql(false))); - - mapper = new QueryKeyspaceAndTableIT_TestMapperBuilder(session).build(); - } - - @Test - public void should_substitute_keyspaceId_and_tableId() { - DaoWithKeyspaceAndTableId dao = - 
mapper.daoWithKeyspaceAndTableId(SESSION_RULE.keyspace(), FOO_TABLE_ID); - assertThat(dao.count()).isEqualTo(1); - } - - @Test - public void should_fail_to_substitute_keyspaceId_if_dao_has_no_keyspace() { - Throwable t = catchThrowable(() -> mapper.daoWithKeyspaceAndTableId(null, FOO_TABLE_ID)); - assertThat(t) - .isInstanceOf(MapperException.class) - .hasMessage( - "Cannot substitute ${keyspaceId} in query " - + "'SELECT count(*) FROM ${keyspaceId}.${tableId}': " - + "the DAO wasn't built with a keyspace"); - } - - @Test - public void should_fail_to_substitute_tableId_if_dao_has_no_table() { - Throwable t = - catchThrowable(() -> mapper.daoWithKeyspaceAndTableId(SESSION_RULE.keyspace(), null)); - assertThat(t) - .isInstanceOf(MapperException.class) - .hasMessage( - "Cannot substitute ${tableId} in query " - + "'SELECT count(*) FROM ${keyspaceId}.${tableId}': " - + "the DAO wasn't built with a table"); - } - - @Test - public void should_use_keyspace_in_qualifiedTableId_when_dao_has_keyspace() { - DaoWithQualifiedTableId dao = mapper.daoWithQualifiedTableId(OTHER_KEYSPACE, FOO_TABLE_ID); - assertThat(dao.count()).isEqualTo(2); - } - - @Test - public void should_not_use_keyspace_in_qualifiedTableId_when_dao_has_no_keyspace() { - DaoWithQualifiedTableId dao = mapper.daoWithQualifiedTableId(null, FOO_TABLE_ID); - assertThat(dao.count()).isEqualTo(1); - } - - @Test - public void should_fail_to_substitute_qualifiedTableId_if_dao_has_no_table() { - Throwable t = - catchThrowable(() -> mapper.daoWithQualifiedTableId(SESSION_RULE.keyspace(), null)); - assertThat(t) - .isInstanceOf(MapperException.class) - .hasMessage( - "Cannot substitute ${qualifiedTableId} in query " - + "'SELECT count(*) FROM ${qualifiedTableId}': " - + "the DAO wasn't built with a table"); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DaoWithKeyspaceAndTableId { - @Query("SELECT count(*) FROM ${keyspaceId}.${tableId}") - long count(); - } - - @Dao - 
@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DaoWithQualifiedTableId { - @Query("SELECT count(*) FROM ${qualifiedTableId}") - long count(); - } - - @Mapper - public interface TestMapper { - @DaoFactory - DaoWithKeyspaceAndTableId daoWithKeyspaceAndTableId( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - DaoWithQualifiedTableId daoWithQualifiedTableId( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java deleted file mode 100644 index 4bfda4fdfdb..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import 
org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class QueryProviderIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - // Dummy counter to exercize the "custom state" feature: it gets incremented each time the query - // provider is called. - private static AtomicInteger executionCount = new AtomicInteger(); - - private static SensorDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - session.execute( - SimpleStatement.builder( - "CREATE TABLE sensor_reading(id int, month int, day int, value double, " - + "PRIMARY KEY (id, month, day)) " - + "WITH CLUSTERING ORDER BY (month DESC, day DESC)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - SensorMapper mapper = - new QueryProviderIT_SensorMapperBuilder(session) - .withCustomState("executionCount", executionCount) - .build(); - dao = mapper.sensorDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_invoke_query_provider() { - SensorReading readingFeb3 = new SensorReading(1, 2, 3, 9.3); - SensorReading readingFeb2 = new SensorReading(1, 2, 2, 8.6); - SensorReading readingFeb1 = new SensorReading(1, 2, 1, 8.7); - SensorReading readingJan31 = new SensorReading(1, 1, 31, 8.2); - dao.save(readingFeb3); - dao.save(readingFeb2); - dao.save(readingFeb1); - dao.save(readingJan31); - - assertThat(executionCount.get()).isEqualTo(0); - - assertThat(dao.findSlice(1, null, null).all()) - .containsExactly(readingFeb3, readingFeb2, readingFeb1, readingJan31); - assertThat(dao.findSlice(1, 2, null).all()) - .containsExactly(readingFeb3, readingFeb2, readingFeb1); - assertThat(dao.findSlice(1, 2, 3).all()).containsExactly(readingFeb3); - - 
assertThat(executionCount.get()).isEqualTo(3); - } - - @Mapper - public interface SensorMapper { - @DaoFactory - SensorDao sensorDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface SensorDao { - - @QueryProvider(providerClass = FindSliceProvider.class, entityHelpers = SensorReading.class) - PagingIterable findSlice(int id, Integer month, Integer day); - - @QueryProvider( - providerClass = FindSliceStreamProvider.class, - entityHelpers = SensorReading.class) - Stream findSliceAsStream(int id, Integer month, Integer day); - - @Insert - void save(SensorReading reading); - } - - public static class FindSliceProvider { - private final CqlSession session; - private final AtomicInteger executionCount; - private final EntityHelper sensorReadingHelper; - private final Select selectStart; - - public FindSliceProvider( - MapperContext context, EntityHelper sensorReadingHelper) { - this.session = context.getSession(); - this.executionCount = ((AtomicInteger) context.getCustomState().get("executionCount")); - this.sensorReadingHelper = sensorReadingHelper; - this.selectStart = - sensorReadingHelper.selectStart().whereColumn("id").isEqualTo(bindMarker()); - } - - public PagingIterable findSlice(int id, Integer month, Integer day) { - if (month == null && day != null) { - throw new IllegalArgumentException("Can't specify day if month is null"); - } - - executionCount.incrementAndGet(); - - Select select = this.selectStart; - if (month != null) { - select = select.whereColumn("month").isEqualTo(bindMarker()); - if (day != null) { - select = select.whereColumn("day").isEqualTo(bindMarker()); - } - } - PreparedStatement preparedStatement = session.prepare(select.build()); - BoundStatementBuilder boundStatementBuilder = - preparedStatement.boundStatementBuilder().setInt("id", id); - if (month != null) { - boundStatementBuilder = boundStatementBuilder.setInt("month", month); - if (day != null) { - 
boundStatementBuilder = boundStatementBuilder.setInt("day", day); - } - } - return session - .execute(boundStatementBuilder.build()) - .map(row -> sensorReadingHelper.get(row, false)); - } - } - - public static class FindSliceStreamProvider extends FindSliceProvider { - - public FindSliceStreamProvider( - MapperContext context, EntityHelper sensorReadingHelper) { - super(context, sensorReadingHelper); - } - - public Stream findSliceAsStream(int id, Integer month, Integer day) { - return StreamSupport.stream(findSlice(id, month, day).spliterator(), false); - } - } - - @Entity - public static class SensorReading { - @PartitionKey private int id; - - @ClusteringColumn(1) - private int month; - - @ClusteringColumn(2) - private int day; - - private double value; - - public SensorReading() {} - - public SensorReading(int id, int month, int day, double value) { - this.id = id; - this.month = month; - this.day = day; - this.value = value; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getMonth() { - return month; - } - - public void setMonth(int month) { - this.month = month; - } - - public int getDay() { - return day; - } - - public void setDay(int day) { - this.day = day; - } - - public double getValue() { - return value; - } - - public void setValue(double value) { - this.value = value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof SensorReading) { - SensorReading that = (SensorReading) other; - return this.id == that.id - && this.month == that.month - && this.day == that.day - && this.value == that.value; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, month, day, value); - } - - @Override - public String toString() { - return String.format("%d %d/%d %f", id, month, day, value); - } - } -} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReactiveIT.java deleted file mode 100644 index d04ab5150ec..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReactiveIT.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import io.reactivex.Flowable; -import java.util.List; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class QueryReactiveIT { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - private static DseTestDao dao; 
- - @BeforeClass - public static void setup() { - CqlSession session = sessionRule.session(); - - session.execute( - SimpleStatement.builder( - "CREATE TABLE test_entity(id int, rank int, value int, PRIMARY KEY(id, rank))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - - TestMapper testMapper = new QueryReactiveIT_TestMapperBuilder(session).build(); - dao = testMapper.productDao(sessionRule.keyspace()); - } - - @Before - public void insertData() { - for (int i = 0; i < 10; i++) { - dao.insert(new TestEntity(1, i, i)); - } - } - - @Test - public void should_query_reactive() { - ReactiveResultSet rs = dao.findByIdReactive(1); - assertThat(Flowable.fromPublisher(rs).count().blockingGet()).isEqualTo(10); - } - - @Test - public void should_query_reactive_mapped() { - MappedReactiveResultSet rs = dao.findByIdReactiveMapped(1); - List results = Flowable.fromPublisher(rs).toList().blockingGet(); - assertThat(results).hasSize(10); - assertThat(results).extracting("rank").containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); - } - - @Mapper - public interface TestMapper { - - @DaoFactory - DseTestDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DseTestDao { - - @Insert - void insert(TestEntity entity); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") - MappedReactiveResultSet findByIdReactiveMapped(int id); - - @Query("SELECT * FROM ${keyspaceId}.test_entity WHERE id = :id") - ReactiveResultSet findByIdReactive(int id); - } - - @Entity - public static class TestEntity { - @PartitionKey private int id; - - @ClusteringColumn private int rank; - - private Integer value; - - public TestEntity() {} - - public TestEntity(int id, int rank, Integer value) { - this.id = id; - this.rank = rank; - this.value = value; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getRank() { - return rank; - } - - 
public void setRank(int rank) { - this.rank = rank; - } - - public Integer getValue() { - return value; - } - - public void setValue(Integer value) { - this.value = value; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java deleted file mode 100644 index c6e90912206..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java +++ /dev/null @@ -1,364 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import 
java.util.concurrent.ExecutionException; -import java.util.stream.Stream; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** Covers the return types of {@link Query} methods. */ -@Category(ParallelizableTests.class) -public class QueryReturnTypesIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestDao dao; - - @BeforeClass - public static void createSchema() { - CqlSession session = SESSION_RULE.session(); - - session.execute( - SimpleStatement.builder( - "CREATE TABLE test_entity(id int, rank int, value int, PRIMARY KEY(id, rank))") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - TestMapper mapper = new QueryReturnTypesIT_TestMapperBuilder(session).build(); - dao = mapper.dao(SESSION_RULE.keyspace(), CqlIdentifier.fromCql("test_entity")); - } - - @Before - public void insertData() { - for (int i = 0; i < 10; i++) { - dao.insert(new TestEntity(1, i, i)); - } - } - - @Test - public void should_execute_query_and_map_to_void() { - dao.delete(1, 1); - assertThat(dao.findByIdAndRank(1, 1)).isNull(); - } - - @Test - public void should_execute_async_query_and_map_to_void() { - CompletableFutures.getUninterruptibly(dao.deleteAsync(1, 1).toCompletableFuture()); - assertThat(dao.findByIdAndRank(1, 1)).isNull(); - } - - @Test - public void should_execute_conditional_query_and_map_to_boolean() { - assertThat(dao.deleteIfExists(1, 1)).isTrue(); - assertThat(dao.deleteIfExists(1, 1)).isFalse(); - } - - @Test - public void should_execute_async_conditional_query_and_map_to_boolean() { - 
assertThat(CompletableFutures.getUninterruptibly(dao.deleteIfExistsAsync(1, 1))).isTrue(); - assertThat(CompletableFutures.getUninterruptibly(dao.deleteIfExistsAsync(1, 1))).isFalse(); - } - - @Test - public void should_execute_count_query_and_map_to_long() { - assertThat(dao.countById(1)).isEqualTo(10); - } - - @Test - public void should_fail_to_map_to_long_if_query_returns_other_type() { - Throwable t = catchThrowable(() -> dao.wrongCount()); - assertThat(t) - .isInstanceOf(MapperException.class) - .hasMessage( - "Expected the query to return a column with CQL type BIGINT in first position " - + "(return type long is intended for COUNT queries)"); - } - - @Test - public void should_execute_async_count_query_and_map_to_long() { - assertThat(CompletableFutures.getUninterruptibly(dao.countByIdAsync(1))).isEqualTo(10); - } - - @Test - public void should_execute_query_and_map_to_row() { - Row row = dao.findRowByIdAndRank(1, 1); - assertThat(row).isNotNull(); - assertThat(row.getColumnDefinitions().size()).isEqualTo(3); - assertThat(row.getInt("id")).isEqualTo(1); - assertThat(row.getInt("rank")).isEqualTo(1); - assertThat(row.getInt("value")).isEqualTo(1); - } - - @Test - public void should_execute_async_query_and_map_to_row() { - Row row = CompletableFutures.getUninterruptibly(dao.findRowByIdAndRankAsync(1, 1)); - assertThat(row).isNotNull(); - assertThat(row.getColumnDefinitions().size()).isEqualTo(3); - assertThat(row.getInt("id")).isEqualTo(1); - assertThat(row.getInt("rank")).isEqualTo(1); - assertThat(row.getInt("value")).isEqualTo(1); - } - - @Test - public void should_execute_query_and_map_to_result_set() { - ResultSet resultSet = dao.findRowsById(1); - assertThat(resultSet.all()).hasSize(10); - } - - @Test - public void should_execute_async_query_and_map_to_result_set() { - AsyncResultSet resultSet = CompletableFutures.getUninterruptibly(dao.findRowsByIdAsync(1)); - assertThat(ImmutableList.copyOf(resultSet.currentPage())).hasSize(10); - 
assertThat(resultSet.hasMorePages()).isFalse(); - } - - @Test - public void should_execute_query_and_map_to_entity() { - TestEntity entity = dao.findByIdAndRank(1, 1); - assertThat(entity.getId()).isEqualTo(1); - assertThat(entity.getRank()).isEqualTo(1); - assertThat(entity.getValue()).isEqualTo(1); - - entity = dao.findByIdAndRank(2, 1); - assertThat(entity).isNull(); - } - - @Test - public void should_execute_async_query_and_map_to_entity() { - TestEntity entity = CompletableFutures.getUninterruptibly(dao.findByIdAndRankAsync(1, 1)); - assertThat(entity.getId()).isEqualTo(1); - assertThat(entity.getRank()).isEqualTo(1); - assertThat(entity.getValue()).isEqualTo(1); - - entity = dao.findByIdAndRank(2, 1); - assertThat(entity).isNull(); - } - - @Test - public void should_execute_query_and_map_to_optional_entity() { - Optional maybeEntity = dao.findOptionalByIdAndRank(1, 1); - assertThat(maybeEntity) - .hasValueSatisfying( - entity -> { - assertThat(entity.getId()).isEqualTo(1); - assertThat(entity.getRank()).isEqualTo(1); - assertThat(entity.getValue()).isEqualTo(1); - }); - - maybeEntity = dao.findOptionalByIdAndRank(2, 1); - assertThat(maybeEntity).isEmpty(); - } - - @Test - public void should_execute_async_query_and_map_to_optional_entity() { - Optional maybeEntity = - CompletableFutures.getUninterruptibly(dao.findOptionalByIdAndRankAsync(1, 1)); - assertThat(maybeEntity) - .hasValueSatisfying( - entity -> { - assertThat(entity.getId()).isEqualTo(1); - assertThat(entity.getRank()).isEqualTo(1); - assertThat(entity.getValue()).isEqualTo(1); - }); - - maybeEntity = dao.findOptionalByIdAndRank(2, 1); - assertThat(maybeEntity).isEmpty(); - } - - @Test - public void should_execute_query_and_map_to_iterable() { - PagingIterable iterable = dao.findById(1); - assertThat(iterable.all()).hasSize(10); - } - - @Test - public void should_execute_query_and_map_to_stream() { - Stream stream = dao.findByIdAsStream(1); - assertThat(stream).hasSize(10); - } - - @Test - public 
void should_execute_async_query_and_map_to_iterable() { - MappedAsyncPagingIterable iterable = - CompletableFutures.getUninterruptibly(dao.findByIdAsync(1)); - assertThat(ImmutableList.copyOf(iterable.currentPage())).hasSize(10); - assertThat(iterable.hasMorePages()).isFalse(); - } - - @Test - public void should_execute_query_and_map_to_stream_async() - throws ExecutionException, InterruptedException { - CompletableFuture> stream = dao.findByIdAsStreamAsync(1); - assertThat(stream.get()).hasSize(10); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface TestDao { - @Insert - void insert(TestEntity entity); - - @Query("DELETE FROM ${qualifiedTableId} WHERE id = :id and rank = :rank") - void delete(int id, int rank); - - @Query("DELETE FROM ${qualifiedTableId} WHERE id = :id and rank = :rank") - CompletionStage deleteAsync(int id, int rank); - - @Query("DELETE FROM ${qualifiedTableId} WHERE id = :id and rank = :rank IF EXISTS") - boolean deleteIfExists(int id, int rank); - - @Query("DELETE FROM ${qualifiedTableId} WHERE id = :id and rank = :rank IF EXISTS") - CompletableFuture deleteIfExistsAsync(int id, int rank); - - @Query("SELECT count(*) FROM ${qualifiedTableId} WHERE id = :id") - long countById(int id); - - @Query("SELECT count(*) FROM ${qualifiedTableId} WHERE id = :id") - CompletableFuture countByIdAsync(int id); - - // Error: the query does not return a long as the first column - @Query("SELECT release_version FROM system.local WHERE key='local'") - @SuppressWarnings("UnusedReturnValue") - long wrongCount(); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id AND rank = :rank") - Row findRowByIdAndRank(int id, int rank); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id AND rank = :rank") - CompletableFuture findRowByIdAndRankAsync(int id, int rank); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") - ResultSet findRowsById(int id); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id 
= :id") - CompletableFuture findRowsByIdAsync(int id); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id AND rank = :rank") - TestEntity findByIdAndRank(int id, int rank); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id AND rank = :rank") - CompletableFuture findByIdAndRankAsync(int id, int rank); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id AND rank = :rank") - Optional findOptionalByIdAndRank(int id, int rank); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id AND rank = :rank") - CompletableFuture> findOptionalByIdAndRankAsync(int id, int rank); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") - PagingIterable findById(int id); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") - Stream findByIdAsStream(int id); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") - CompletableFuture> findByIdAsync(int id); - - @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") - CompletableFuture> findByIdAsStreamAsync(int id); - } - - @Entity - public static class TestEntity { - @PartitionKey private int id; - - @ClusteringColumn private int rank; - - private Integer value; - - public TestEntity() {} - - public TestEntity(int id, int rank, Integer value) { - this.id = id; - this.rank = rank; - this.value = value; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getRank() { - return rank; - } - - public void setRank(int rank) { - this.rank = rank; - } - - public Integer getValue() { - return value; - } - - public void setValue(Integer value) { - this.value = value; - } - } - - @Mapper - public interface TestMapper { - @DaoFactory - TestDao dao(@DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SchemaValidationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SchemaValidationIT.java deleted file mode 
100644 index 5bf6fc2d27a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SchemaValidationIT.java +++ /dev/null @@ -1,1256 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.mapper.annotations.SchemaHint.TargetElement; -import static com.datastax.oss.driver.internal.core.util.LoggerTest.setupTestLogger; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatCode; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -import ch.qos.logback.classic.Level; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import 
com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.UUID; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.4", - description = "Creates a SASI index") -public class SchemaValidationIT extends InventoryITBase { - - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - - private static InventoryMapper mapper; - private static InventoryMapper mapperDisabledValidation; - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); - - 
@BeforeClass - public static void setup() { - CqlSession session = sessionRule.session(); - List statements = - Arrays.asList( - "CREATE TABLE product_simple(id uuid PRIMARY KEY, description text, unmapped text)", - "CREATE TABLE product_simple_missing_p_k(id uuid PRIMARY KEY, description text, unmapped text)", - "CREATE TABLE product_simple_missing_clustering_column(id uuid PRIMARY KEY, description text, unmapped text)", - "CREATE TABLE product_pk_and_clustering(id uuid, c_id uuid, PRIMARY KEY (id, c_id))", - "CREATE TABLE product_wrong_type(id uuid PRIMARY KEY, wrong_type_column text)", - "CREATE TYPE dimensions_with_incorrect_name(length int, width int, height int)", - "CREATE TYPE dimensions_with_wrong_type(length int, width int, height text)", - "CREATE TYPE dimensions_with_incorrect_name_schema_hint_udt(length int, width int, height int)", - "CREATE TYPE dimensions_with_incorrect_name_schema_hint_table(length int, width int, height int)", - "CREATE TABLE product_with_incorrect_udt(id uuid PRIMARY KEY, description text, dimensions dimensions_with_incorrect_name)", - "CREATE TABLE product_with_incorrect_udt_schema_hint_udt(id uuid PRIMARY KEY, description text, dimensions dimensions_with_incorrect_name_schema_hint_udt)", - "CREATE TABLE product_with_incorrect_udt_schema_hint_table(id uuid PRIMARY KEY, description text, dimensions dimensions_with_incorrect_name_schema_hint_table)", - "CREATE TABLE product_with_udt_wrong_type(id uuid PRIMARY KEY, description text, dimensions dimensions_with_wrong_type)"); - - for (String query : statements) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } - for (String query : createStatements(ccm)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } - mapper = - new SchemaValidationIT_InventoryMapperBuilder(session) - .withSchemaValidationEnabled(true) - .build(); - mapperDisabledValidation = - new 
SchemaValidationIT_InventoryMapperBuilder(session) - .withSchemaValidationEnabled(false) - .build(); - } - - @Before - public void clearData() { - CqlSession session = sessionRule.session(); - session.execute( - SimpleStatement.builder("TRUNCATE product_simple") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product_with_incorrect_udt") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product_with_incorrect_udt_schema_hint_udt") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product_with_incorrect_udt_schema_hint_table") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product_wrong_type") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product_pk_and_clustering") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - session.execute( - SimpleStatement.builder("TRUNCATE product_with_udt_wrong_type") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - - @Test - public void should_throw_when_use_not_properly_mapped_entity() { - assertThatThrownBy(() -> mapper.productSimpleDao(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - String.format( - "The CQL ks.table: %s.product_simple has missing columns: [description_with_incorrect_name, some_other_not_mapped_field] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimple", - sessionRule.keyspace())); - } - - @Test - public void - should_throw_when_use_not_properly_mapped_entity_when_ks_is_passed_as_null_extracting_ks_from_session() { 
- assertThatThrownBy(() -> mapper.productSimpleDao(null)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - String.format( - "The CQL ks.table: %s.product_simple has missing columns: [description_with_incorrect_name, some_other_not_mapped_field] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimple", - sessionRule.keyspace())); - } - - @Test - public void should_log_warn_when_entity_has_no_corresponding_cql_table() { - LoggerTest.LoggerSetup logger = - setupTestLogger( - SchemaValidationIT_ProductCqlTableMissingHelper__MapperGenerated.class, Level.WARN); - try { - assertThatThrownBy(() -> mapper.productCqlTableMissingDao(sessionRule.keyspace())) - .isInstanceOf(InvalidQueryException.class); - - verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - String.format( - "There is no ks.table or UDT: %s.product_cql_table_missing for the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductCqlTableMissing, or metadata is out of date.", - sessionRule.keyspace())); - - } finally { - logger.close(); - } - } - - @Test - public void should_throw_general_driver_exception_when_schema_validation_check_is_disabled() { - assertThatThrownBy( - () -> mapperDisabledValidation.productDaoValidationDisabled(sessionRule.keyspace())) - .isInstanceOf(InvalidQueryException.class) - .hasMessageContaining("Undefined column name description_with_incorrect_name"); - } - - @Test - public void should_not_throw_on_table_with_properly_mapped_udt_field() { - assertThatCode(() -> mapper.productDao(sessionRule.keyspace())).doesNotThrowAnyException(); - } - - @Test - public void should_throw_when_use_not_properly_mapped_entity_with_udt() { - assertThatThrownBy(() -> 
mapper.productWithIncorrectUdtDao(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasStackTraceContaining( - String.format( - "The CQL ks.udt: %s.dimensions_with_incorrect_name has missing columns: [length_not_present] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithIncorrectName", - sessionRule.keyspace())); - } - - @Test - public void should_throw_when_use_not_properly_mapped_entity_with_udt_with_udt_schema_hint() { - assertThatThrownBy(() -> mapper.productWithIncorrectUdtSchemaHintUdt(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasStackTraceContaining( - String.format( - "The CQL ks.udt: %s.dimensions_with_incorrect_name_schema_hint_udt has missing columns: [length_not_present] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithIncorrectNameSchemaHintUdt", - sessionRule.keyspace())); - } - - @Test - public void - should_warn_about_missing_table_when_use_not_properly_mapped_entity_with_udt_with_table_schema_hint() { - LoggerTest.LoggerSetup logger = - setupTestLogger( - SchemaValidationIT_DimensionsWithIncorrectNameSchemaHintTableHelper__MapperGenerated - .class, - Level.WARN); - try { - // when - mapper.productWithIncorrectUdtSchemaHintTable(sessionRule.keyspace()); - - verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - String.format( - "There is no ks.table or UDT: %s.dimensions_with_incorrect_name_schema_hint_table for the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithIncorrectNameSchemaHintTable, or metadata is out of date.", - sessionRule.keyspace())); - } finally { - logger.close(); - } - } - - @Test - public void should_throw_when_table_is_missing_PKs() { - 
assertThatThrownBy(() -> mapper.productSimpleMissingPKDao(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - String.format( - "The CQL ks.table: %s.product_simple_missing_p_k has missing Primary Key columns: [id_not_present] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimpleMissingPK", - sessionRule.keyspace())); - } - - @Test - public void should_throw_when_table_is_missing_clustering_column() { - assertThatThrownBy(() -> mapper.productSimpleMissingClusteringColumn(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - String.format( - "The CQL ks.table: %s.product_simple_missing_clustering_column has missing Clustering columns: [not_existing_clustering_column] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimpleMissingClusteringColumn", - sessionRule.keyspace())); - } - - @Test - public void should_throw_when_type_defined_in_table_does_not_match_type_from_entity() { - assertThatThrownBy(() -> mapper.productDaoWrongType(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - String.format( - "The CQL ks.table: %s.product_wrong_type defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductWrongType declares type mappings that are not supported by the codec registry:\n" - + "Field: wrong_type_column, Entity Type: java.lang.Integer, CQL type: TEXT", - sessionRule.keyspace())); - } - - @Test - public void should_throw_when_type_defined_in_udt_does_not_match_type_from_entity() { - assertThatThrownBy(() -> mapper.productWithUdtWrongTypeDao(sessionRule.keyspace())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - String.format( - "The CQL ks.udt: %s.dimensions_with_wrong_type defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithWrongType 
declares type mappings that are not supported by the codec registry:\n" - + "Field: height, Entity Type: java.lang.Integer, CQL type: TEXT", - sessionRule.keyspace())); - } - - @Test - public void should_not_throw_when_have_correct_pk_and_clustering() { - assertThatCode(() -> mapper.productPkAndClusteringDao(sessionRule.keyspace())) - .doesNotThrowAnyException(); - } - - @Test - public void should_log_warning_when_passing_not_existing_keyspace() { - LoggerTest.LoggerSetup logger = - setupTestLogger(SchemaValidationIT_ProductSimpleHelper__MapperGenerated.class, Level.WARN); - try { - // when - assertThatThrownBy( - () -> mapper.productSimpleDao(CqlIdentifier.fromCql("not_existing_keyspace"))) - .isInstanceOf(InvalidQueryException.class) - .hasMessageContaining("not_existing_keyspace does not exist"); - - // then - verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - "Unable to validate table: product_simple for the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimple because the session metadata has no information about the keyspace: not_existing_keyspace."); - } finally { - logger.close(); - } - } - - @Test - public void should_not_warn_or_throw_when_target_element_is_NONE() { - LoggerTest.LoggerSetup logger = - setupTestLogger( - SchemaValidationIT_DoesNotExistNoValidationHelper__MapperGenerated.class, Level.WARN); - - // when - mapper.noValidationDao(sessionRule.keyspace()); - - // then - // no exceptions, no logs - verify(logger.appender, never()).doAppend(any()); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductSimpleDao productSimpleDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleCqlTableMissingDao productCqlTableMissingDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - 
ProductSimpleDaoValidationDisabledDao productDaoValidationDisabled( - @DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductWithIncorrectUdtDao productWithIncorrectUdtDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductWithIncorrectUdtSchemaHintUdtDao productWithIncorrectUdtSchemaHintUdt( - @DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductWithIncorrectUdtSchemaHintTableDao productWithIncorrectUdtSchemaHintTable( - @DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductWithUdtWrongTypeDao productWithUdtWrongTypeDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleMissingPKDao productSimpleMissingPKDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductSimpleMissingClusteringColumnDao productSimpleMissingClusteringColumn( - @DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductDaoWrongTypeDao productDaoWrongType(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductPkAndClusteringDao productPkAndClusteringDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - NoValidationDao noValidationDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - public interface ProductWithIncorrectUdtDao { - - @Update(customWhereClause = "id = :id") - void updateWhereId(ProductWithIncorrectUdt product, UUID id); - } - - @Dao - public interface ProductWithIncorrectUdtSchemaHintUdtDao { - - @Update(customWhereClause = "id = :id") - void updateWhereId(ProductWithIncorrectUdtSchemaHintUdt product, UUID id); - } - - @Dao - public interface ProductWithIncorrectUdtSchemaHintTableDao { - - @Update(customWhereClause = "id = :id") - void updateWhereId(ProductWithIncorrectUdtSchemaHintTable product, UUID id); - } - - @Dao - public interface ProductWithUdtWrongTypeDao { - - @Update(customWhereClause = "id = :id") - void updateWhereId(ProductWithUdtWrongType product, UUID id); - } - - @Dao - public interface ProductDao { - 
- @Update(customWhereClause = "id = :id") - void updateWhereId(Product product, UUID id); - } - - @Dao - public interface ProductSimpleDao { - - @Select - ProductSimple findById(UUID productId); - } - - @Dao - public interface ProductSimpleDaoValidationDisabledDao { - - @Select - ProductSimple findById(UUID productId); - } - - @Dao - public interface ProductSimpleCqlTableMissingDao { - - @Select - ProductCqlTableMissing findById(UUID productId); - } - - @Dao - public interface ProductSimpleMissingPKDao { - @Select - ProductSimpleMissingPK findById(UUID productId); - } - - @Dao - public interface ProductSimpleMissingClusteringColumnDao { - @Select - ProductSimpleMissingClusteringColumn findById(UUID productId); - } - - @Dao - public interface ProductDaoWrongTypeDao { - - @Select - ProductWrongType findById(UUID productId); - } - - @Dao - public interface ProductPkAndClusteringDao { - - @Select - ProductPkAndClustering findById(UUID productId); - } - - @Dao - public interface NoValidationDao { - // Not a real query, we just need to reference the entities - @QueryProvider( - providerClass = DummyProvider.class, - entityHelpers = {DoesNotExistNoValidation.class, ProductCqlTableMissingNoValidation.class}) - void doNothing(); - } - - @SuppressWarnings("unused") - static class DummyProvider { - DummyProvider( - MapperContext context, - EntityHelper helper1, - EntityHelper helper2) {} - - void doNothing() {} - } - - @Entity - public static class ProductCqlTableMissing { - @PartitionKey private UUID id; - - public ProductCqlTableMissing() {} - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - } - - @Entity - public static class ProductSimpleMissingPK { - @PartitionKey private UUID idNotPresent; - - public ProductSimpleMissingPK() {} - - public UUID getIdNotPresent() { - return idNotPresent; - } - - public void setIdNotPresent(UUID idNotPresent) { - this.idNotPresent = idNotPresent; - } - } - - @Entity - public static class 
ProductWrongType { - @PartitionKey private UUID id; - private Integer wrongTypeColumn; - - public ProductWrongType() {} - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public Integer getWrongTypeColumn() { - return wrongTypeColumn; - } - - public void setWrongTypeColumn(Integer wrongTypeColumn) { - this.wrongTypeColumn = wrongTypeColumn; - } - } - - @Entity - public static class ProductSimpleMissingClusteringColumn { - @PartitionKey private UUID id; - @ClusteringColumn private Integer notExistingClusteringColumn; - - public ProductSimpleMissingClusteringColumn() {} - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public Integer getNotExistingClusteringColumn() { - return notExistingClusteringColumn; - } - - public void setNotExistingClusteringColumn(Integer notExistingClusteringColumn) { - this.notExistingClusteringColumn = notExistingClusteringColumn; - } - } - - @Entity - public static class ProductPkAndClustering { - @PartitionKey private UUID id; - @ClusteringColumn private UUID cId; - - public ProductPkAndClustering() {} - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public UUID getcId() { - return cId; - } - - public void setcId(UUID cId) { - this.cId = cId; - } - } - - @Entity - public static class ProductSimple { - @PartitionKey private UUID id; - private String descriptionWithIncorrectName; - private Integer someOtherNotMappedField; - - public ProductSimple() {} - - public ProductSimple( - UUID id, String descriptionWithIncorrectName, Integer someOtherNotMappedField) { - this.id = id; - this.descriptionWithIncorrectName = descriptionWithIncorrectName; - this.someOtherNotMappedField = someOtherNotMappedField; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescriptionWithIncorrectName() { - return descriptionWithIncorrectName; - } - 
- public void setDescriptionWithIncorrectName(String descriptionWithIncorrectName) { - this.descriptionWithIncorrectName = descriptionWithIncorrectName; - } - - public Integer getSomeOtherNotMappedField() { - return someOtherNotMappedField; - } - - public void setSomeOtherNotMappedField(Integer someOtherNotMappedField) { - this.someOtherNotMappedField = someOtherNotMappedField; - } - - @Override - public boolean equals(Object o) { - - if (this == o) { - return true; - } - if (!(o instanceof ProductSimple)) { - return false; - } - ProductSimple that = (ProductSimple) o; - return this.id.equals(that.id) - && this.someOtherNotMappedField.equals(that.someOtherNotMappedField) - && this.descriptionWithIncorrectName.equals(that.descriptionWithIncorrectName); - } - - @Override - public int hashCode() { - return Objects.hash(id, descriptionWithIncorrectName, someOtherNotMappedField); - } - - @Override - public String toString() { - return "ProductSimple{" - + "id=" - + id - + ", descriptionWithIncorrectName='" - + descriptionWithIncorrectName - + '\'' - + ", someOtherNotMappedField=" - + someOtherNotMappedField - + '}'; - } - } - - @Entity - public static class ProductWithIncorrectUdt { - - @PartitionKey private UUID id; - private String description; - private DimensionsWithIncorrectName dimensions; - - public ProductWithIncorrectUdt() {} - - public ProductWithIncorrectUdt( - UUID id, String description, DimensionsWithIncorrectName dimensions) { - this.id = id; - this.description = description; - this.dimensions = dimensions; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public DimensionsWithIncorrectName getDimensions() { - return dimensions; - } - - public void setDimensions(DimensionsWithIncorrectName dimensions) { - this.dimensions = dimensions; - } - - 
@Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ProductWithIncorrectUdt)) { - return false; - } - ProductWithIncorrectUdt that = (ProductWithIncorrectUdt) o; - return this.id.equals(that.id) - && this.description.equals(that.description) - && this.dimensions.equals(that.dimensions); - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - - @Override - public String toString() { - return "ProductWithIncorrectUdt{" - + "id=" - + id - + ", description='" - + description - + '\'' - + ", dimensions=" - + dimensions - + '}'; - } - } - - @Entity - public static class ProductWithUdtWrongType { - - @PartitionKey private UUID id; - private String description; - private DimensionsWithWrongType dimensions; - - public ProductWithUdtWrongType() {} - - public ProductWithUdtWrongType( - UUID id, String description, DimensionsWithWrongType dimensions) { - this.id = id; - this.description = description; - this.dimensions = dimensions; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public DimensionsWithWrongType getDimensions() { - return dimensions; - } - - public void setDimensions(DimensionsWithWrongType dimensions) { - this.dimensions = dimensions; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ProductWithUdtWrongType)) { - return false; - } - ProductWithUdtWrongType that = (ProductWithUdtWrongType) o; - return this.id.equals(that.id) - && this.description.equals(that.description) - && this.dimensions.equals(that.dimensions); - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - - @Override - public String toString() { - return "ProductWithUdtWrongType{" - + 
"id=" - + id - + ", description='" - + description - + '\'' - + ", dimensions=" - + dimensions - + '}'; - } - } - - @Entity - public static class ProductWithIncorrectUdtSchemaHintUdt { - - @PartitionKey private UUID id; - private String description; - private DimensionsWithIncorrectNameSchemaHintUdt dimensions; - - public ProductWithIncorrectUdtSchemaHintUdt() {} - - public ProductWithIncorrectUdtSchemaHintUdt( - UUID id, String description, DimensionsWithIncorrectNameSchemaHintUdt dimensions) { - this.id = id; - this.description = description; - this.dimensions = dimensions; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public DimensionsWithIncorrectNameSchemaHintUdt getDimensions() { - return dimensions; - } - - public void setDimensions(DimensionsWithIncorrectNameSchemaHintUdt dimensions) { - this.dimensions = dimensions; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ProductWithIncorrectUdtSchemaHintUdt)) { - return false; - } - ProductWithIncorrectUdtSchemaHintUdt that = (ProductWithIncorrectUdtSchemaHintUdt) o; - return this.id.equals(that.id) - && this.description.equals(that.description) - && this.dimensions.equals(that.dimensions); - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - - @Override - public String toString() { - return "ProductWithIncorrectUdtSchemaHint{" - + "id=" - + id - + ", description='" - + description - + '\'' - + ", dimensions=" - + dimensions - + '}'; - } - } - - @Entity - public static class ProductWithIncorrectUdtSchemaHintTable { - - @PartitionKey private UUID id; - private String description; - private DimensionsWithIncorrectNameSchemaHintTable dimensions; - - public ProductWithIncorrectUdtSchemaHintTable() 
{} - - public ProductWithIncorrectUdtSchemaHintTable( - UUID id, String description, DimensionsWithIncorrectNameSchemaHintTable dimensions) { - this.id = id; - this.description = description; - this.dimensions = dimensions; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public DimensionsWithIncorrectNameSchemaHintTable getDimensions() { - return dimensions; - } - - public void setDimensions(DimensionsWithIncorrectNameSchemaHintTable dimensions) { - this.dimensions = dimensions; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof ProductWithIncorrectUdtSchemaHintTable)) { - return false; - } - ProductWithIncorrectUdtSchemaHintTable that = (ProductWithIncorrectUdtSchemaHintTable) o; - return this.id.equals(that.id) - && this.description.equals(that.description) - && this.dimensions.equals(that.dimensions); - } - - @Override - public int hashCode() { - return Objects.hash(id, description, dimensions); - } - - @Override - public String toString() { - return "ProductWithIncorrectUdtSchemaHintTable{" - + "id=" - + id - + ", description='" - + description - + '\'' - + ", dimensions=" - + dimensions - + '}'; - } - } - - @Entity - public static class DimensionsWithIncorrectName { - - private int lengthNotPresent; - private int width; - private int height; - - public DimensionsWithIncorrectName() {} - - public DimensionsWithIncorrectName(int lengthNotPresent, int width, int height) { - this.lengthNotPresent = lengthNotPresent; - this.width = width; - this.height = height; - } - - public int getLengthNotPresent() { - return lengthNotPresent; - } - - public void setLengthNotPresent(int lengthNotPresent) { - this.lengthNotPresent = lengthNotPresent; - } - - public int getWidth() { - return width; - } - - public 
void setWidth(int width) { - this.width = width; - } - - public int getHeight() { - return height; - } - - public void setHeight(int height) { - this.height = height; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DimensionsWithIncorrectName)) { - return false; - } - DimensionsWithIncorrectName that = (DimensionsWithIncorrectName) o; - return this.lengthNotPresent == that.lengthNotPresent - && this.height == that.height - && this.width == that.width; - } - - @Override - public int hashCode() { - return Objects.hash(lengthNotPresent, width, height); - } - - @Override - public String toString() { - return "DimensionsWithIncorrectName{" - + "lengthNotPresent=" - + lengthNotPresent - + ", width=" - + width - + ", height=" - + height - + '}'; - } - } - - @Entity - public static class DimensionsWithWrongType { - - private int length; - private int width; - private int height; - - public DimensionsWithWrongType() {} - - public DimensionsWithWrongType(int length, int width, int height) { - this.length = length; - this.width = width; - this.height = height; - } - - public int getLength() { - return length; - } - - public void setLength(int length) { - this.length = length; - } - - public int getWidth() { - return width; - } - - public void setWidth(int width) { - this.width = width; - } - - public int getHeight() { - return height; - } - - public void setHeight(int height) { - this.height = height; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DimensionsWithWrongType)) { - return false; - } - DimensionsWithWrongType that = (DimensionsWithWrongType) o; - return this.length == that.length && this.height == that.height && this.width == that.width; - } - - @Override - public int hashCode() { - return Objects.hash(length, width, height); - } - - @Override - public String toString() { - return "DimensionsWithWrongType{" - + "length=" - + length - + 
", width=" - + width - + ", height=" - + height - + '}'; - } - } - - @Entity - @SchemaHint(targetElement = TargetElement.UDT) - public static class DimensionsWithIncorrectNameSchemaHintUdt { - - private int lengthNotPresent; - private int width; - private int height; - - public DimensionsWithIncorrectNameSchemaHintUdt() {} - - public DimensionsWithIncorrectNameSchemaHintUdt(int lengthNotPresent, int width, int height) { - this.lengthNotPresent = lengthNotPresent; - this.width = width; - this.height = height; - } - - public int getLengthNotPresent() { - return lengthNotPresent; - } - - public void setLengthNotPresent(int lengthNotPresent) { - this.lengthNotPresent = lengthNotPresent; - } - - public int getWidth() { - return width; - } - - public void setWidth(int width) { - this.width = width; - } - - public int getHeight() { - return height; - } - - public void setHeight(int height) { - this.height = height; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DimensionsWithIncorrectNameSchemaHintUdt)) { - return false; - } - DimensionsWithIncorrectNameSchemaHintUdt that = (DimensionsWithIncorrectNameSchemaHintUdt) o; - return this.lengthNotPresent == that.lengthNotPresent - && this.height == that.height - && this.width == that.width; - } - - @Override - public int hashCode() { - return Objects.hash(lengthNotPresent, width, height); - } - - @Override - public String toString() { - return "DimensionsWithIncorrectNameSchemaHintUdt{" - + "lengthNotPresent=" - + lengthNotPresent - + ", width=" - + width - + ", height=" - + height - + '}'; - } - } - - @Entity - @SchemaHint(targetElement = TargetElement.TABLE) - public static class DimensionsWithIncorrectNameSchemaHintTable { - - private int lengthNotPresent; - private int width; - private int height; - - public DimensionsWithIncorrectNameSchemaHintTable() {} - - public DimensionsWithIncorrectNameSchemaHintTable(int lengthNotPresent, int width, int height) { - 
this.lengthNotPresent = lengthNotPresent; - this.width = width; - this.height = height; - } - - public int getLengthNotPresent() { - return lengthNotPresent; - } - - public void setLengthNotPresent(int lengthNotPresent) { - this.lengthNotPresent = lengthNotPresent; - } - - public int getWidth() { - return width; - } - - public void setWidth(int width) { - this.width = width; - } - - public int getHeight() { - return height; - } - - public void setHeight(int height) { - this.height = height; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof DimensionsWithIncorrectNameSchemaHintTable)) { - return false; - } - DimensionsWithIncorrectNameSchemaHintTable that = - (DimensionsWithIncorrectNameSchemaHintTable) o; - return this.lengthNotPresent == that.lengthNotPresent - && this.height == that.height - && this.width == that.width; - } - - @Override - public int hashCode() { - return Objects.hash(lengthNotPresent, width, height); - } - - @Override - public String toString() { - return "DimensionsWithIncorrectNameSchemaHintTable{" - + "lengthNotPresent=" - + lengthNotPresent - + ", width=" - + width - + ", height=" - + height - + '}'; - } - } - - @Entity - @SchemaHint(targetElement = TargetElement.NONE) - public static class DoesNotExistNoValidation { - private int k; - - public int getK() { - return k; - } - - public void setK(int k) { - this.k = k; - } - } - - @Entity - @SchemaHint(targetElement = TargetElement.NONE) - public static class ProductCqlTableMissingNoValidation extends ProductCqlTableMissing {} -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java deleted file mode 100644 index 1f1b92b8623..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to 
the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static org.junit.Assume.assumeFalse; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import 
com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.time.Duration; -import java.util.concurrent.CompletionStage; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.4", - description = "Creates a SASI index") -public class SelectCustomWhereClauseIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - - @BeforeClass - public static void setup() { - // SASI index creation is broken in DSE 6.8.0 - // All tests in this class require SASI, so ensure it's working - assumeFalse(InventoryITBase.isSasiBroken(CCM_RULE)); - - CqlSession session = SESSION_RULE.session(); - - SchemaChangeSynchronizer.withLock( - () -> { - for (String query : createStatements(CCM_RULE, true)) { - session.execute( - SimpleStatement.builder(query) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - }); - - InventoryMapper inventoryMapper = - new SelectCustomWhereClauseIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - dao.save(FLAMETHROWER); - dao.save(MP3_DOWNLOAD); - } - - @Test - public void should_select_with_custom_clause() { - await() - .atMost(Duration.ofMinutes(1)) - 
.untilAsserted( - () -> { - PagingIterable products = dao.findByDescription("%mp3%"); - assertThat(products.one()).isEqualTo(MP3_DOWNLOAD); - assertThat(products.iterator()).isExhausted(); - }); - } - - @Test - public void should_select_with_custom_clause_asynchronously() { - await() - .atMost(Duration.ofMinutes(1)) - .untilAsserted( - () -> { - MappedAsyncPagingIterable iterable = - CompletableFutures.getUninterruptibly( - dao.findByDescriptionAsync("%mp3%").toCompletableFuture()); - assertThat(iterable.one()).isEqualTo(MP3_DOWNLOAD); - assertThat(iterable.currentPage().iterator()).isExhausted(); - assertThat(iterable.hasMorePages()).isFalse(); - }); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - public interface ProductDao { - /** Note that this relies on a SASI index. */ - @Select(customWhereClause = "description LIKE :searchString") - PagingIterable findByDescription(String searchString); - - /** Note that this relies on a SASI index. */ - @Select(customWhereClause = "description LIKE :\"Search String\"") - CompletionStage> findByDescriptionAsync( - @CqlName("\"Search String\"") String searchString); - - @Delete - void delete(Product product); - - @Insert - void save(Product product); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java deleted file mode 100644 index fcb78c3075d..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import java.util.stream.Stream; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SelectIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - - private static ProductSaleDao saleDao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper inventoryMapper = - new SelectIT_InventoryMapperBuilder(session) - .withDefaultKeyspace(SESSION_RULE.keyspace()) - .build(); - dao = inventoryMapper.productDao(); - saleDao = inventoryMapper.productSaleDao(); - } - - @Before - public void insertData() { - dao.save(FLAMETHROWER); - dao.save(MP3_DOWNLOAD); - - saleDao.save(FLAMETHROWER_SALE_1); - saleDao.save(FLAMETHROWER_SALE_2); - saleDao.save(FLAMETHROWER_SALE_3); - saleDao.save(FLAMETHROWER_SALE_4); - saleDao.save(FLAMETHROWER_SALE_5); - saleDao.save(MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_select_by_primary_key() { - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - - dao.delete(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - } - - @Test - public void should_select_all() { - assertThat(dao.all().all()).hasSize(2); - } - - @Test - public void should_select_all_async() { - assertThat(CompletableFutures.getUninterruptibly(dao.allAsync()).currentPage()).hasSize(2); - } - - @Test - public void should_select_all_stream() { - assertThat(dao.stream()).hasSize(2); - } - - @Test - public void should_select_all_stream_async() { - 
assertThat(CompletableFutures.getUninterruptibly(dao.streamAsync())).hasSize(2); - } - - @Test - public void should_select_by_primary_key_asynchronously() { - assertThat(CompletableFutures.getUninterruptibly(dao.findByIdAsync(FLAMETHROWER.getId()))) - .isEqualTo(FLAMETHROWER); - - dao.delete(FLAMETHROWER); - assertThat(CompletableFutures.getUninterruptibly(dao.findByIdAsync(FLAMETHROWER.getId()))) - .isNull(); - } - - @Test - public void should_select_by_primary_key_and_return_optional() { - assertThat(dao.findOptionalById(FLAMETHROWER.getId())).contains(FLAMETHROWER); - - dao.delete(FLAMETHROWER); - assertThat(dao.findOptionalById(FLAMETHROWER.getId())).isEmpty(); - } - - @Test - public void should_select_by_primary_key_and_return_optional_asynchronously() { - assertThat( - CompletableFutures.getUninterruptibly(dao.findOptionalByIdAsync(FLAMETHROWER.getId()))) - .contains(FLAMETHROWER); - - dao.delete(FLAMETHROWER); - assertThat( - CompletableFutures.getUninterruptibly(dao.findOptionalByIdAsync(FLAMETHROWER.getId()))) - .isEmpty(); - } - - @Test - public void should_select_all_sales() { - assertThat(saleDao.all().all()) - .containsOnly( - FLAMETHROWER_SALE_1, - FLAMETHROWER_SALE_3, - FLAMETHROWER_SALE_4, - FLAMETHROWER_SALE_2, - FLAMETHROWER_SALE_5, - MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_select_all_sales_stream() { - assertThat(saleDao.stream()) - .containsOnly( - FLAMETHROWER_SALE_1, - FLAMETHROWER_SALE_3, - FLAMETHROWER_SALE_4, - FLAMETHROWER_SALE_2, - FLAMETHROWER_SALE_5, - MP3_DOWNLOAD_SALE_1); - } - - @Test - public void should_select_by_partition_key() { - assertThat(saleDao.salesByIdForDay(FLAMETHROWER.getId(), DATE_1).all()) - .containsOnly( - FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4); - } - - @Test - public void should_select_by_partition_key_stream() { - assertThat(saleDao.salesByIdForDayStream(FLAMETHROWER.getId(), DATE_1)) - .containsOnly( - FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, 
FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4); - } - - @Test - public void should_select_by_partition_key_and_partial_clustering() { - assertThat(saleDao.salesByIdForCustomer(FLAMETHROWER.getId(), DATE_1, 1).all()) - .containsOnly(FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_4); - } - - @Test - public void should_select_by_partition_key_and_partial_clustering_stream() { - assertThat(saleDao.salesByIdForCustomerStream(FLAMETHROWER.getId(), DATE_1, 1)) - .containsOnly(FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_4); - } - - @Test - public void should_select_by_primary_key_sales() { - assertThat( - saleDao.salesByIdForCustomerAtTime( - MP3_DOWNLOAD.getId(), DATE_3, 7, MP3_DOWNLOAD_SALE_1.getTs())) - .isEqualTo(MP3_DOWNLOAD_SALE_1); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(); - - @DaoFactory - ProductSaleDao productSaleDao(); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductDao { - @Select - Product findById(UUID productId); - - @Select - PagingIterable all(); - - @Select - CompletionStage> allAsync(); - - @Select - Stream stream(); - - @Select - CompletionStage> streamAsync(); - - @Select - Optional findOptionalById(UUID productId); - - @Select - CompletionStage findByIdAsync(UUID productId); - - @Select - CompletionStage> findOptionalByIdAsync(UUID productId); - - @Delete - void delete(Product product); - - @Insert - void save(Product product); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductSaleDao { - // range query - @Select - PagingIterable all(); - - @Select - Stream stream(); - - // partition key provided - @Select - PagingIterable salesByIdForDay(UUID id, String day); - - @Select - Stream salesByIdForDayStream(UUID id, String day); - - // partition key and partial clustering key - @Select - PagingIterable salesByIdForCustomer(UUID id, String day, int customerId); - - @Select - Stream 
salesByIdForCustomerStream(UUID id, String day, int customerId); - - // full primary key - @Select - ProductSale salesByIdForCustomerAtTime(UUID id, String day, int customerId, UUID ts); - - @Insert - void save(ProductSale sale); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectOtherClausesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectOtherClausesIT.java deleted file mode 100644 index 3eb40fd8520..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectOtherClausesIT.java +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Computed; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.6", - description = "Uses PER PARTITION LIMIT") -public class 
SelectOtherClausesIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static SimpleDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : - ImmutableList.of("CREATE TABLE simple (k int, cc int, v int, PRIMARY KEY (k, cc))")) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - TestMapper mapper = TestMapper.builder(session).build(); - dao = mapper.simpleDao(SESSION_RULE.keyspace()); - - for (int k = 0; k < 2; k++) { - for (int cc = 0; cc < 10; cc++) { - dao.insert(new Simple(k, cc, 1)); - } - } - } - - @Test - public void should_select_with_limit() { - PagingIterable elements = dao.selectWithLimit(10); - assertThat(elements.isFullyFetched()).isTrue(); - assertThat(elements.getAvailableWithoutFetching()).isEqualTo(10); - - elements = dao.selectWithLimit(0, 5); - assertThat(elements.isFullyFetched()).isTrue(); - assertThat(elements.getAvailableWithoutFetching()).isEqualTo(5); - - elements = dao.selectWithLimit(0, 0, 1); - assertThat(elements.isFullyFetched()).isTrue(); - assertThat(elements.getAvailableWithoutFetching()).isEqualTo(1); - } - - @Test - public void should_select_with_per_partition_limit() { - PagingIterable elements = dao.selectWithPerPartitionLimit(5); - assertThat(elements.isFullyFetched()).isTrue(); - assertThat(elements.getAvailableWithoutFetching()).isEqualTo(10); - - Map elementCountPerPartition = new HashMap<>(); - for (Simple element : elements) { - elementCountPerPartition.compute(element.getK(), (k, v) -> (v == null) ? 
1 : v + 1); - } - assertThat(elementCountPerPartition).hasSize(2).containsEntry(0, 5).containsEntry(1, 5); - } - - @Test - public void should_select_with_order_by() { - PagingIterable elements = dao.selectByCcDesc(0); - int previousCc = Integer.MAX_VALUE; - for (Simple element : elements) { - assertThat(element.getCc()).isLessThan(previousCc); - previousCc = element.getCc(); - } - } - - @Test - public void should_select_with_group_by() { - PagingIterable sums = dao.selectSumByK(); - assertThat(sums.all()).hasSize(2).containsOnly(new Sum(0, 10), new Sum(1, 10)); - } - - @Test - public void should_select_with_allow_filtering() { - PagingIterable elements = dao.selectByCc(1); - assertThat(elements.all()).hasSize(2).containsOnly(new Simple(0, 1, 1), new Simple(1, 1, 1)); - } - - @Mapper - public interface TestMapper { - @DaoFactory - SimpleDao simpleDao(@DaoKeyspace CqlIdentifier keyspace); - - static MapperBuilder builder(CqlSession session) { - return new SelectOtherClausesIT_TestMapperBuilder(session); - } - } - - @Dao - public interface SimpleDao { - @Insert - void insert(Simple simple); - - @Select(limit = ":l") - PagingIterable selectWithLimit(@CqlName("l") int l); - - @Select(limit = ":l") - PagingIterable selectWithLimit(int k, @CqlName("l") int l); - - /** - * Contrived since the query will return at most a single row, but this is just to check that - * {@code l} doesn't need an explicit name when the full primary key is provided. 
- */ - @Select(limit = ":l") - PagingIterable selectWithLimit(int k, int cc, int l); - - @Select(perPartitionLimit = ":perPartitionLimit") - PagingIterable selectWithPerPartitionLimit( - @CqlName("perPartitionLimit") int perPartitionLimit); - - @Select(orderBy = "cc DESC") - PagingIterable selectByCcDesc(int k); - - @Select(groupBy = "k") - PagingIterable selectSumByK(); - - @Select(customWhereClause = "cc = :cc", allowFiltering = true) - PagingIterable selectByCc(int cc); - } - - @Entity - public static class Simple { - @PartitionKey private int k; - @ClusteringColumn private int cc; - private int v; - - public Simple() {} - - public Simple(int k, int cc, int v) { - this.k = k; - this.cc = cc; - this.v = v; - } - - public int getK() { - return k; - } - - public void setK(int k) { - this.k = k; - } - - public int getCc() { - return cc; - } - - public void setCc(int cc) { - this.cc = cc; - } - - public int getV() { - return v; - } - - public void setV(int v) { - this.v = v; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Simple) { - Simple that = (Simple) other; - return this.k == that.k && this.cc == that.cc && this.v == that.v; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(k, cc, v); - } - - @Override - public String toString() { - return String.format("Simple(%d, %d, %d)", k, cc, v); - } - } - - @Entity - @CqlName("simple") - public static class Sum { - private int k; - - @Computed("sum(v)") - private int value; - - public Sum() {} - - public Sum(int k, int value) { - this.k = k; - this.value = value; - } - - public int getK() { - return k; - } - - public void setK(int k) { - this.k = k; - } - - public int getValue() { - return value; - } - - public void setValue(int value) { - this.value = value; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Sum) { - Sum 
that = (Sum) other; - return this.k == that.k && this.value == that.value; - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(k, value); - } - - @Override - public String toString() { - return String.format("Sum(%d, %d)", k, value); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectReactiveIT.java deleted file mode 100644 index 79e4d2b33ea..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectReactiveIT.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import io.reactivex.Flowable; -import java.util.UUID; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SelectReactiveIT extends InventoryITBase { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - private static DseProductDao dao; - - @BeforeClass - public static void 
setup() { - CqlSession session = sessionRule.session(); - - SchemaChangeSynchronizer.withLock( - () -> { - for (String query : createStatements(ccmRule)) { - session.execute( - SimpleStatement.builder(query) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - }); - - DseInventoryMapper inventoryMapper = - new SelectReactiveIT_DseInventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); - } - - @Before - public void insertData() { - Flowable.fromPublisher(dao.saveReactive(FLAMETHROWER)).blockingSubscribe(); - Flowable.fromPublisher(dao.saveReactive(MP3_DOWNLOAD)).blockingSubscribe(); - } - - @Test - public void should_select_by_primary_key_reactive() { - assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) - .isEqualTo(FLAMETHROWER); - Flowable.fromPublisher(dao.deleteReactive(FLAMETHROWER)).blockingSubscribe(); - assertThat( - Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())) - .singleElement() - .blockingGet()) - .isNull(); - } - - @Mapper - public interface DseInventoryMapper { - - @DaoFactory - DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DseProductDao { - - @Select - MappedReactiveResultSet findByIdReactive(UUID productId); - - @Delete - ReactiveResultSet deleteReactive(Product product); - - @Insert - ReactiveResultSet saveReactive(Product product); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java deleted file mode 100644 index 3bf6557347a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import 
com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SetEntityIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - private static UserDefinedType dimensions2d; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper inventoryMapper = new SetEntityIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - dimensions2d = - session - .getKeyspace() - .flatMap(ks -> session.getMetadata().getKeyspace(ks)) - .flatMap(ks -> ks.getUserDefinedType("dimensions2d")) - .orElseThrow(AssertionError::new); - } - - @Test - public void should_set_entity_on_bound_statement() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement preparedStatement = - session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); - BoundStatement boundStatement = preparedStatement.bind(); - - boundStatement = dao.set(FLAMETHROWER, boundStatement); - - assertMatches(boundStatement, FLAMETHROWER); - } - - @Test - public void should_set_entity_on_bound_statement_builder() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement preparedStatement = - session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); - BoundStatementBuilder 
builder = preparedStatement.boundStatementBuilder(); - - dao.set(builder, FLAMETHROWER); - BoundStatement boundStatement = builder.build(); - - assertMatches(boundStatement, FLAMETHROWER); - } - - @Test - public void should_set_entity_on_bound_statement_setting_null() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement preparedStatement = - session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); - BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); - - dao.setNullFields( - builder, new Product(FLAMETHROWER.getId(), null, FLAMETHROWER.getDimensions())); - BoundStatement boundStatement = builder.build(); - - assertMatches( - boundStatement, new Product(FLAMETHROWER.getId(), null, FLAMETHROWER.getDimensions())); - } - - @Test - public void should_set_entity_on_bound_statement_without_setting_null() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement preparedStatement = - session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); - BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); - - dao.setDoNotSetNullFields( - builder, new Product(FLAMETHROWER.getId(), null, FLAMETHROWER.getDimensions())); - BoundStatement boundStatement = builder.build(); - - // "" is in description because it was not set - assertMatches( - boundStatement, new Product(FLAMETHROWER.getId(), "", FLAMETHROWER.getDimensions())); - } - - @Test - public void should_set_entity_on_udt_value() { - CqlSession session = SESSION_RULE.session(); - UserDefinedType udtType = - session - .getMetadata() - .getKeyspace(SESSION_RULE.keyspace()) - .orElseThrow(AssertionError::new) - .getUserDefinedType("dimensions") - .orElseThrow(AssertionError::new); - UdtValue udtValue = udtType.newValue(); - Dimensions dimensions = new Dimensions(30, 10, 8); - - dao.set(dimensions, udtValue); - - assertThat(udtValue.getInt("length")).isEqualTo(dimensions.getLength()); - 
assertThat(udtValue.getInt("width")).isEqualTo(dimensions.getWidth()); - assertThat(udtValue.getInt("height")).isEqualTo(dimensions.getHeight()); - } - - @Test - public void should_set_entity_on_partial_statement_when_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); - BoundStatement bound = dao.setLenient(FLAMETHROWER, ps.bind()); - assertThat(bound.getUuid(0)).isEqualTo(FLAMETHROWER.getId()); - assertThat(bound.getString(1)).isEqualTo(FLAMETHROWER.getDescription()); - } - - @Test - public void should_set_entity_on_partial_statement_builder_when_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); - BoundStatementBuilder builder = ps.boundStatementBuilder(); - dao.setLenient(FLAMETHROWER, builder); - assertThat(builder.getUuid(0)).isEqualTo(FLAMETHROWER.getId()); - assertThat(builder.getString(1)).isEqualTo(FLAMETHROWER.getDescription()); - } - - @Test - @SuppressWarnings("ResultOfMethodCallIgnored") - public void should_set_entity_on_partial_udt_when_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO product2d (id, dimensions) VALUES (?, ?)"); - BoundStatementBuilder builder = ps.boundStatementBuilder(); - builder.setUuid(0, FLAMETHROWER.getId()); - UdtValue dimensionsUdt = dimensions2d.newValue(); - Dimensions dimensions = new Dimensions(12, 34, 56); - dao.setLenient(dimensions, dimensionsUdt); - builder.setUdtValue(1, dimensionsUdt); - assertThat(dimensionsUdt.getInt("width")).isEqualTo(34); - assertThat(dimensionsUdt.getInt("height")).isEqualTo(56); - } - - @Test - public void should_not_set_entity_on_partial_statement_when_not_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); - Throwable 
error = catchThrowable(() -> dao.set(FLAMETHROWER, ps.bind())); - assertThat(error).hasMessage("dimensions is not a variable in this bound statement"); - } - - @Test - public void should_not_set_entity_on_partial_statement_builder_when_not_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); - Throwable error = catchThrowable(() -> dao.set(ps.boundStatementBuilder(), FLAMETHROWER)); - assertThat(error).hasMessage("dimensions is not a variable in this bound statement"); - } - - @Test - @SuppressWarnings("ResultOfMethodCallIgnored") - public void should_not_set_entity_on_partial_udt_when_not_lenient() { - CqlSession session = SESSION_RULE.session(); - PreparedStatement ps = session.prepare("INSERT INTO product2d (id, dimensions) VALUES (?, ?)"); - BoundStatementBuilder builder = ps.boundStatementBuilder(); - builder.setUuid(0, FLAMETHROWER.getId()); - UdtValue dimensionsUdt = dimensions2d.newValue(); - Dimensions dimensions = new Dimensions(12, 34, 56); - Throwable error = catchThrowable(() -> dao.set(dimensions, dimensionsUdt)); - assertThat(error).hasMessage("length is not a field in this UDT"); - } - - private static void assertMatches(GettableByName data, Product entity) { - assertThat(data.getUuid("id")).isEqualTo(entity.getId()); - assertThat(data.getString("description")).isEqualTo(entity.getDescription()); - UdtValue udtValue = data.getUdtValue("dimensions"); - assertThat(udtValue).isNotNull(); - assertThat(udtValue.getType().getName().asInternal()).isEqualTo("dimensions"); - assertThat(udtValue.getInt("length")).isEqualTo(entity.getDimensions().getLength()); - assertThat(udtValue.getInt("width")).isEqualTo(entity.getDimensions().getWidth()); - assertThat(udtValue.getInt("height")).isEqualTo(entity.getDimensions().getHeight()); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } 
- - @Dao - public interface ProductDao { - - @SetEntity - BoundStatement set(Product product, BoundStatement boundStatement); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void setNullFields(BoundStatementBuilder builder, Product product); - - @SetEntity(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) - void setDoNotSetNullFields(BoundStatementBuilder builder, Product product); - - @SetEntity - void set(BoundStatementBuilder builder, Product product); - - @SetEntity - void set(Dimensions dimensions, UdtValue udtValue); - - @SetEntity(lenient = true) - BoundStatement setLenient(Product product, BoundStatement boundStatement); - - @SetEntity(lenient = true) - void setLenient(Product product, BoundStatementBuilder builder); - - @SetEntity(lenient = true) - void setLenient(Dimensions dimensions, UdtValue udtValue); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java deleted file mode 100644 index c5099baaf35..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.query; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.catchThrowable; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.StatementAttributes; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Execute; -import com.datastax.oss.simulacron.common.cluster.ClusterQueryLogReport; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import 
com.datastax.oss.simulacron.common.stubbing.PrimeDsl; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class StatementAttributesIT { - - private static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(1)); - private static final SessionRule SESSION_RULE = - SessionRule.builder(SIMULACRON_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); - - private static String PAGING_STATE = "paging_state"; - private static int PAGE_SIZE = 13; - - private static final Simple simple = new Simple(UUID.randomUUID(), "DATA"); - - @SuppressWarnings("UnnecessaryLambda") - private static final Function statementFunction = - builder -> - builder - .setConsistencyLevel(DefaultConsistencyLevel.ANY) - .setPageSize(PAGE_SIZE) - .setSerialConsistencyLevel(DefaultConsistencyLevel.QUORUM) - .setPagingState(ByteBuffer.wrap(PAGING_STATE.getBytes(UTF_8))); - - @SuppressWarnings("UnnecessaryLambda") - private static final Function badStatementFunction = - builder -> { - throw new IllegalStateException("mock error"); - }; - - private static SimpleDao dao; - - @BeforeClass - public static void setupClass() { - primeDeleteQuery(); - primeInsertQuery(); - primeSelectQuery(); - primeCountQuery(); - primeUpdateQuery(); - - InventoryMapper inventoryMapper = - new StatementAttributesIT_InventoryMapperBuilder(SESSION_RULE.session()).build(); - dao = inventoryMapper.simpleDao(); - } - - 
@Before - public void setup() { - SIMULACRON_RULE.cluster().clearLogs(); - } - - @Test - public void should_honor_runtime_attributes_on_insert() { - dao.save(simple, statementFunction); - - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), true); - } - - @Test - public void should_honor_annotation_attributes_on_insert() { - dao.save2(simple); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), false); - } - - @Test - public void should_use_runtime_attributes_over_annotation_attributes() { - dao.save3(simple, statementFunction); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), false); - } - - @Test - public void should_honor_runtime_attributes_on_delete() { - dao.delete(simple, statementFunction); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), true); - } - - @Test - public void should_honor_annotation_attributes_on_delete() { - dao.delete2(simple); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), false); - } - - @Test - public void should_honor_runtime_attributes_on_select() { - dao.findByPk(simple.getPk(), statementFunction); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), true); - } - - @Test - public void should_honor_annotation_attributes_on_select() { - dao.findByPk2(simple.getPk()); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), false); - } - - @Test - public void should_honor_runtime_attributes_on_query() { - dao.count(simple.getPk(), statementFunction); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - 
validateQueryOptions(report.getQueryLogs().get(0), true); - } - - @Test - public void should_honor_annotation_attributes_on_query() { - dao.count2(simple.getPk()); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), false); - } - - @Test - public void should_honor_runtime_attributes_on_update() { - dao.update(simple, statementFunction); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), true); - } - - @Test - public void should_honor_annotation_attributes_on_update() { - dao.update2(simple); - ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); - validateQueryOptions(report.getQueryLogs().get(0), false); - } - - @Test - public void should_fail_runtime_attributes_bad() { - Throwable t = catchThrowable(() -> dao.save(simple, badStatementFunction)); - assertThat(t).isInstanceOf(IllegalStateException.class).hasMessage("mock error"); - } - - private static void primeInsertQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk(), "data", simple.getData())); - LinkedHashMap paramTypes = - new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "INSERT INTO ks.simple (pk,data) VALUES (:pk,:data)", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), - params, - paramTypes)) - .then(noRows())); - } - - private static void primeDeleteQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk())); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "DELETE FROM ks.simple WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - 
com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), - params, - paramTypes)) - .then(noRows()) - .delay(1, TimeUnit.MILLISECONDS)); - } - - private static void primeSelectQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk())); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "SELECT pk,data FROM ks.simple WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), - params, - paramTypes)) - .then(noRows()) - .delay(1, TimeUnit.MILLISECONDS)); - } - - private static void primeCountQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk())); - LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "SELECT count(*) FROM ks.simple WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), - params, - paramTypes)) - .then(PrimeDsl.rows().row("count", 1L).columnTypes("count", "bigint").build()) - .delay(1, TimeUnit.MILLISECONDS)); - } - - private static void primeUpdateQuery() { - LinkedHashMap params = - new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk(), "data", simple.getData())); - LinkedHashMap paramTypes = - new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); - SIMULACRON_RULE - .cluster() - .prime( - when(query( - "UPDATE ks.simple SET data=:data WHERE pk=:pk", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), - params, - paramTypes)) - .then(noRows())); - } - - private void validateQueryOptions(QueryLog log, boolean validatePageState) { - - Message message = 
log.getFrame().message; - assertThat(message).isInstanceOf(Execute.class); - Execute queryExecute = (Execute) message; - assertThat(queryExecute.options.consistency) - .isEqualTo(DefaultConsistencyLevel.ANY.getProtocolCode()); - assertThat(queryExecute.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.QUORUM.getProtocolCode()); - assertThat(queryExecute.options.pageSize).isEqualTo(PAGE_SIZE); - if (validatePageState) { - String pagingState = UTF_8.decode(queryExecute.options.pagingState).toString(); - assertThat(pagingState).isEqualTo(PAGING_STATE); - } - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - StatementAttributesIT.SimpleDao simpleDao(); - } - - @Dao - public interface SimpleDao { - @Insert - void save(Simple simple, Function function); - - @Insert - @StatementAttributes(consistencyLevel = "ANY", serialConsistencyLevel = "QUORUM", pageSize = 13) - void save2(Simple simple); - - @Insert - @StatementAttributes(consistencyLevel = "ONE", pageSize = 500) - void save3(Simple simple, Function function); - - @Delete - void delete(Simple simple, Function function); - - @Delete - @StatementAttributes(consistencyLevel = "ANY", serialConsistencyLevel = "QUORUM", pageSize = 13) - void delete2(Simple simple); - - @Select - @SuppressWarnings("UnusedReturnValue") - Simple findByPk(UUID pk, Function function); - - @Select - @StatementAttributes(consistencyLevel = "ANY", serialConsistencyLevel = "QUORUM", pageSize = 13) - @SuppressWarnings("UnusedReturnValue") - Simple findByPk2(UUID pk); - - @Query("SELECT count(*) FROM ks.simple WHERE pk=:pk") - long count(UUID pk, Function function); - - @Query("SELECT count(*) FROM ks.simple WHERE pk=:pk") - @StatementAttributes(consistencyLevel = "ANY", serialConsistencyLevel = "QUORUM", pageSize = 13) - @SuppressWarnings("UnusedReturnValue") - long count2(UUID pk); - - @Update - void update(Simple simple, Function function); - - @Update - @StatementAttributes(consistencyLevel = "ANY", 
serialConsistencyLevel = "QUORUM", pageSize = 13) - void update2(Simple simple); - } - - @Entity(defaultKeyspace = "ks") - public static class Simple { - @PartitionKey private UUID pk; - private String data; - - public Simple() {} - - public Simple(UUID pk, String data) { - this.pk = pk; - this.data = data; - } - - public UUID getPk() { - return pk; - } - - public String getData() { - return data; - } - - public void setPk(UUID pk) { - - this.pk = pk; - } - - public void setData(String data) { - this.data = data; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof Simple)) { - return false; - } - Simple simple = (Simple) o; - return Objects.equals(pk, simple.pk) && Objects.equals(data, simple.data); - } - - @Override - public int hashCode() { - - return Objects.hash(pk, data); - } - - @Override - public String toString() { - return "Simple{" + "pk=" + pk + ", data='" + data + '\'' + '}'; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java deleted file mode 100644 index 0fab03569d1..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Transient; -import com.datastax.oss.driver.api.mapper.annotations.TransientProperties; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import 
org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class TransientIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestMapper mapper; - - private static final AtomicInteger keyProvider = new AtomicInteger(0); - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - session.execute( - SimpleStatement.builder("CREATE TABLE entity(id int primary key, v int)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - mapper = new TransientIT_TestMapperBuilder(session).build(); - } - - @Test - public void should_ignore_field_with_transient_annotated_field() { - EntityWithTransientAnnotatedFieldDao dao = - mapper.entityWithTransientAnnotatedFieldDao( - SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); - - int key = keyProvider.incrementAndGet(); - EntityWithTransientAnnotatedField entity = new EntityWithTransientAnnotatedField(key, 1, 7); - dao.save(entity); - - EntityWithTransientAnnotatedField retrievedEntity = dao.findById(key); - assertThat(retrievedEntity.getId()).isEqualTo(key); - assertThat(retrievedEntity.getV()).isEqualTo(1); - // column should not have been set since field was @Transient-annotated - assertThat(retrievedEntity.getNotAColumn()).isNull(); - } - - @Test - public void should_ignore_field_with_transient_annotated_getter() { - EntityWithTransientAnnotatedGetterDao dao = - mapper.entityWithTransientAnnotatedGetterDao( - SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); - - int key = keyProvider.incrementAndGet(); - EntityWithTransientAnnotatedGetter entity = new EntityWithTransientAnnotatedGetter(key, 1, 7); - dao.save(entity); - - EntityWithTransientAnnotatedGetter retrievedEntity = 
dao.findById(key); - assertThat(retrievedEntity.getId()).isEqualTo(key); - assertThat(retrievedEntity.getV()).isEqualTo(1); - // column should not have been set since getter was @Transient-annotated - assertThat(retrievedEntity.getNotAColumn()).isNull(); - } - - @Test - public void should_ignore_field_with_transient_keyword() { - EntityWithTransientKeywordDao dao = - mapper.entityWithTransientKeywordDao( - SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); - - int key = keyProvider.incrementAndGet(); - EntityWithTransientKeyword entity = new EntityWithTransientKeyword(key, 1, 7); - dao.save(entity); - - EntityWithTransientKeyword retrievedEntity = dao.findById(key); - assertThat(retrievedEntity.getId()).isEqualTo(key); - assertThat(retrievedEntity.getV()).isEqualTo(1); - // column should not have been set since field had transient keyword - assertThat(retrievedEntity.getNotAColumn()).isNull(); - } - - @Test - public void should_ignore_properties_included_in_transient_properties_keyword() { - EntityWithTransientPropertiesAnnotationDao dao = - mapper.entityWithTransientPropertiesAnnotation( - SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); - - int key = keyProvider.incrementAndGet(); - EntityWithTransientPropertiesAnnotation entity = - new EntityWithTransientPropertiesAnnotation(key, 1, 7, 10L); - dao.save(entity); - - EntityWithTransientPropertiesAnnotation retrievedEntity = dao.findById(key); - assertThat(retrievedEntity.getId()).isEqualTo(key); - assertThat(retrievedEntity.getV()).isEqualTo(1); - // columns should not have been set since field was @Transient-annotated - assertThat(retrievedEntity.getNotAColumn()).isNull(); - assertThat(retrievedEntity.getAlsoNotAColumn()).isNull(); - } - - @Entity - public static class EntityWithTransientAnnotatedField { - - @PartitionKey private int id; - - private int v; - - @Transient private Integer notAColumn; - - EntityWithTransientAnnotatedField() {} - - EntityWithTransientAnnotatedField(int id, int v, 
Integer notAColumn) { - this.id = id; - this.v = v; - this.notAColumn = notAColumn; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getV() { - return v; - } - - public void setV(int v) { - this.v = v; - } - - @SuppressWarnings("WeakerAccess") - public Integer getNotAColumn() { - return notAColumn; - } - - @SuppressWarnings("unused") - public void setNotAColumn(Integer notAColumn) { - this.notAColumn = notAColumn; - } - } - - @Entity - public static class EntityWithTransientAnnotatedGetter { - - @PartitionKey private int id; - - private int v; - - private Integer notAColumn; - - EntityWithTransientAnnotatedGetter() {} - - EntityWithTransientAnnotatedGetter(int id, int v, Integer notAColumn) { - this.id = id; - this.v = v; - this.notAColumn = notAColumn; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getV() { - return v; - } - - public void setV(int v) { - this.v = v; - } - - @Transient - @SuppressWarnings("WeakerAccess") - public Integer getNotAColumn() { - return notAColumn; - } - - @SuppressWarnings("unused") - public void setNotAColumn(Integer notAColumn) { - this.notAColumn = notAColumn; - } - } - - @Entity - public static class EntityWithTransientKeyword { - - @PartitionKey private int id; - - private int v; - - private transient Integer notAColumn; - - EntityWithTransientKeyword() {} - - EntityWithTransientKeyword(int id, int v, Integer notAColumn) { - this.id = id; - this.v = v; - this.notAColumn = notAColumn; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getV() { - return v; - } - - public void setV(int v) { - this.v = v; - } - - @SuppressWarnings("WeakerAccess") - public Integer getNotAColumn() { - return notAColumn; - } - - @SuppressWarnings("unused") - public void setNotAColumn(Integer notAColumn) { - this.notAColumn = notAColumn; - } - } - - 
@TransientProperties({"notAColumn", "alsoNotAColumn"}) - @Entity - public static class EntityWithTransientPropertiesAnnotation { - - @PartitionKey private int id; - - private int v; - - private transient Integer notAColumn; - - private transient Long alsoNotAColumn; - - EntityWithTransientPropertiesAnnotation() {} - - EntityWithTransientPropertiesAnnotation( - int id, int v, Integer notAColumn, Long alsoNotAColumn) { - this.id = id; - this.v = v; - this.notAColumn = notAColumn; - this.alsoNotAColumn = alsoNotAColumn; - } - - public int getId() { - return id; - } - - public void setId(int id) { - this.id = id; - } - - public int getV() { - return v; - } - - public void setV(int v) { - this.v = v; - } - - @SuppressWarnings("WeakerAccess") - public Integer getNotAColumn() { - return notAColumn; - } - - @SuppressWarnings("unused") - public void setNotAColumn(Integer notAColumn) { - this.notAColumn = notAColumn; - } - - @SuppressWarnings("WeakerAccess") - public Long getAlsoNotAColumn() { - return alsoNotAColumn; - } - - @SuppressWarnings("unused") - public void setAlsoNotAColumn(Long alsoNotAColumn) { - this.alsoNotAColumn = alsoNotAColumn; - } - } - - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - interface BaseDao {} - - @Dao - public interface EntityWithTransientAnnotatedFieldDao extends BaseDao { - @Select - EntityWithTransientAnnotatedField findById(int id); - - @Insert - void save(EntityWithTransientAnnotatedField entity); - } - - @Dao - public interface EntityWithTransientAnnotatedGetterDao extends BaseDao { - @Select - EntityWithTransientAnnotatedGetter findById(int id); - - @Insert - void save(EntityWithTransientAnnotatedGetter entity); - } - - @Dao - public interface EntityWithTransientKeywordDao extends BaseDao { - @Select - EntityWithTransientKeyword findById(int id); - - @Insert - void save(EntityWithTransientKeyword entity); - } - - @Dao - public interface EntityWithTransientPropertiesAnnotationDao extends BaseDao { - @Select - 
EntityWithTransientPropertiesAnnotation findById(int id); - - @Insert - void save(EntityWithTransientPropertiesAnnotation entity); - } - - @Mapper - public interface TestMapper { - @DaoFactory - EntityWithTransientAnnotatedFieldDao entityWithTransientAnnotatedFieldDao( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - EntityWithTransientAnnotatedGetterDao entityWithTransientAnnotatedGetterDao( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - EntityWithTransientKeywordDao entityWithTransientKeywordDao( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - - @DaoFactory - EntityWithTransientPropertiesAnnotationDao entityWithTransientPropertiesAnnotation( - @DaoKeyspace CqlIdentifier keyspace, @DaoTable CqlIdentifier table); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UdtKeyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UdtKeyIT.java deleted file mode 100644 index c17cd290451..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UdtKeyIT.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.List; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class UdtKeyIT { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static RecordDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - for (String ddlQuery : - ImmutableList.of( - "CREATE TYPE key (value int)", - "CREATE 
TABLE record(key frozen PRIMARY KEY, value int)", - "CREATE TABLE multi_key_record(key frozen> PRIMARY KEY, value int)")) { - session.execute( - SimpleStatement.builder(ddlQuery) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - TestMapper mapper = new UdtKeyIT_TestMapperBuilder(SESSION_RULE.session()).build(); - dao = mapper.recordDao(SESSION_RULE.keyspace()); - } - - @Test - public void should_save_and_retrieve_entity_with_udt_pk() { - // Given - Key key = new Key(1); - dao.save(new Record(key, 42)); - - // When - Record record = dao.findByKey(key); - - // Then - assertThat(record.getValue()).isEqualTo(42); - } - - @Test - public void should_save_and_retrieve_entity_with_udt_collection_pk() { - // Given - List key = ImmutableList.of(new Key(1), new Key(2)); - dao.saveMulti(new MultiKeyRecord(key, 42)); - - // When - MultiKeyRecord record = dao.findMultiByKey(key); - - // Then - assertThat(record.getValue()).isEqualTo(42); - } - - @Entity - public static class Key { - private int value; - - public Key() {} - - public Key(int value) { - this.value = value; - } - - public int getValue() { - return value; - } - - public void setValue(int value) { - this.value = value; - } - } - - @Entity - public static class Record { - @PartitionKey private Key key; - private int value; - - public Record() {} - - public Record(Key key, int value) { - this.key = key; - this.value = value; - } - - public Key getKey() { - return key; - } - - public void setKey(Key key) { - this.key = key; - } - - public int getValue() { - return value; - } - - public void setValue(int value) { - this.value = value; - } - } - - @Entity - public static class MultiKeyRecord { - @PartitionKey private List key; - private int value; - - public MultiKeyRecord() {} - - public MultiKeyRecord(List key, int value) { - this.key = key; - this.value = value; - } - - public List getKey() { - return key; - } - - public void setKey(List key) { - this.key = key; - } - - public int getValue() { - 
return value; - } - - public void setValue(int value) { - this.value = value; - } - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - interface RecordDao { - @Select - Record findByKey(Key key); - - @Insert - void save(Record record); - - @Select - MultiKeyRecord findMultiByKey(List key); - - @Insert - void saveMulti(MultiKeyRecord record); - } - - @Mapper - interface TestMapper { - @DaoFactory - RecordDao recordDao(@DaoKeyspace CqlIdentifier keyspace); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java deleted file mode 100644 index ebdd2dfd40a..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.assertions.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.11.0", - description = "UDT fields in IF clause") -public class UpdateCustomIfClauseIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = 
SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); - } - - InventoryMapper inventoryMapper = - new UpdateCustomIfClauseIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - } - - @Before - public void clearProductData() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - @Test - public void should_update_entity_if_condition_is_met() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.updateIfLength(otherProduct, 10).wasApplied()).isEqualTo(true); - } - - @Test - public void should_update_entity_if_condition_is_met_statement() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - SESSION_RULE - .session() - .execute(dao.updateIfLengthStatement(otherProduct, 10)) - .wasApplied()) - .isEqualTo(true); - } - - @Test - public void should_not_update_entity_if_condition_is_not_met() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - 
assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.updateIfLength(otherProduct, 20).wasApplied()).isEqualTo(false); - } - - @Test - public void should_not_update_entity_if_condition_is_not_met_statement() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - SESSION_RULE - .session() - .execute(dao.updateIfLengthStatement(otherProduct, 20)) - .wasApplied()) - .isEqualTo(false); - } - - @Test - public void should_async_update_entity_if_condition_is_met() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - CompletableFutures.getUninterruptibly(dao.updateIfLengthAsync(otherProduct, 10)) - .wasApplied()) - .isEqualTo(true); - } - - @Test - public void should_not_async_update_entity_if_condition_is_not_met() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - CompletableFutures.getUninterruptibly(dao.updateIfLengthAsync(otherProduct, 20)) - .wasApplied()) - .isEqualTo(false); - } - - @Test - public void should_update_entity_if_condition_is_met_using_ttl() { - dao.update( - new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); - 
assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.updateIfLengthUsingTtl(otherProduct, 10).wasApplied()).isEqualTo(true); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - public interface ProductDao { - - @Update - void update(Product product); - - @Update(customIfClause = "dimensions.length = :length") - ResultSet updateIfLength(Product product, int length); - - @Update(customIfClause = "dimensions.length = :length", ttl = "20") - ResultSet updateIfLengthUsingTtl(Product product, int length); - - @Update(customIfClause = "dimensions.length = :length") - BoundStatement updateIfLengthStatement(Product product, int length); - - @Update(customIfClause = "dimensions.length = :\"Length\"") - CompletableFuture updateIfLengthAsync( - Product product, @CqlName("\"Length\"") int length); - - @Select - Product findById(UUID productId); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java deleted file mode 100644 index 3fac733c900..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java +++ /dev/null @@ -1,506 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import java.util.UUID; -import 
java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class UpdateIT extends InventoryITBase { - - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static ProductDao dao; - private static InventoryMapper inventoryMapper; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - SchemaChangeSynchronizer.withLock( - () -> { - for (String query : createStatements(CCM_RULE)) { - session.execute( - SimpleStatement.builder(query) - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - session.execute( - SimpleStatement.newInstance("CREATE TABLE only_p_k(id uuid PRIMARY KEY)") - .setExecutionProfile(SESSION_RULE.slowProfile())); - }); - - inventoryMapper = new UpdateIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); - } - - @Before - public void clearProductData() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - } - - @Test - public void should_update_entity() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_update_entity_matching_custom_where_clause() { - // given - Product toBeUpdated = new Product(UUID.randomUUID(), "a", new Dimensions(1, 1, 1)); - Product shouldNotBeUpdated = new Product(UUID.randomUUID(), 
"b", new Dimensions(1, 1, 1)); - - dao.update(toBeUpdated); - dao.update(shouldNotBeUpdated); - - assertThat(dao.findById(toBeUpdated.getId())).isEqualTo(toBeUpdated); - assertThat(dao.findById(shouldNotBeUpdated.getId())).isEqualTo(shouldNotBeUpdated); - - // when - Product afterUpdate = new Product(toBeUpdated.getId(), "c", new Dimensions(1, 1, 1)); - dao.updateWhereId(afterUpdate, toBeUpdated.getId()); - - // then - assertThat(dao.findById(toBeUpdated.getId())).isEqualTo(afterUpdate); - assertThat(dao.findById(shouldNotBeUpdated.getId())).isEqualTo(shouldNotBeUpdated); - } - - @Test - public void should_update_entity_matching_custom_where_in_clause() { - // given - Product toBeUpdated = new Product(UUID.randomUUID(), "a", new Dimensions(1, 1, 1)); - Product toBeUpdated2 = new Product(UUID.randomUUID(), "b", new Dimensions(1, 1, 1)); - - dao.update(toBeUpdated); - dao.update(toBeUpdated2); - - assertThat(dao.findById(toBeUpdated.getId())).isEqualTo(toBeUpdated); - assertThat(dao.findById(toBeUpdated2.getId())).isEqualTo(toBeUpdated2); - - // when - Product afterUpdate = new Product(toBeUpdated.getId(), "c", new Dimensions(1, 1, 1)); - dao.updateWhereIdIn(afterUpdate, toBeUpdated.getId(), toBeUpdated2.getId()); - - // then - assertThat(dao.findById(toBeUpdated.getId()).getDescription()) - .isEqualTo(afterUpdate.getDescription()); - assertThat(dao.findById(toBeUpdated2.getId()).getDescription()) - .isEqualTo(afterUpdate.getDescription()); - } - - @Test - public void should_update_entity_asynchronously() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - CompletableFutures.getUninterruptibly(dao.updateAsync(FLAMETHROWER)); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_update_entity_with_timestamp() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - long timestamp = 1234; - dao.updateWithBoundTimestamp(FLAMETHROWER, timestamp); - - CqlSession session = SESSION_RULE.session(); - 
Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT WRITETIME(description) FROM product WHERE id = ?", - FLAMETHROWER.getId())) - .one(); - assertThat(row).isNotNull(); - long writeTime = row.getLong(0); - assertThat(writeTime).isEqualTo(timestamp); - } - - @Test - public void should_update_entity_with_timestamp_literal() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - dao.updateWithTimestampLiteral(FLAMETHROWER); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT WRITETIME(description) FROM product WHERE id = ?", - FLAMETHROWER.getId())) - .one(); - assertThat(row).isNotNull(); - long writeTime = row.getLong(0); - assertThat(writeTime).isEqualTo(1000L); - } - - @Test - public void should_update_entity_with_ttl() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - int ttl = 100_000; - dao.updateWithBoundTtl(FLAMETHROWER, ttl); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT TTL(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) - .one(); - assertThat(row).isNotNull(); - int writeTime = row.getInt(0); - assertThat(writeTime).isBetween(ttl - 10, ttl); - } - - @Test - public void should_update_entity_with_ttl_literal() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - dao.updateWithTtlLiteral(FLAMETHROWER); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT TTL(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) - .one(); - assertThat(row).isNotNull(); - int writeTime = row.getInt(0); - assertThat(writeTime).isBetween(990, 1000); - } - - @Test - public void should_update_entity_with_timestamp_asynchronously() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - long timestamp = 1234; - CompletableFutures.getUninterruptibly( - 
dao.updateAsyncWithBoundTimestamp(FLAMETHROWER, timestamp)); - - CqlSession session = SESSION_RULE.session(); - Row row = - session - .execute( - SimpleStatement.newInstance( - "SELECT WRITETIME(description) FROM product WHERE id = ?", - FLAMETHROWER.getId())) - .one(); - assertThat(row).isNotNull(); - long writeTime = row.getLong(0); - assertThat(writeTime).isEqualTo(timestamp); - } - - @Test - public void should_update_entity_if_exists() { - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.updateIfExists(otherProduct).wasApplied()).isEqualTo(true); - } - - @Test - public void should_update_entity_if_exists_statement() { - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - SESSION_RULE.session().execute(dao.updateIfExistsStatement(otherProduct)).wasApplied()) - .isEqualTo(true); - } - - @Test - public void should_not_update_entity_if_not_exists() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat(dao.updateIfExists(otherProduct).wasApplied()).isEqualTo(false); - } - - @Test - public void should_not_update_entity_if_not_exists_statement() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - SESSION_RULE.session().execute(dao.updateIfExistsStatement(otherProduct)).wasApplied()) - .isEqualTo(false); - } - - @Test - public void should_update_entity_if_exists_asynchronously() { - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - Product 
otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - CompletableFutures.getUninterruptibly(dao.updateAsyncIfExists(otherProduct)) - .wasApplied()) - .isEqualTo(true); - } - - @Test - public void should_not_update_entity_if_not_exists_asynchronously() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - assertThat( - CompletableFutures.getUninterruptibly(dao.updateAsyncIfExists(otherProduct)) - .wasApplied()) - .isEqualTo(false); - } - - @Test - public void should_throw_when_try_to_use_dao_with_update_only_pk() { - assertThatThrownBy(() -> inventoryMapper.onlyPkDao(SESSION_RULE.keyspace())) - .isInstanceOf(MapperException.class) - .hasMessageContaining("Entity OnlyPK does not have any non PK columns."); - } - - @Test - public void should_update_entity_and_return_was_applied() { - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - assertThat(dao.updateReturnWasApplied(FLAMETHROWER)).isTrue(); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_not_update_entity_and_return_was_not_applied() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - assertThat(dao.updateReturnWasApplied(FLAMETHROWER)).isFalse(); - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - } - - @Test - public void should_update_entity_and_return_was_applied_async() { - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); - - assertThat(CompletableFutures.getUninterruptibly(dao.updateReturnWasAppliedAsync(FLAMETHROWER))) - .isTrue(); - assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); - } - - @Test - public void should_not_update_entity_and_return_was_not_applied_async() { - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - - 
assertThat(CompletableFutures.getUninterruptibly(dao.updateReturnWasAppliedAsync(FLAMETHROWER))) - .isFalse(); - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - } - - @Test - public void should_update_entity_without_pk_placeholders_matching_custom_where_in_clause() { - // given - ProductWithoutIdDao dao = inventoryMapper.productWithoutIdDao(SESSION_RULE.keyspace()); - UUID idOne = UUID.randomUUID(); - UUID idTwo = UUID.randomUUID(); - SESSION_RULE - .session() - .execute( - SimpleStatement.newInstance( - "INSERT INTO product_without_id (id, clustering, description) VALUES (?,?,?)", - idOne, - 1, - "a")); - SESSION_RULE - .session() - .execute( - SimpleStatement.newInstance( - "INSERT INTO product_without_id (id, clustering, description) VALUES (?,?,?)", - idTwo, - 1, - "b")); - - assertThat(dao.findById(idOne).getDescription()).isEqualTo("a"); - assertThat(dao.findById(idTwo).getDescription()).isEqualTo("b"); - - // when - ProductWithoutId afterUpdate = new ProductWithoutId("c"); - dao.updateWhereIdInSetWithoutPKPlaceholders(afterUpdate, idOne, idTwo); - - // then - assertThat(dao.findById(idOne).getDescription()).isEqualTo(afterUpdate.getDescription()); - assertThat(dao.findById(idTwo).getDescription()).isEqualTo(afterUpdate.getDescription()); - } - - @Test - public void should_update_entity_and_set_null_field() { - // given - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId()).getDescription()).isNotNull(); - - // when - dao.updateSetNull(new Product(FLAMETHROWER.getId(), null, FLAMETHROWER.getDimensions())); - - // then - assertThat(dao.findById(FLAMETHROWER.getId()).getDescription()).isNull(); - } - - @Test - public void should_update_entity_udt_and_set_null_field() { - // given - assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); - dao.update(FLAMETHROWER); - assertThat(dao.findById(FLAMETHROWER.getId()).getDimensions()).isNotNull(); - - // when - 
dao.updateSetNull(new Product(FLAMETHROWER.getId(), "desc", null)); - - // then - assertThat(dao.findById(FLAMETHROWER.getId()).getDimensions()).isNull(); - } - - @Mapper - public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - OnlyPKDao onlyPkDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - ProductWithoutIdDao productWithoutIdDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductDao { - - @Update - void update(Product product); - - @Update(nullSavingStrategy = NullSavingStrategy.SET_TO_NULL) - void updateSetNull(Product product); - - @Update(customWhereClause = "id = :id") - void updateWhereId(Product product, UUID id); - - @Update(customWhereClause = "id IN (:id1, :id2)") - void updateWhereIdIn(Product product, UUID id1, UUID id2); - - @Update(timestamp = ":timestamp") - void updateWithBoundTimestamp(Product product, long timestamp); - - @Update(timestamp = "1000") - void updateWithTimestampLiteral(Product product); - - @Update(ttl = ":ttl") - void updateWithBoundTtl(Product product, int ttl); - - @Update(ttl = "1000") - void updateWithTtlLiteral(Product product); - - @Update(ifExists = true) - ResultSet updateIfExists(Product product); - - @Update(ifExists = true) - BoundStatement updateIfExistsStatement(Product product); - - @Update - CompletableFuture updateAsync(Product product); - - @Update(timestamp = ":timestamp") - CompletableFuture updateAsyncWithBoundTimestamp(Product product, long timestamp); - - @Update(ifExists = true) - CompletableFuture updateAsyncIfExists(Product product); - - @Update(ifExists = true) - boolean updateReturnWasApplied(Product product); - - @Update(ifExists = true) - CompletableFuture updateReturnWasAppliedAsync(Product product); - - @Select - Product findById(UUID productId); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public 
interface OnlyPKDao { - @Update - void update(OnlyPK onlyPK); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface ProductWithoutIdDao { - @Update(customWhereClause = "id IN (:id, :id2) AND clustering = 1") - void updateWhereIdInSetWithoutPKPlaceholders(ProductWithoutId product, UUID id, UUID id2); - - @Select(customWhereClause = "id = :productId") - ProductWithoutId findById(UUID productId); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateNamingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateNamingIT.java deleted file mode 100644 index c1b15b2cbca..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateNamingIT.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.mapper; - -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.SET_TO_NULL; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -/** - * For JAVA-2367: ensure that PK column names are properly handled in the WHERE clause of a - * generated UPDATE query. 
- */ -@Category(ParallelizableTests.class) -public class UpdateNamingIT { - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static TestDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = SESSION_RULE.session(); - session.execute( - SimpleStatement.builder("CREATE TABLE foo(mykey int PRIMARY KEY, value int)") - .setExecutionProfile(SESSION_RULE.slowProfile()) - .build()); - - TestMapper mapper = - TestMapper.builder(session).withDefaultKeyspace(SESSION_RULE.keyspace()).build(); - dao = mapper.dao(); - } - - @Test - public void should_update_with_case_insensitive_pk_name() { - dao.update(new Foo(1, 1)); - Foo foo = dao.get(1); - assertThat(foo.getValue()).isEqualTo(1); - } - - @Mapper - public interface TestMapper { - - @DaoFactory - TestDao dao(); - - static MapperBuilder builder(CqlSession session) { - return new UpdateNamingIT_TestMapperBuilder(session); - } - } - - @Dao - @DefaultNullSavingStrategy(SET_TO_NULL) - public interface TestDao { - @Select - Foo get(int key); - - @Update - void update(Foo template); - } - - @Entity - @NamingStrategy(convention = NamingConvention.CASE_INSENSITIVE) - public static class Foo { - @PartitionKey private int myKey; - private int value; - - public Foo() {} - - public Foo(int myKey, int value) { - this.myKey = myKey; - this.value = value; - } - - public int getMyKey() { - return myKey; - } - - public void setMyKey(int myKey) { - this.myKey = myKey; - } - - public int getValue() { - return value; - } - - public void setValue(int value) { - this.value = value; - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateReactiveIT.java deleted file mode 100644 index 
fa171441b50..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateReactiveIT.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.mapper; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; 
-import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import io.reactivex.Flowable; -import io.reactivex.Single; -import java.util.UUID; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.6", - description = "Uses UDT fields in IF conditions (CASSANDRA-7423)") -public class UpdateReactiveIT extends InventoryITBase { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - private static DseProductDao dao; - - @BeforeClass - public static void setup() { - CqlSession session = sessionRule.session(); - - for (String query : createStatements(ccmRule)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } - - DseInventoryMapper dseInventoryMapper = - new UpdateReactiveIT_DseInventoryMapperBuilder(session).build(); - dao = dseInventoryMapper.productDao(sessionRule.keyspace()); - } - - @Before - public void clearProductData() { - CqlSession session = sessionRule.session(); - session.execute( - SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - } - - @Test - public void should_update_entity_if_exists_reactive() { - Flowable.fromPublisher(dao.updateReactive(FLAMETHROWER)).blockingSubscribe(); - 
assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) - .isNotNull(); - - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - ReactiveResultSet rs = dao.updateIfExistsReactive(otherProduct); - assertThat(Flowable.fromPublisher(rs).count().blockingGet()).isOne(); - assertThat( - Single.fromPublisher(rs.getColumnDefinitions()).blockingGet().contains("description")) - .isFalse(); - assertThat(Single.fromPublisher(rs.wasApplied()).blockingGet()).isTrue(); - } - - @Test - public void should_update_entity_if_condition_is_met_reactive() { - Flowable.fromPublisher( - dao.updateReactive( - new Product( - FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1)))) - .blockingSubscribe(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) - .isNotNull(); - Product otherProduct = - new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); - ReactiveResultSet rs = dao.updateIfLengthReactive(otherProduct, 10); - ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); - assertThat(row.wasApplied()).isTrue(); - assertThat(row.getColumnDefinitions().contains("dimensions")).isFalse(); - assertThat(Single.fromPublisher(rs.getColumnDefinitions()).blockingGet().contains("dimensions")) - .isFalse(); - assertThat(Single.fromPublisher(rs.wasApplied()).blockingGet()).isTrue(); - } - - @Test - public void should_not_update_entity_if_condition_is_not_met_reactive() { - Flowable.fromPublisher( - dao.updateReactive( - new Product( - FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1)))) - .blockingSubscribe(); - assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) - .isNotNull() - .extracting("description") - .isEqualTo("Description for length 10"); - ReactiveResultSet rs = - dao.updateIfLengthReactive( - new 
Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)), 20); - ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); - assertThat(row.wasApplied()).isFalse(); - assertThat(row.getColumnDefinitions().contains("dimensions")).isTrue(); - assertThat(Single.fromPublisher(rs.getColumnDefinitions()).blockingGet().contains("dimensions")) - .isTrue(); - assertThat(Single.fromPublisher(rs.wasApplied()).blockingGet()).isFalse(); - } - - @Mapper - public interface DseInventoryMapper { - - @DaoFactory - DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); - } - - @Dao - @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) - public interface DseProductDao { - - @Update - ReactiveResultSet updateReactive(Product product); - - @Update(ifExists = true) - ReactiveResultSet updateIfExistsReactive(Product product); - - @Update(customIfClause = "dimensions.length = :length") - ReactiveResultSet updateIfLengthReactive(Product product, int length); - - @Select - MappedReactiveResultSet findByIdReactive(UUID productId); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/metrics/micrometer/MicrometerMetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/metrics/micrometer/MicrometerMetricsIT.java deleted file mode 100644 index c38df1e2026..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/metrics/micrometer/MicrometerMetricsIT.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.metrics.micrometer; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.core.metrics.MetricsITBase; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.metrics.micrometer.MicrometerTags; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import io.micrometer.core.instrument.Counter; -import io.micrometer.core.instrument.Gauge; -import io.micrometer.core.instrument.Meter; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Tag; -import io.micrometer.core.instrument.Timer; -import io.micrometer.core.instrument.simple.SimpleMeterRegistry; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class MicrometerMetricsIT extends MetricsITBase { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new 
SimulacronRule(ClusterSpec.builder().withNodes(3)); - - @Override - protected SimulacronRule simulacron() { - return SIMULACRON_RULE; - } - - @Override - protected MeterRegistry newMetricRegistry() { - return new SimpleMeterRegistry(); - } - - @Override - protected String getMetricsFactoryClass() { - return "MicrometerMetricsFactory"; - } - - @Override - protected void assertMetricsPresent(CqlSession session) { - - MeterRegistry registry = - (MeterRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); - assertThat(registry).isNotNull(); - - assertThat(registry.getMeters()) - .hasSize(ENABLED_SESSION_METRICS.size() + ENABLED_NODE_METRICS.size() * 3); - - MetricIdGenerator metricIdGenerator = - ((InternalDriverContext) session.getContext()).getMetricIdGenerator(); - - for (DefaultSessionMetric metric : ENABLED_SESSION_METRICS) { - MetricId id = metricIdGenerator.sessionMetricId(metric); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - Meter m = registry.find(id.getName()).tags(tags).meter(); - assertThat(m).isNotNull(); - switch (metric) { - case CONNECTED_NODES: - assertThat(m).isInstanceOf(Gauge.class); - assertThat(((Gauge) m).value()).isEqualTo(3); - break; - case CQL_REQUESTS: - assertThat(m).isInstanceOf(Timer.class); - await().untilAsserted(() -> assertThat(((Timer) m).count()).isEqualTo(30)); - break; - case CQL_PREPARED_CACHE_SIZE: - assertThat(m).isInstanceOf(Gauge.class); - assertThat(((Gauge) m).value()).isOne(); - break; - case BYTES_SENT: - case BYTES_RECEIVED: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).count()).isGreaterThan(0); - break; - case CQL_CLIENT_TIMEOUTS: - case THROTTLING_ERRORS: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).count()).isZero(); - break; - case THROTTLING_DELAY: - assertThat(m).isInstanceOf(Timer.class); - assertThat(((Timer) m).count()).isZero(); - break; - case THROTTLING_QUEUE_SIZE: - assertThat(m).isInstanceOf(Gauge.class); - 
assertThat(((Gauge) m).value()).isZero(); - break; - } - } - - for (Node node : session.getMetadata().getNodes().values()) { - - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - MetricId id = metricIdGenerator.nodeMetricId(node, metric); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - Meter m = registry.find(id.getName()).tags(tags).meter(); - assertThat(m).isNotNull(); - switch (metric) { - case OPEN_CONNECTIONS: - assertThat(m).isInstanceOf(Gauge.class); - // control node has 2 connections - assertThat(((Gauge) m).value()).isBetween(1.0, 2.0); - break; - case CQL_MESSAGES: - assertThat(m).isInstanceOf(Timer.class); - await().untilAsserted(() -> assertThat(((Timer) m).count()).isEqualTo(10)); - break; - case READ_TIMEOUTS: - case WRITE_TIMEOUTS: - case UNAVAILABLES: - case OTHER_ERRORS: - case ABORTED_REQUESTS: - case UNSENT_REQUESTS: - case RETRIES: - case IGNORES: - case RETRIES_ON_READ_TIMEOUT: - case RETRIES_ON_WRITE_TIMEOUT: - case RETRIES_ON_UNAVAILABLE: - case RETRIES_ON_OTHER_ERROR: - case RETRIES_ON_ABORTED: - case IGNORES_ON_READ_TIMEOUT: - case IGNORES_ON_WRITE_TIMEOUT: - case IGNORES_ON_UNAVAILABLE: - case IGNORES_ON_OTHER_ERROR: - case IGNORES_ON_ABORTED: - case SPECULATIVE_EXECUTIONS: - case CONNECTION_INIT_ERRORS: - case AUTHENTICATION_ERRORS: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).count()).isZero(); - break; - case BYTES_SENT: - case BYTES_RECEIVED: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).count()).isGreaterThan(0.0); - break; - case AVAILABLE_STREAMS: - case IN_FLIGHT: - case ORPHANED_STREAMS: - assertThat(m).isInstanceOf(Gauge.class); - break; - } - } - } - } - - @Override - protected void assertNodeMetricsNotEvicted(CqlSession session, Node node) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - MetricIdGenerator metricIdGenerator = context.getMetricIdGenerator(); - MeterRegistry registry = (MeterRegistry) 
context.getMetricRegistry(); - assertThat(registry).isNotNull(); - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - MetricId id = metricIdGenerator.nodeMetricId(node, metric); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - Meter m = registry.find(id.getName()).tags(tags).meter(); - assertThat(m).isNotNull(); - } - } - - @Override - protected void assertMetricsNotPresent(Object registry) { - MeterRegistry micrometerRegistry = (MeterRegistry) registry; - assertThat(micrometerRegistry.getMeters()).isEmpty(); - } - - @Override - protected void assertNodeMetricsEvicted(CqlSession session, Node node) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - MetricIdGenerator metricIdGenerator = context.getMetricIdGenerator(); - MeterRegistry registry = (MeterRegistry) context.getMetricRegistry(); - assertThat(registry).isNotNull(); - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - MetricId id = metricIdGenerator.nodeMetricId(node, metric); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - Meter m = registry.find(id.getName()).tags(tags).meter(); - assertThat(m).isNull(); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/metrics/microprofile/MicroProfileMetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/metrics/microprofile/MicroProfileMetricsIT.java deleted file mode 100644 index aa04c058a49..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/metrics/microprofile/MicroProfileMetricsIT.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.metrics.microprofile; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.core.metrics.MetricsITBase; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileTags; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import io.smallrye.metrics.MetricsRegistryImpl; -import java.util.ArrayList; -import java.util.List; -import org.eclipse.microprofile.metrics.Counter; -import org.eclipse.microprofile.metrics.Gauge; -import org.eclipse.microprofile.metrics.Meter; -import org.eclipse.microprofile.metrics.Metric; -import org.eclipse.microprofile.metrics.MetricID; -import org.eclipse.microprofile.metrics.MetricRegistry; -import org.eclipse.microprofile.metrics.Tag; -import org.eclipse.microprofile.metrics.Timer; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; - 
-@Category(ParallelizableTests.class) -public class MicroProfileMetricsIT extends MetricsITBase { - - @ClassRule - public static final SimulacronRule SIMULACRON_RULE = - new SimulacronRule(ClusterSpec.builder().withNodes(3)); - - @Override - protected SimulacronRule simulacron() { - return SIMULACRON_RULE; - } - - @Override - protected MetricRegistry newMetricRegistry() { - return new MetricsRegistryImpl(); - } - - @Override - protected String getMetricsFactoryClass() { - return "MicroProfileMetricsFactory"; - } - - @Override - protected void assertMetricsPresent(CqlSession session) { - - MetricRegistry registry = - (MetricRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); - assertThat(registry).isNotNull(); - - assertThat(registry.getMetrics()) - .hasSize(ENABLED_SESSION_METRICS.size() + ENABLED_NODE_METRICS.size() * 3); - - MetricIdGenerator metricIdGenerator = - ((InternalDriverContext) session.getContext()).getMetricIdGenerator(); - - for (DefaultSessionMetric metric : ENABLED_SESSION_METRICS) { - MetricId metricId = metricIdGenerator.sessionMetricId(metric); - Tag[] tags = MicroProfileTags.toMicroProfileTags(metricId.getTags()); - MetricID id = new MetricID(metricId.getName(), tags); - Metric m = registry.getMetrics().get(id); - assertThat(m).isNotNull(); - switch (metric) { - case CONNECTED_NODES: - assertThat(m).isInstanceOf(Gauge.class); - assertThat((Integer) ((Gauge) m).getValue()).isEqualTo(3); - break; - case CQL_REQUESTS: - assertThat(m).isInstanceOf(Timer.class); - await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(30)); - break; - case CQL_PREPARED_CACHE_SIZE: - assertThat(m).isInstanceOf(Gauge.class); - assertThat((Long) ((Gauge) m).getValue()).isOne(); - break; - case BYTES_SENT: - case BYTES_RECEIVED: - assertThat(m).isInstanceOf(Meter.class); - assertThat(((Meter) m).getCount()).isGreaterThan(0); - break; - case CQL_CLIENT_TIMEOUTS: - case THROTTLING_ERRORS: - 
assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).getCount()).isZero(); - break; - case THROTTLING_DELAY: - assertThat(m).isInstanceOf(Timer.class); - assertThat(((Timer) m).getCount()).isZero(); - break; - case THROTTLING_QUEUE_SIZE: - assertThat(m).isInstanceOf(Gauge.class); - assertThat((Integer) ((Gauge) m).getValue()).isZero(); - break; - } - } - - for (Node node : session.getMetadata().getNodes().values()) { - - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - MetricId description = metricIdGenerator.nodeMetricId(node, metric); - Tag[] tags = MicroProfileTags.toMicroProfileTags(description.getTags()); - MetricID id = new MetricID(description.getName(), tags); - Metric m = registry.getMetrics().get(id); - assertThat(m).isNotNull(); - switch (metric) { - case OPEN_CONNECTIONS: - assertThat(m).isInstanceOf(Gauge.class); - // control node has 2 connections - assertThat((Integer) ((Gauge) m).getValue()).isBetween(1, 2); - break; - case CQL_MESSAGES: - assertThat(m).isInstanceOf(Timer.class); - await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(10)); - break; - case READ_TIMEOUTS: - case WRITE_TIMEOUTS: - case UNAVAILABLES: - case OTHER_ERRORS: - case ABORTED_REQUESTS: - case UNSENT_REQUESTS: - case RETRIES: - case IGNORES: - case RETRIES_ON_READ_TIMEOUT: - case RETRIES_ON_WRITE_TIMEOUT: - case RETRIES_ON_UNAVAILABLE: - case RETRIES_ON_OTHER_ERROR: - case RETRIES_ON_ABORTED: - case IGNORES_ON_READ_TIMEOUT: - case IGNORES_ON_WRITE_TIMEOUT: - case IGNORES_ON_UNAVAILABLE: - case IGNORES_ON_OTHER_ERROR: - case IGNORES_ON_ABORTED: - case SPECULATIVE_EXECUTIONS: - case CONNECTION_INIT_ERRORS: - case AUTHENTICATION_ERRORS: - assertThat(m).isInstanceOf(Counter.class); - assertThat(((Counter) m).getCount()).isZero(); - break; - case BYTES_SENT: - case BYTES_RECEIVED: - assertThat(m).isInstanceOf(Meter.class); - assertThat(((Meter) m).getCount()).isGreaterThan(0L); - break; - case AVAILABLE_STREAMS: - case IN_FLIGHT: - case 
ORPHANED_STREAMS: - assertThat(m).isInstanceOf(Gauge.class); - break; - } - } - } - } - - @Override - protected void assertNodeMetricsNotEvicted(CqlSession session, Node node) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - MetricRegistry registry = (MetricRegistry) context.getMetricRegistry(); - assertThat(registry).isNotNull(); - for (MetricID id : nodeMetricIds(context, node)) { - assertThat(registry.getMetrics()).containsKey(id); - } - } - - @Override - protected void assertMetricsNotPresent(Object registry) { - MetricRegistry metricRegistry = (MetricRegistry) registry; - assertThat(metricRegistry.getMetrics()).isEmpty(); - } - - @Override - protected void assertNodeMetricsEvicted(CqlSession session, Node node) { - InternalDriverContext context = (InternalDriverContext) session.getContext(); - MetricRegistry registry = (MetricRegistry) context.getMetricRegistry(); - assertThat(registry).isNotNull(); - for (MetricID id : nodeMetricIds(context, node)) { - assertThat(registry.getMetrics()).doesNotContainKey(id); - } - } - - private List nodeMetricIds(InternalDriverContext context, Node node) { - List ids = new ArrayList<>(); - for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { - MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric); - ids.add(new MetricID(id.getName(), MicroProfileTags.toMicroProfileTags(id.getTags()))); - } - return ids; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/JsonInsertIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/JsonInsertIT.java deleted file mode 100644 index 4df5c7a62bd..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/JsonInsertIT.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.querybuilder; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; -import static com.datastax.oss.driver.assertions.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import 
com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.time.Duration; -import java.util.List; -import java.util.Objects; -import org.junit.After; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -@BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "2.2", - description = "JSON support in Cassandra was added in 2.2") -public class JsonInsertIT { - private static final CcmRule CCM_RULE = CcmRule.getInstance(); - - private static final SessionRule SESSION_RULE = - SessionRule.builder(CCM_RULE) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule - public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - - private static final TypeCodec JACKSON_JSON_CODEC = ExtraTypeCodecs.json(User.class); - - @BeforeClass - public static void setup() { - SESSION_RULE - .session() - .execute("CREATE TABLE json_jackson_row(id int PRIMARY KEY, name text, age int)"); - } - - @After - public void clearTable() { - SESSION_RULE.session().execute("TRUNCATE TABLE json_jackson_row"); - } - - @Test - public void should_insert_string_as_json_using_simple_statement() { - // given a simple statement - try (CqlSession session = sessionWithCustomCodec()) { - String jsonUser = "{ \"id\": 2, \"name\": \"Alice\", \"age\": 3 }"; - Statement stmt = insertInto("json_jackson_row").json(jsonUser).build(); - - // when - session.execute(stmt); - - // then - String jsonUserResult = - session - .execute(selectFrom("json_jackson_row").json().all().build()) - 
.all() - .get(0) - .getString(0); - - assertThat(jsonUserResult).contains("\"id\": 2"); - assertThat(jsonUserResult).contains(" \"name\": \"Alice\""); - assertThat(jsonUserResult).contains("\"age\": 3"); - } - } - - @Test - public void should_insert_json_using_prepare_statement() { - // given prepare statement - try (CqlSession session = sessionWithCustomCodec()) { - User user = new User(2, "bob", 35); - PreparedStatement pst = - session.prepare(insertInto("json_jackson_row").json(bindMarker("user")).build()); - - // when - session.execute(pst.bind().set("user", user, User.class)); - - // then - List rows = session.execute(selectFrom("json_jackson_row").json().all().build()).all(); - assertThat(rows.get(0).get(0, User.class)).isEqualTo(user); - } - } - - @Test - public void should_insert_json_using_simple_statement_with_custom_codec() { - // given a simple statement - try (CqlSession session = sessionWithCustomCodec()) { - User user = new User(1, "alice", 30); - Statement stmt = insertInto("json_jackson_row").json(user, JACKSON_JSON_CODEC).build(); - - // when - session.execute(stmt); - - // then - List rows = session.execute(selectFrom("json_jackson_row").json().all().build()).all(); - - assertThat(rows.get(0).get(0, User.class)).isEqualTo(user); - } - } - - @Test - public void should_insert_json_using_simple_statement_with_custom_codec_without_codec_registry() { - try (CqlSession session = sessionWithoutCustomCodec()) { - // given - User user = new User(1, "alice", 30); - SimpleStatement stmt = insertInto("json_jackson_row").json(user, JACKSON_JSON_CODEC).build(); - - // when - session.execute(stmt); - - // then - List rows = session.execute(selectFrom("json_jackson_row").json().all().build()).all(); - assertThat(rows.get(0).get(0, JACKSON_JSON_CODEC)).isEqualTo(user); - } - } - - @Test - public void should_insert_json_using_simple_statement_with_codec_registry() { - // given a simple statement - try (CqlSession session = sessionWithCustomCodec()) { - User user = 
new User(1, "alice", 30); - Statement stmt = - insertInto("json_jackson_row") - .json(user, session.getContext().getCodecRegistry()) - .build(); - - // when - session.execute(stmt); - - // then - List rows = session.execute(selectFrom("json_jackson_row").json().all().build()).all(); - - assertThat(rows.get(0).get(0, User.class)).isEqualTo(user); - } - } - - @Test - public void - should_throw_when_insert_json_using_simple_statement_with_codec_registry_without_custom_codec() { - assertThatThrownBy( - () -> { - try (CqlSession session = sessionWithoutCustomCodec()) { - insertInto("json_jackson_row") - .json(new User(1, "alice", 30), session.getContext().getCodecRegistry()) - .build(); - } - }) - .isExactlyInstanceOf(IllegalArgumentException.class) - .hasMessage( - String.format( - "Could not inline JSON literal of type %s. " - + "This happens because the provided CodecRegistry does not contain " - + "a codec for this type. Try registering your TypeCodec in the registry " - + "first, or use json(Object, TypeCodec).", - User.class.getName())) - .hasCauseInstanceOf(CodecNotFoundException.class); - } - - @SuppressWarnings("unchecked") - private CqlSession sessionWithCustomCodec() { - return (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(SESSION_RULE.keyspace()) - .addTypeCodecs(JACKSON_JSON_CODEC) - .build(); - } - - @SuppressWarnings("unchecked") - private CqlSession sessionWithoutCustomCodec() { - return (CqlSession) - SessionUtils.baseBuilder() - .addContactEndPoints(CCM_RULE.getContactPoints()) - .withKeyspace(SESSION_RULE.keyspace()) - .build(); - } - - @SuppressWarnings("unused") - public static class User { - - private final int id; - - private final String name; - - private final int age; - - @JsonCreator - public User( - @JsonProperty("id") int id, - @JsonProperty("name") String name, - @JsonProperty("age") int age) { - this.id = id; - this.name = name; - this.age = age; - } - - public int getId() { - 
return id; - } - - public String getName() { - return name; - } - - public int getAge() { - return age; - } - - @Override - public String toString() { - return String.format("%s (id %d, age %d)", name, id, age); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } else if (other instanceof User) { - User that = (User) other; - return this.id == that.id && this.age == that.age && Objects.equals(this.name, that.name); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(id, name, age); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/RelationOptionsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/RelationOptionsIT.java deleted file mode 100644 index fc571ccf44d..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/RelationOptionsIT.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.querybuilder; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class RelationOptionsIT { - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Rule public TestName name = new TestName(); - - @Test - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.0", - description = "CRC check chance was moved to top level table in Cassandra 3.0") - public void should_create_table_with_crc_check_chance() { - sessionRule - .session() - .execute( - SchemaBuilder.createTable(name.getMethodName()) - .withPartitionKey("id", DataTypes.INT) - .withColumn("name", DataTypes.TEXT) - .withColumn("age", DataTypes.INT) - .withCRCCheckChance(0.8) - .build()); - KeyspaceMetadata keyspaceMetadata = - sessionRule - .session() - .getMetadata() - .getKeyspace(sessionRule.keyspace()) - .orElseThrow(AssertionError::new); - String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); - - 
assertThat(describeOutput).contains("crc_check_chance = 0.8"); - } - - @Test - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "5.0", - description = "chunk_length_kb was renamed to chunk_length_in_kb in Cassandra 5.0") - public void should_create_table_with_chunk_length_in_kb() { - sessionRule - .session() - .execute( - SchemaBuilder.createTable(name.getMethodName()) - .withPartitionKey("id", DataTypes.INT) - .withColumn("name", DataTypes.TEXT) - .withColumn("age", DataTypes.INT) - .withLZ4Compression(4096) - .build()); - KeyspaceMetadata keyspaceMetadata = - sessionRule - .session() - .getMetadata() - .getKeyspace(sessionRule.keyspace()) - .orElseThrow(AssertionError::new); - String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); - - assertThat(describeOutput).contains("'class':'org.apache.cassandra.io.compress.LZ4Compressor'"); - assertThat(describeOutput).contains("'chunk_length_in_kb':'4096'"); - } - - @Test - @BackendRequirement( - type = BackendType.CASSANDRA, - minInclusive = "3.0", - maxExclusive = "5.0", - description = - "Deprecated compression options should still work with Cassandra >= 3.0 & < 5.0") - public void should_create_table_with_deprecated_options() { - sessionRule - .session() - .execute( - SchemaBuilder.createTable(name.getMethodName()) - .withPartitionKey("id", DataTypes.INT) - .withColumn("name", DataTypes.TEXT) - .withColumn("age", DataTypes.INT) - .withLZ4Compression(4096, 0.8) - .build()); - KeyspaceMetadata keyspaceMetadata = - sessionRule - .session() - .getMetadata() - .getKeyspace(sessionRule.keyspace()) - .orElseThrow(AssertionError::new); - String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); - - assertThat(describeOutput).contains("'class':'org.apache.cassandra.io.compress.LZ4Compressor'"); - assertThat(describeOutput).contains("'chunk_length_in_kb':'4096'"); - assertThat(describeOutput).contains("crc_check_chance = 0.8"); - } -} diff --git 
a/integration-tests/src/test/resources/DescribeIT/dse/4.8.cql b/integration-tests/src/test/resources/DescribeIT/dse/4.8.cql deleted file mode 100644 index ea6ca93bcbf..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/dse/4.8.cql +++ /dev/null @@ -1,67 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND 
default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; diff --git a/integration-tests/src/test/resources/DescribeIT/dse/5.0.cql b/integration-tests/src/test/resources/DescribeIT/dse/5.0.cql deleted file mode 100644 index 2572df52e24..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/dse/5.0.cql +++ /dev/null @@ -1,189 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING 
ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = 
{'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - 
AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/dse/5.1.cql b/integration-tests/src/test/resources/DescribeIT/dse/5.1.cql deleted file mode 100644 index 2572df52e24..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/dse/5.1.cql +++ /dev/null @@ -1,189 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = 
{'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - 
AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS 
-SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/dse/6.8.cql b/integration-tests/src/test/resources/DescribeIT/dse/6.8.cql deleted file mode 100644 index bdeb4737748..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/dse/6.8.cql +++ /dev/null @@ -1,201 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - 
-CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH additional_write_policy = '99PERCENTILE' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND additional_write_policy = '99PERCENTILE' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND 
read_repair = 'BLOCKING' - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH additional_write_policy = '99PERCENTILE' - AND bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','max_threshold':'32','min_threshold':'4','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99PERCENTILE' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - 
-CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99PERCENTILE' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99PERCENTILE' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double 
- LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/hcd/1.0.cql b/integration-tests/src/test/resources/DescribeIT/hcd/1.0.cql deleted file mode 100644 index abc70728206..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/hcd/1.0.cql +++ /dev/null @@ -1,186 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND 
min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','max_threshold':'32','min_threshold':'4','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM 
ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = 
{'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/2.1.cql b/integration-tests/src/test/resources/DescribeIT/oss/2.1.cql deleted file mode 100644 index ea6ca93bcbf..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/oss/2.1.cql +++ /dev/null @@ -1,67 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH 
CLUSTERING ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; diff --git a/integration-tests/src/test/resources/DescribeIT/oss/2.2.cql b/integration-tests/src/test/resources/DescribeIT/oss/2.2.cql deleted file mode 100644 index a4035ffa90e..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/oss/2.2.cql +++ /dev/null @@ -1,114 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int 
-); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH bloom_filter_fp_chance = 0.01 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' - AND comment = '' - AND compaction = 
{'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND default_time_to_live = 0 - AND gc_grace_seconds = 864000 - AND dclocal_read_repair_chance = 0.1 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/3.0.cql b/integration-tests/src/test/resources/DescribeIT/oss/3.0.cql deleted file mode 100644 index 2572df52e24..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/oss/3.0.cql +++ /dev/null @@ -1,189 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH bloom_filter_fp_chance = 0.01 - AND 
caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = 
{'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND 
default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/3.11.cql b/integration-tests/src/test/resources/DescribeIT/oss/3.11.cql deleted file mode 100644 index 
2572df52e24..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/oss/3.11.cql +++ /dev/null @@ -1,189 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 
0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW 
ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND dclocal_read_repair_chance = 0.1 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair_chance = 0.0 - AND speculative_retry = '99PERCENTILE'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION 
ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/4.0.cql b/integration-tests/src/test/resources/DescribeIT/oss/4.0.cql deleted file mode 100644 index abc70728206..00000000000 --- a/integration-tests/src/test/resources/DescribeIT/oss/4.0.cql +++ /dev/null @@ -1,186 +0,0 @@ - -CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; - -CREATE TYPE ks_0.btype ( - a text -); - -CREATE TYPE ks_0.xtype ( - d text -); - -CREATE TYPE ks_0.ztype ( - c text, - a int -); - -CREATE TYPE ks_0.ctype ( - z frozen, - x frozen -); - -CREATE TYPE ks_0.atype ( - c frozen -); - -CREATE TABLE ks_0.cyclist_mv ( - cid uuid, - age int, - birthday date, - country text, - name text, - PRIMARY KEY (cid) -) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); - -CREATE TABLE 
ks_0.rank_by_year_and_name ( - race_year int, - race_name text, - rank int, - cyclist_name text, - PRIMARY KEY ((race_year, race_name), rank) -) WITH CLUSTERING ORDER BY (rank DESC) - AND additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); - -CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); - -CREATE TABLE ks_0.ztable ( - zkey text, - a frozen, - PRIMARY KEY (zkey) -) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.1 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','max_threshold':'32','min_threshold':'4','sstable_size_in_mb':'95'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND default_time_to_live = 0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS -SELECT * FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = 
{'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = 'simple view' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND crc_check_chance = 1.0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS -SELECT - age, - cid, - birthday, - country, - name -FROM ks_0.cyclist_mv -WHERE age IS NOT NULL AND cid IS NOT NULL -PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' - AND bloom_filter_fp_chance = 0.01 - AND caching = {'keys':'ALL','rows_per_partition':'NONE'} - AND comment = '' - AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} - AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} - AND 
crc_check_chance = 1.0 - AND extensions = {} - AND gc_grace_seconds = 864000 - AND max_index_interval = 2048 - AND memtable_flush_period_in_ms = 0 - AND min_index_interval = 128 - AND read_repair = 'BLOCKING' - AND speculative_retry = '99p'; - -CREATE FUNCTION ks_0.avgfinal(state tuple) - CALLED ON NULL INPUT - RETURNS double - LANGUAGE java - AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; - -CREATE FUNCTION ks_0.avgstate(state tuple,val int) - CALLED ON NULL INPUT - RETURNS tuple - LANGUAGE java - AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; - -CREATE AGGREGATE ks_0.average(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); - -CREATE AGGREGATE ks_0.mean(int) - SFUNC avgstate - STYPE tuple - FINALFUNC avgfinal - INITCOND (0,0); diff --git a/integration-tests/src/test/resources/META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService b/integration-tests/src/test/resources/META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService deleted file mode 100644 index 8ad40a9d327..00000000000 --- a/integration-tests/src/test/resources/META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService +++ /dev/null @@ -1 +0,0 @@ -com.datastax.oss.driver.mapper.GuavaFutureProducerService \ No newline at end of file diff --git a/integration-tests/src/test/resources/application.conf b/integration-tests/src/test/resources/application.conf deleted file mode 100644 index f3ab31bcb76..00000000000 --- a/integration-tests/src/test/resources/application.conf +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Configuration overrides for integration tests -datastax-java-driver { - basic { - load-balancing-policy { - # Since our test infra always specifies explicit contact points, we need to set the local DC - # as well. - # Note that we rely on a hack to ensure that this name is always the same, even with one DC - # (see CcmBridge). - local-datacenter = dc1 - } - config-reload-interval = 0 - request.timeout = 10 seconds - graph.timeout = 10 seconds - } - advanced { - connection { - init-query-timeout = 5 seconds - set-keyspace-timeout = 5 seconds - } - heartbeat.timeout = 5 seconds - control-connection.timeout = 5 seconds - request { - trace.interval = 1 second - warn-if-set-keyspace = false - } - graph { - name = "demo" - } - continuous-paging.timeout { - first-page = 10 seconds - other-pages = 10 seconds - } - metrics { - // Raise histogram bounds because the tests execute DDL queries with a higher timeout - session.cql_requests.highest_latency = 30 seconds - node.cql_messages.highest_latency = 30 seconds - } - // adjust quiet period to 0 seconds to speed up tests - netty { - io-group { - shutdown {quiet-period = 0, timeout = 15, unit = SECONDS} - } - admin-group { - shutdown {quiet-period = 0, timeout = 15, unit = SECONDS} - } - } - } -} diff --git a/integration-tests/src/test/resources/logback-test.xml b/integration-tests/src/test/resources/logback-test.xml deleted file mode 100644 index 
a2179e4357b..00000000000 --- a/integration-tests/src/test/resources/logback-test.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - - - - - - diff --git a/licenses/HdrHistogram.txt b/licenses/HdrHistogram.txt deleted file mode 100644 index 401ccfb0ec5..00000000000 --- a/licenses/HdrHistogram.txt +++ /dev/null @@ -1,41 +0,0 @@ -The code in this repository code was Written by Gil Tene, Michael Barker, -and Matt Warren, and released to the public domain, as explained at -http://creativecommons.org/publicdomain/zero/1.0/ - -For users of this code who wish to consume it under the "BSD" license -rather than under the public domain or CC0 contribution text mentioned -above, the code found under this directory is *also* provided under the -following license (commonly referred to as the BSD 2-Clause License). This -license does not detract from the above stated release of the code into -the public domain, and simply represents an additional license granted by -the Author. - ------------------------------------------------------------------------------ -** Beginning of "BSD 2-Clause License" text. ** - - Copyright (c) 2012, 2013, 2014, 2015, 2016 Gil Tene - Copyright (c) 2014 Michael Barker - Copyright (c) 2014 Matt Warren - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/asm.txt b/licenses/asm.txt deleted file mode 100644 index c71bb7bac5d..00000000000 --- a/licenses/asm.txt +++ /dev/null @@ -1,27 +0,0 @@ -ASM: a very small and fast Java bytecode manipulation framework -Copyright (c) 2000-2011 INRIA, France Telecom -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/jnr-posix.txt b/licenses/jnr-posix.txt deleted file mode 100644 index 4dc4217a306..00000000000 --- a/licenses/jnr-posix.txt +++ /dev/null @@ -1,1076 +0,0 @@ -jnr-posix is released under a tri EPL/GPL/LGPL license. You can use it, -redistribute it and/or modify it under the terms of the: - - Eclipse Public License version 2.0 - OR - GNU General Public License version 2 - OR - GNU Lesser General Public License version 2.1 - -The complete text of the Eclipse Public License is as follows: - - Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - - 1. DEFINITIONS - - "Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. 
A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. - Contributions do not include changes or additions to the Program that - are not Modified Works. - - "Contributor" means any person or entity that Distributes the Program. - - "Licensed Patents" mean patent claims licensable by a Contributor which - are necessarily infringed by the use or sale of its Contribution alone - or when combined with the Program. - - "Program" means the Contributions Distributed in accordance with this - Agreement. - - "Recipient" means anyone who receives the Program under this Agreement - or any Secondary License (as applicable), including Contributors. - - "Derivative Works" shall mean any work, whether in Source Code or other - form, that is based on (or derived from) the Program and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. - - "Modified Works" shall mean any work in Source Code or other form that - results from an addition to, deletion from, or modification of the - contents of the Program, including, for purposes of clarity any new file - in Source Code form that contains any contents of the Program. Modified - Works shall not include works that contain only declarations, - interfaces, types, classes, structures, or files of the Program solely - in each case in order to link to, bind by name, or subclass the Program - or Modified Works thereof. - - "Distribute" means the acts of a) distributing or b) making available - in any manner that enables the transfer of a copy. - - "Source Code" means the form of a Program preferred for making - modifications, including but not limited to software source code, - documentation source, and configuration files. 
- - "Secondary License" means either the GNU General Public License, - Version 2.0, or any later versions of that license, including any - exceptions or additional permissions as identified by the initial - Contributor. - - 2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. - - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. - Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. 
For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. - - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - - 3. REQUIREMENTS - - 3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of 
this section 3. - - 3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - - 3.3 Contributors may not remove or alter any copyright, patent, - trademark, attribution notices, disclaimers of warranty, or limitations - of liability ("notices") contained within the Program from any copy of - the Program which they Distribute, provided that Contributors may add - their own appropriate notices. - - 4. COMMERCIAL DISTRIBUTION - - Commercial distributors of software may accept certain responsibilities - with respect to end users, business partners and the like. While this - license is intended to facilitate the commercial use of the Program, - the Contributor who includes the Program in a commercial product - offering should do so in a manner which does not create potential - liability for other Contributors. Therefore, if a Contributor includes - the Program in a commercial product offering, such Contributor - ("Commercial Contributor") hereby agrees to defend and indemnify every - other Contributor ("Indemnified Contributor") against any losses, - damages and costs (collectively "Losses") arising from claims, lawsuits - and other legal actions brought by a third party against the Indemnified - Contributor to the extent caused by the acts or omissions of such - Commercial Contributor in connection with its distribution of the Program - in a commercial product offering. The obligations in this section do not - apply to any claims or Losses relating to any actual or alleged - intellectual property infringement. 
In order to qualify, an Indemnified - Contributor must: a) promptly notify the Commercial Contributor in - writing of such claim, and b) allow the Commercial Contributor to control, - and cooperate with the Commercial Contributor in, the defense and any - related settlement negotiations. The Indemnified Contributor may - participate in any such claim at its own expense. - - For example, a Contributor might include the Program in a commercial - product offering, Product X. That Contributor is then a Commercial - Contributor. If that Commercial Contributor then makes performance - claims, or offers warranties related to Product X, those performance - claims and warranties are such Commercial Contributor's responsibility - alone. Under this section, the Commercial Contributor would have to - defend claims against the other Contributors related to those performance - claims and warranties, and if a court requires any other Contributor to - pay any damages as a result, the Commercial Contributor must pay - those damages. - - 5. NO WARRANTY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT - PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" - BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR - IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF - TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR - PURPOSE. Each Recipient is solely responsible for determining the - appropriateness of using and distributing the Program and assumes all - risks associated with its exercise of rights under this Agreement, - including but not limited to the risks and costs of program errors, - compliance with applicable laws, damage to or loss of data, programs - or equipment, and unavailability or interruption of operations. - - 6. 
DISCLAIMER OF LIABILITY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT - PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS - SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST - PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE - EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGES. - - 7. GENERAL - - If any provision of this Agreement is invalid or unenforceable under - applicable law, it shall not affect the validity or enforceability of - the remainder of the terms of this Agreement, and without further - action by the parties hereto, such provision shall be reformed to the - minimum extent necessary to make such provision valid and enforceable. - - If Recipient institutes patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that the - Program itself (excluding combinations of the Program with other software - or hardware) infringes such Recipient's patent(s), then such Recipient's - rights granted under Section 2(b) shall terminate as of the date such - litigation is filed. - - All Recipient's rights under this Agreement shall terminate if it - fails to comply with any of the material terms or conditions of this - Agreement and does not cure such failure in a reasonable period of - time after becoming aware of such noncompliance. If all Recipient's - rights under this Agreement terminate, Recipient agrees to cease use - and distribution of the Program as soon as reasonably practicable. - However, Recipient's obligations under this Agreement and any licenses - granted by Recipient relating to the Program shall continue and survive. 
- - Everyone is permitted to copy and distribute copies of this Agreement, - but in order to avoid inconsistency the Agreement is copyrighted and - may only be modified in the following manner. The Agreement Steward - reserves the right to publish new versions (including revisions) of - this Agreement from time to time. No one other than the Agreement - Steward has the right to modify this Agreement. The Eclipse Foundation - is the initial Agreement Steward. The Eclipse Foundation may assign the - responsibility to serve as the Agreement Steward to a suitable separate - entity. Each new version of the Agreement will be given a distinguishing - version number. The Program (including Contributions) may always be - Distributed subject to the version of the Agreement under which it was - received. In addition, after a new version of the Agreement is published, - Contributor may elect to Distribute the Program (including its - Contributions) under the new version. - - Except as expressly stated in Sections 2(a) and 2(b) above, Recipient - receives no rights or licenses to the intellectual property of any - Contributor under this Agreement, whether expressly, by implication, - estoppel or otherwise. All rights in the Program not expressly granted - under this Agreement are reserved. Nothing in this Agreement is intended - to be enforceable by any entity that is not a Contributor or Recipient. - No third-party beneficiary rights are created under this Agreement. - - Exhibit A - Form of Secondary Licenses Notice - - "This Source Code may also be made available under the following - Secondary Licenses when the conditions for such availability set forth - in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), - version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. 
- - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. - -The complete text of the GNU General Public License v2 is as follows: - - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your - freedom to share and change it. By contrast, the GNU General Public - License is intended to guarantee your freedom to share and change free - software--to make sure the software is free for all its users. This - General Public License applies to most of the Free Software - Foundation's software and to any other program whose authors commit to - using it. (Some other Free Software Foundation software is covered by - the GNU Library General Public License instead.) You can apply it to - your programs, too. - - When we speak of free software, we are referring to freedom, not - price. Our General Public Licenses are designed to make sure that you - have the freedom to distribute copies of free software (and charge for - this service if you wish), that you receive source code or can get it - if you want it, that you can change the software or use pieces of it - in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid - anyone to deny you these rights or to ask you to surrender the rights. - These restrictions translate to certain responsibilities for you if you - distribute copies of the software, or if you modify it. 
- - For example, if you distribute copies of such a program, whether - gratis or for a fee, you must give the recipients all the rights that - you have. You must make sure that they, too, receive or can get the - source code. And you must show them these terms so they know their - rights. - - We protect your rights with two steps: (1) copyright the software, and - (2) offer you this license which gives you legal permission to copy, - distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain - that everyone understands that there is no warranty for this free - software. If the software is modified by someone else and passed on, we - want its recipients to know that what they have is not the original, so - that any problems introduced by others will not reflect on the original - authors' reputations. - - Finally, any free program is threatened constantly by software - patents. We wish to avoid the danger that redistributors of a free - program will individually obtain patent licenses, in effect making the - program proprietary. To prevent this, we have made it clear that any - patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and - modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains - a notice placed by the copyright holder saying it may be distributed - under the terms of this General Public License. The "Program", below, - refers to any such program or work, and a "work based on the Program" - means either the Program or any derivative work under copyright law: - that is to say, a work containing the Program or a portion of it, - either verbatim or with modifications and/or translated into another - language. 
(Hereinafter, translation is included without limitation in - the term "modification".) Each licensee is addressed as "you". - - Activities other than copying, distribution and modification are not - covered by this License; they are outside its scope. The act of - running the Program is not restricted, and the output from the Program - is covered only if its contents constitute a work based on the - Program (independent of having been made by running the Program). - Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's - source code as you receive it, in any medium, provided that you - conspicuously and appropriately publish on each copy an appropriate - copyright notice and disclaimer of warranty; keep intact all the - notices that refer to this License and to the absence of any warranty; - and give any other recipients of the Program a copy of this License - along with the Program. - - You may charge a fee for the physical act of transferring a copy, and - you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion - of it, thus forming a work based on the Program, and copy and - distribute such modifications or work under the terms of Section 1 - above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - - These requirements apply to the modified work as a whole. If - identifiable sections of that work are not derived from the Program, - and can be reasonably considered independent and separate works in - themselves, then this License, and its terms, do not apply to those - sections when you distribute them as separate works. But when you - distribute the same sections as part of a whole which is a work based - on the Program, the distribution of the whole must be on the terms of - this License, whose permissions for other licensees extend to the - entire whole, and thus to each and every part regardless of who wrote it. - - Thus, it is not the intent of this section to claim rights or contest - your rights to work written entirely by you; rather, the intent is to - exercise the right to control the distribution of derivative or - collective works based on the Program. - - In addition, mere aggregation of another work not based on the Program - with the Program (or with a work based on the Program) on a volume of - a storage or distribution medium does not bring the other work under - the scope of this License. - - 3. 
You may copy and distribute the Program (or a work based on it, - under Section 2) in object code or executable form under the terms of - Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - - The source code for a work means the preferred form of the work for - making modifications to it. For an executable work, complete source - code means all the source code for all modules it contains, plus any - associated interface definition files, plus the scripts used to - control compilation and installation of the executable. However, as a - special exception, the source code distributed need not include - anything that is normally distributed (in either source or binary - form) with the major components (compiler, kernel, and so on) of the - operating system on which the executable runs, unless that component - itself accompanies the executable. 
- - If distribution of executable or object code is made by offering - access to copy from a designated place, then offering equivalent - access to copy the source code from the same place counts as - distribution of the source code, even though third parties are not - compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program - except as expressly provided under this License. Any attempt - otherwise to copy, modify, sublicense or distribute the Program is - void, and will automatically terminate your rights under this License. - However, parties who have received copies, or rights, from you under - this License will not have their licenses terminated so long as such - parties remain in full compliance. - - 5. You are not required to accept this License, since you have not - signed it. However, nothing else grants you permission to modify or - distribute the Program or its derivative works. These actions are - prohibited by law if you do not accept this License. Therefore, by - modifying or distributing the Program (or any work based on the - Program), you indicate your acceptance of this License to do so, and - all its terms and conditions for copying, distributing or modifying - the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the - Program), the recipient automatically receives a license from the - original licensor to copy, distribute or modify the Program subject to - these terms and conditions. You may not impose any further - restrictions on the recipients' exercise of the rights granted herein. - You are not responsible for enforcing compliance by third parties to - this License. - - 7. 
If, as a consequence of a court judgment or allegation of patent - infringement or for any other reason (not limited to patent issues), - conditions are imposed on you (whether by court order, agreement or - otherwise) that contradict the conditions of this License, they do not - excuse you from the conditions of this License. If you cannot - distribute so as to satisfy simultaneously your obligations under this - License and any other pertinent obligations, then as a consequence you - may not distribute the Program at all. For example, if a patent - license would not permit royalty-free redistribution of the Program by - all those who receive copies directly or indirectly through you, then - the only way you could satisfy both it and this License would be to - refrain entirely from distribution of the Program. - - If any portion of this section is held invalid or unenforceable under - any particular circumstance, the balance of the section is intended to - apply and the section as a whole is intended to apply in other - circumstances. - - It is not the purpose of this section to induce you to infringe any - patents or other property right claims or to contest validity of any - such claims; this section has the sole purpose of protecting the - integrity of the free software distribution system, which is - implemented by public license practices. Many people have made - generous contributions to the wide range of software distributed - through that system in reliance on consistent application of that - system; it is up to the author/donor to decide if he or she is willing - to distribute software through any other system and a licensee cannot - impose that choice. - - This section is intended to make thoroughly clear what is believed to - be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in - certain countries either by patents or by copyrighted interfaces, the - original copyright holder who places the Program under this License - may add an explicit geographical distribution limitation excluding - those countries, so that distribution is permitted only in or among - countries not thus excluded. In such case, this License incorporates - the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions - of the General Public License from time to time. Such new versions will - be similar in spirit to the present version, but may differ in detail to - address new problems or concerns. - - Each version is given a distinguishing version number. If the Program - specifies a version number of this License which applies to it and "any - later version", you have the option of following the terms and conditions - either of that version or of any later version published by the Free - Software Foundation. If the Program does not specify a version number of - this License, you may choose any version ever published by the Free Software - Foundation. - - 10. If you wish to incorporate parts of the Program into other free - programs whose distribution conditions are different, write to the author - to ask for permission. For software which is copyrighted by the Free - Software Foundation, write to the Free Software Foundation; we sometimes - make exceptions for this. Our decision will be guided by the two goals - of preserving the free status of all derivatives of our free software and - of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY - FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN - OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES - PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED - OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS - TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE - PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, - REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING - WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR - REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, - INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING - OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED - TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY - YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER - PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE - POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - -The complete text of the GNU Lesser General Public License 2.1 is as follows: - - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - [This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your - freedom to share and change it. By contrast, the GNU General Public - Licenses are intended to guarantee your freedom to share and change - free software--to make sure the software is free for all its users. 
- - This license, the Lesser General Public License, applies to some - specially designated software packages--typically libraries--of the - Free Software Foundation and other authors who decide to use it. You - can use it too, but we suggest you first think carefully about whether - this license or the ordinary General Public License is the better - strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, - not price. Our General Public Licenses are designed to make sure that - you have the freedom to distribute copies of free software (and charge - for this service if you wish); that you receive source code or can get - it if you want it; that you can change the software and use pieces of - it in new free programs; and that you are informed that you can do - these things. - - To protect your rights, we need to make restrictions that forbid - distributors to deny you these rights or to ask you to surrender these - rights. These restrictions translate to certain responsibilities for - you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis - or for a fee, you must give the recipients all the rights that we gave - you. You must make sure that they, too, receive or can get the source - code. If you link other code with the library, you must provide - complete object files to the recipients, so that they can relink them - with the library after making changes to the library and recompiling - it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the - library, and (2) we offer you this license, which gives you legal - permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that - there is no warranty for the free library. 
Also, if the library is - modified by someone else and passed on, the recipients should know - that what they have is not the original version, so that the original - author's reputation will not be affected by problems that might be - introduced by others. - - Finally, software patents pose a constant threat to the existence of - any free program. We wish to make sure that a company cannot - effectively restrict the users of a free program by obtaining a - restrictive license from a patent holder. Therefore, we insist that - any patent license obtained for a version of the library must be - consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the - ordinary GNU General Public License. This license, the GNU Lesser - General Public License, applies to certain designated libraries, and - is quite different from the ordinary General Public License. We use - this license for certain libraries in order to permit linking those - libraries into non-free programs. - - When a program is linked with a library, whether statically or using - a shared library, the combination of the two is legally speaking a - combined work, a derivative of the original library. The ordinary - General Public License therefore permits such linking only if the - entire combination fits its criteria of freedom. The Lesser General - Public License permits more lax criteria for linking other code with - the library. - - We call this license the "Lesser" General Public License because it - does Less to protect the user's freedom than the ordinary General - Public License. It also provides other free software developers Less - of an advantage over competing non-free programs. These disadvantages - are the reason we use the ordinary General Public License for many - libraries. However, the Lesser license provides advantages in certain - special circumstances. 
- - For example, on rare occasions, there may be a special need to - encourage the widest possible use of a certain library, so that it becomes - a de-facto standard. To achieve this, non-free programs must be - allowed to use the library. A more frequent case is that a free - library does the same job as widely used non-free libraries. In this - case, there is little to gain by limiting the free library to free - software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free - programs enables a greater number of people to use a large body of - free software. For example, permission to use the GNU C Library in - non-free programs enables many more people to use the whole GNU - operating system, as well as its variant, the GNU/Linux operating - system. - - Although the Lesser General Public License is Less protective of the - users' freedom, it does ensure that the user of a program that is - linked with the Library has the freedom and the wherewithal to run - that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and - modification follow. Pay close attention to the difference between a - "work based on the library" and a "work that uses the library". The - former contains code derived from the library, whereas the latter must - be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other - program which contains a notice placed by the copyright holder or - other authorized party saying it may be distributed under the terms of - this Lesser General Public License (also called "this License"). - Each licensee is addressed as "you". 
- - A "library" means a collection of software functions and/or data - prepared so as to be conveniently linked with application programs - (which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work - which has been distributed under these terms. A "work based on the - Library" means either the Library or any derivative work under - copyright law: that is to say, a work containing the Library or a - portion of it, either verbatim or with modifications and/or translated - straightforwardly into another language. (Hereinafter, translation is - included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for - making modifications to it. For a library, complete source code means - all the source code for all modules it contains, plus any associated - interface definition files, plus the scripts used to control compilation - and installation of the library. - - Activities other than copying, distribution and modification are not - covered by this License; they are outside its scope. The act of - running a program using the Library is not restricted, and output from - such a program is covered only if its contents constitute a work based - on the Library (independent of the use of the Library in a tool for - writing it). Whether that is true depends on what the Library does - and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's - complete source code as you receive it, in any medium, provided that - you conspicuously and appropriately publish on each copy an - appropriate copyright notice and disclaimer of warranty; keep intact - all the notices that refer to this License and to the absence of any - warranty; and distribute a copy of this License along with the - Library. 
- - You may charge a fee for the physical act of transferring a copy, - and you may at your option offer warranty protection in exchange for a - fee. - - 2. You may modify your copy or copies of the Library or any portion - of it, thus forming a work based on the Library, and copy and - distribute such modifications or work under the terms of Section 1 - above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - - These requirements apply to the modified work as a whole. If - identifiable sections of that work are not derived from the Library, - and can be reasonably considered independent and separate works in - themselves, then this License, and its terms, do not apply to those - sections when you distribute them as separate works. 
But when you - distribute the same sections as part of a whole which is a work based - on the Library, the distribution of the whole must be on the terms of - this License, whose permissions for other licensees extend to the - entire whole, and thus to each and every part regardless of who wrote - it. - - Thus, it is not the intent of this section to claim rights or contest - your rights to work written entirely by you; rather, the intent is to - exercise the right to control the distribution of derivative or - collective works based on the Library. - - In addition, mere aggregation of another work not based on the Library - with the Library (or with a work based on the Library) on a volume of - a storage or distribution medium does not bring the other work under - the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public - License instead of this License to a given copy of the Library. To do - this, you must alter all the notices that refer to this License, so - that they refer to the ordinary GNU General Public License, version 2, - instead of to this License. (If a newer version than version 2 of the - ordinary GNU General Public License has appeared, then you can specify - that version instead if you wish.) Do not make any other change in - these notices. - - Once this change is made in a given copy, it is irreversible for - that copy, so the ordinary GNU General Public License applies to all - subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of - the Library into a program that is not a library. - - 4. 
You may copy and distribute the Library (or a portion or - derivative of it, under Section 2) in object code or executable form - under the terms of Sections 1 and 2 above provided that you accompany - it with the complete corresponding machine-readable source code, which - must be distributed under the terms of Sections 1 and 2 above on a - medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy - from a designated place, then offering equivalent access to copy the - source code from the same place satisfies the requirement to - distribute the source code, even though third parties are not - compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the - Library, but is designed to work with the Library by being compiled or - linked with it, is called a "work that uses the Library". Such a - work, in isolation, is not a derivative work of the Library, and - therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library - creates an executable that is a derivative of the Library (because it - contains portions of the Library), rather than a "work that uses the - library". The executable is therefore covered by this License. - Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file - that is part of the Library, the object code for the work may be a - derivative work of the Library even though the source code is not. - Whether this is true is especially significant if the work can be - linked without the Library, or if the work is itself a library. The - threshold for this to be true is not precisely defined by law. 
- - If such an object file uses only numerical parameters, data - structure layouts and accessors, and small macros and small inline - functions (ten lines or less in length), then the use of the object - file is unrestricted, regardless of whether it is legally a derivative - work. (Executables containing this object code plus portions of the - Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may - distribute the object code for the work under the terms of Section 6. - Any executables containing that work also fall under Section 6, - whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or - link a "work that uses the Library" with the Library to produce a - work containing portions of the Library, and distribute that work - under terms of your choice, provided that the terms permit - modification of the work for the customer's own use and reverse - engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the - Library is used in it and that the Library and its use are covered by - this License. You must supply a copy of this License. If the work - during execution displays copyright notices, you must include the - copyright notice for the Library among them, as well as a reference - directing the user to the copy of this License. Also, you must do one - of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. 
(It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the - Library" must include any data and utility programs needed for - reproducing the executable from it. However, as a special exception, - the materials to be distributed need not include anything that is - normally distributed (in either source or binary form) with the major - components (compiler, kernel, and so on) of the operating system on - which the executable runs, unless that component itself accompanies - the executable. - - It may happen that this requirement contradicts the license - restrictions of other proprietary libraries that do not normally - accompany the operating system. Such a contradiction means you cannot - use both them and the Library together in an executable that you - distribute. - - 7. 
You may place library facilities that are a work based on the - Library side-by-side in a single library together with other library - facilities not covered by this License, and distribute such a combined - library, provided that the separate distribution of the work based on - the Library and of the other library facilities is otherwise - permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute - the Library except as expressly provided under this License. Any - attempt otherwise to copy, modify, sublicense, link with, or - distribute the Library is void, and will automatically terminate your - rights under this License. However, parties who have received copies, - or rights, from you under this License will not have their licenses - terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not - signed it. However, nothing else grants you permission to modify or - distribute the Library or its derivative works. These actions are - prohibited by law if you do not accept this License. Therefore, by - modifying or distributing the Library (or any work based on the - Library), you indicate your acceptance of this License to do so, and - all its terms and conditions for copying, distributing or modifying - the Library or works based on it. - - 10. 
Each time you redistribute the Library (or any work based on the - Library), the recipient automatically receives a license from the - original licensor to copy, distribute, link with or modify the Library - subject to these terms and conditions. You may not impose any further - restrictions on the recipients' exercise of the rights granted herein. - You are not responsible for enforcing compliance by third parties with - this License. - - 11. If, as a consequence of a court judgment or allegation of patent - infringement or for any other reason (not limited to patent issues), - conditions are imposed on you (whether by court order, agreement or - otherwise) that contradict the conditions of this License, they do not - excuse you from the conditions of this License. If you cannot - distribute so as to satisfy simultaneously your obligations under this - License and any other pertinent obligations, then as a consequence you - may not distribute the Library at all. For example, if a patent - license would not permit royalty-free redistribution of the Library by - all those who receive copies directly or indirectly through you, then - the only way you could satisfy both it and this License would be to - refrain entirely from distribution of the Library. - - If any portion of this section is held invalid or unenforceable under any - particular circumstance, the balance of the section is intended to apply, - and the section as a whole is intended to apply in other circumstances. - - It is not the purpose of this section to induce you to infringe any - patents or other property right claims or to contest validity of any - such claims; this section has the sole purpose of protecting the - integrity of the free software distribution system which is - implemented by public license practices. 
Many people have made - generous contributions to the wide range of software distributed - through that system in reliance on consistent application of that - system; it is up to the author/donor to decide if he or she is willing - to distribute software through any other system and a licensee cannot - impose that choice. - - This section is intended to make thoroughly clear what is believed to - be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in - certain countries either by patents or by copyrighted interfaces, the - original copyright holder who places the Library under this License may add - an explicit geographical distribution limitation excluding those countries, - so that distribution is permitted only in or among countries not thus - excluded. In such case, this License incorporates the limitation as if - written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new - versions of the Lesser General Public License from time to time. - Such new versions will be similar in spirit to the present version, - but may differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the Library - specifies a version number of this License which applies to it and - "any later version", you have the option of following the terms and - conditions either of that version or of any later version published by - the Free Software Foundation. If the Library does not specify a - license version number, you may choose any version ever published by - the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free - programs whose distribution conditions are incompatible with these, - write to the author to ask for permission. For software which is - copyrighted by the Free Software Foundation, write to the Free - Software Foundation; we sometimes make exceptions for this. 
Our - decision will be guided by the two goals of preserving the free status - of all derivatives of our free software and of promoting the sharing - and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO - WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. - EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR - OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY - KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE - LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME - THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN - WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY - AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU - FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR - CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE - LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING - RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A - FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF - SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest - possible use to the public, we recommend making it free software that - everyone can redistribute and change. You can do so by permitting - redistribution under these terms (or, alternatively, under the terms of the - ordinary General Public License). - - To apply these terms, attach the following notices to the library. 
It is - safest to attach them to the start of each source file to most effectively - convey the exclusion of warranty; and each file should have at least the - "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - Also add information on how to contact you by electronic and paper mail. - - You should also get your employer (if you work as a programmer) or your - school, if any, to sign a "copyright disclaimer" for the library, if - necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - - That's all there is to it! 
diff --git a/licenses/jnr-x86asm.txt b/licenses/jnr-x86asm.txt deleted file mode 100644 index c9583db05fd..00000000000 --- a/licenses/jnr-x86asm.txt +++ /dev/null @@ -1,24 +0,0 @@ - - Copyright (C) 2010 Wayne Meissner - Copyright (c) 2008-2009, Petr Kobalicek - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/reactive-streams.txt b/licenses/reactive-streams.txt deleted file mode 100644 index 1e141c13ddb..00000000000 --- a/licenses/reactive-streams.txt +++ /dev/null @@ -1,7 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/slf4j-api.txt b/licenses/slf4j-api.txt deleted file mode 100644 index bb09a9ad4ec..00000000000 --- a/licenses/slf4j-api.txt +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2004-2023 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/manual/.nav b/manual/.nav deleted file mode 100644 index 35e0225438b..00000000000 --- a/manual/.nav +++ /dev/null @@ -1,8 +0,0 @@ -core -query_builder -mapper -api_conventions -case_sensitivity -osgi -cloud -developer diff --git a/manual/README.md b/manual/README.md deleted file mode 100644 index 049ddc8c8e9..00000000000 --- a/manual/README.md +++ /dev/null @@ -1,36 +0,0 @@ - - -## Manual - -Driver modules: - -* [Core](core/): the main entry point, deals with connectivity and query execution. -* [Query builder](query_builder/): a fluent API to create CQL queries programmatically. -* [Mapper](mapper/): generates the boilerplate to execute queries and convert the results into - application-level objects. -* [Developer docs](developer/): explains the codebase and internal extension points for advanced - customization. - -Common topics: - -* [API conventions](api_conventions/) -* [Case sensitivity](case_sensitivity/) -* [OSGi](osgi/) -* [Cloud](cloud/) diff --git a/manual/api_conventions/README.md b/manual/api_conventions/README.md deleted file mode 100644 index 553392658dd..00000000000 --- a/manual/api_conventions/README.md +++ /dev/null @@ -1,63 +0,0 @@ - - -## API conventions - -In previous versions, the driver relied solely on Java visibility rules: everything was either -private or part of the public API. This made it hard to cleanly organize the code, and things ended -up all together in one monolithic package; it also created a dilemma between providing useful hooks -for advanced users, or keeping them hidden to limit the API surface. - -Starting with 4.0, we adopt a package naming convention to address those issues: - -* Everything under `com.datastax.oss.driver.api` is part of the "official" public API of the driver, - intended for regular client applications to execute queries. It follows [semantic versioning]: - binary compatibility is guaranteed across minor and patch versions. 
- -* Everything under `com.datastax.oss.driver.internal` is the "internal" API, intended primarily for - internal communication between driver components, and secondarily for advanced customization. If - you use it from your code, the rules are: - 1. with great power comes great responsibility: this stuff is more involved, and has the - potential to break the driver. You should probably have some familiarity with the source - code. - 2. backward compatibility is "best-effort" only: we'll try to preserve it as much as possible, - but it's not formally guaranteed. - -The public API never exposes internal types (this is enforced automatically by our build). You'll -generally have to go through an explicit cast: - -```java -import com.datastax.oss.driver.api.core.context.DriverContext; - -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; - -// Public API: -DriverContext context = session.getContext(); - -// Switch to the internal API to force a node down: -InternalDriverContext internalContext = (InternalDriverContext) context; -InetSocketAddress address = new InetSocketAddress("127.0.0.1", 9042); -internalContext.getEventBus().fire(TopologyEvent.forceDown(address)); -``` - -So the risk of unintentionally using the internal API is very low. To double-check, you can always -grep `import com.datastax.oss.driver.internal` in your source files. - -[semantic versioning]: http://semver.org/ diff --git a/manual/case_sensitivity/README.md b/manual/case_sensitivity/README.md deleted file mode 100644 index e9dbf1bf9a8..00000000000 --- a/manual/case_sensitivity/README.md +++ /dev/null @@ -1,152 +0,0 @@ - - -## Case sensitivity - -### In Cassandra - -Cassandra identifiers, such as keyspace, table and column names, are case-insensitive by default. 
-For example, if you create the following table: - -``` -cqlsh> CREATE TABLE test.FooBar(k int PRIMARY KEY); -``` - -Cassandra actually stores the table name as lower-case: - -``` -cqlsh> SELECT table_name FROM system_schema.tables WHERE keyspace_name = 'test'; - - table_name ------------- - foobar -``` - -And you can use whatever case you want in your queries: - -``` -cqlsh> SELECT * FROM test.FooBar; -cqlsh> SELECT * FROM test.foobar; -cqlsh> SELECT * FROM test.FoObAr; -``` - -However, if you enclose an identifier in double quotes, it becomes case-sensitive: - -``` -cqlsh> CREATE TABLE test."FooBar"(k int PRIMARY KEY); -cqlsh> SELECT table_name FROM system_schema.tables WHERE keyspace_name = 'test'; - - table_name ------------- - FooBar -``` - -You now have to use the exact, quoted form in your queries: - -``` -cqlsh> SELECT * FROM test."FooBar"; -``` - -If you forget to quote, or use the wrong case, you'll get an error: - -``` -cqlsh> SELECT * FROM test.Foobar; -InvalidRequest: Error from server: code=2200 [Invalid query] message="table foobar does not exist" - -cqlsh> SELECT * FROM test."FOOBAR"; -InvalidRequest: Error from server: code=2200 [Invalid query] message="table FOOBAR does not exist" -``` - -### In the driver - -When we deal with identifiers, we use the following definitions: - -* **CQL form**: how you would type it in a CQL query. In other words, case-sensitive if it's quoted, - case-insensitive otherwise; -* **internal form**: how it is stored in system tables. In other words, never quoted and always in - its exact case. - -In previous driver versions, identifiers were represented as raw strings. The problem is that this -does not capture the form; when a method processed an identifier, it always had to know where it -came from and what form it was in, and possibly convert it. This led a lot of internal complexity, -and recurring bugs. - -To address this issue, driver 4+ uses a wrapper: [CqlIdentifier]. 
Its API methods are always -explicit about the form: - -```java -CqlIdentifier caseInsensitiveId = CqlIdentifier.fromCql("FooBar"); -System.out.println(caseInsensitiveId.asInternal()); // foobar -System.out.println(caseInsensitiveId.asCql(/*pretty=*/ false)); // "foobar" -System.out.println(caseInsensitiveId.asCql(true)); // foobar - -// Double-quotes need to be escaped inside Java strings -CqlIdentifier caseSensitiveId = CqlIdentifier.fromCql("\"FooBar\""); -System.out.println(caseSensitiveId.asInternal()); // FooBar -System.out.println(caseSensitiveId.asCql(true)); // "FooBar" -System.out.println(caseSensitiveId.asCql(false)); // "FooBar" - -CqlIdentifier caseSensitiveId2 = CqlIdentifier.fromInternal("FooBar"); -assert caseSensitiveId.equals(caseSensitiveId2); -``` - -*Side note: as shown above, `asCql` has a pretty-printing option that omits the quotes if they are -not necessary. This looks nicer, but is slightly more expensive because it requires parsing the -string.* - -The driver API uses `CqlIdentifier` whenever it produces or consumes an identifier. For example: - -* getting the keyspace from a table's metadata: `CqlIdentifier keyspaceId = - tableMetadata.getKeyspace()`; -* setting the keyspace when building a session: `CqlSession.builder().withKeyspace(keyspaceId)`. - -For "consuming" methods, string overloads are also provided for convenience, for example -`SessionBuilder.withKeyspace(String)`. - -* getters and setters of "data container" types [Row], [UdtValue], and [BoundStatement] follow - special rules described [here][AccessibleByName] (these methods are treated apart because they are - generally invoked very often, and therefore avoid to create `CqlIdentifier` instances internally); -* in other cases, the string is always assumed to be in CQL form, and converted on the fly with - `CqlIdentifier.fromCql`. 
- -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html -[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/AccessibleByName.html - -### Good practices - -As should be clear by now, case sensitivity introduces a lot of extra (and arguably unnecessary) -complexity. - -The Java Driver team's recommendation is: - -> **Always use case-insensitive identifiers in your data model.** - -You'll never have to create `CqlIdentifier` instances in your application code, nor think about -CQL/internal forms. When you pass an identifier to the driver, use the string-based methods. When -the driver returns an identifier and you need to convert it into a string, use `asInternal()`. - -If you worry about readability, use snake case (`shopping_cart`), or simply stick to camel case -(`ShoppingCart`) and ignore the fact that Cassandra lower-cases everything internally. - -The only reason to use case sensitivity should be if you don't control the data model. In that -case, either pass quoted strings to the driver, or use `CqlIdentifier` instances (stored as -constants to avoid creating them over and over). diff --git a/manual/cloud/README.md b/manual/cloud/README.md deleted file mode 100644 index 9116b03dac3..00000000000 --- a/manual/cloud/README.md +++ /dev/null @@ -1,150 +0,0 @@ - - -## Connecting to Astra (Cloud) - -Using the Java Driver to connect to a DataStax Astra database is almost identical to using -the driver to connect to any normal Apache Cassandra® database. 
The only differences are in how the -driver is configured in an application and that you will need to obtain a *secure connect bundle*. - -### Prerequisites - -1. [Download][Download Maven] and [install][Install Maven] Maven. -2. Create an Astra database on [AWS/Azure/GCP][Create an Astra database - AWS/Azure/GCP]; - alternatively, have a team member provide access to their - Astra database (see instructions for [AWS/Azure/GCP][Access an Astra database - AWS/Azure/GCP]) to - obtain database connection details. -3. Download the secure connect bundle (see instructions for - [AWS/Azure/GCP][Download the secure connect bundle - AWS/Azure/GCP]) that contains connection - information such as contact points and certificates. - -### Procedure - -Create a minimal project structure as explained [here][minimal project structure]. Then modify -`Main.java` using one of the following approaches: - -#### Programmatic configuration - -You can pass the connection information directly to `CqlSession.builder()`: - -```java -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import java.nio.file.Paths; - -public class Main { - - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder() - // make sure you change the path to the secure connect bundle below - .withCloudSecureConnectBundle(Paths.get("/path/to/secure-connect-database_name.zip")) - .withAuthCredentials("user_name","password") - .withKeyspace("keyspace_name") - .build()) { - - // For the sake of example, run a simple query and print the results - ResultSet rs = session.execute("select release_version from system.local"); - Row row = rs.one(); - if (row != null) { - System.out.println(row.getString("release_version")); - } else { - System.out.println("An error occurred."); - } - } - } - } -``` - -The path to the secure connect bundle for your Astra database is specified with 
-`withCloudSecureConnectBundle()`. The authentication credentials must be specified separately with -`withAuthCredentials()`, and match the username and password that were configured when creating the -Astra database. - -Note the following: - -* an SSL connection will be established automatically. Manual SSL configuration is not allowed, any - settings in the driver configuration (`advanced.ssl-engine-factory`) will be ignored; -* the secure connect bundle contains all of the necessary contact information. Specifying contact - points manually is not allowed, and will result in an error; -* if the driver configuration does not specify an explicit consistency level, it will default to - `LOCAL_QUORUM` (instead of `LOCAL_ONE` when connecting to a normal Cassandra database). - -#### File-based configuration - -Alternatively, the connection information can be specified in the driver's configuration file -(`application.conf`). Merge the following options with any content already present: - -```properties -datastax-java-driver { - basic { - # change this to match the target keyspace - session-keyspace = keyspace_name - cloud { - # change this to match bundle's location; can be either a path on the local filesystem - # or a valid URL, e.g. http://acme.com/path/to/secure-connect-database_name.zip - secure-connect-bundle = /path/to/secure-connect-database_name.zip - } - } - advanced { - auth-provider { - class = PlainTextAuthProvider - # change below to match the appropriate credentials - username = user_name - password = password - } - } -} -``` - -For more information about the driver configuration mechanism, refer to the [driver documentation]. 
- -With the above configuration, your main Java class can be simplified as shown below: - -```java -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; - -public class Main { - - public static void main(String[] args) { - // Create the CqlSession object; it will read the configuration file and pick the right - // values to connect to the Astra database. - try (CqlSession session = CqlSession.builder().build()) { - - ResultSet rs = session.execute("select release_version from system.local"); - Row row = rs.one(); - if (row != null) { - System.out.println(row.getString("release_version")); - } else { - System.out.println("An error occurred."); - } - } - } -} -``` - -[Download Maven]: https://maven.apache.org/download.cgi -[Install Maven]: https://maven.apache.org/install.html -[Create an Astra database - AWS/Azure/GCP]: https://docs.datastax.com/en/astra/docs/creating-your-astra-database.html -[Access an Astra database - AWS/Azure/GCP]: https://docs.datastax.com/en/astra/docs/obtaining-database-credentials.html#_sharing_your_secure_connect_bundle -[Download the secure connect bundle - AWS/Azure/GCP]: https://docs.datastax.com/en/astra/docs/obtaining-database-credentials.html -[minimal project structure]: ../core/integration/#minimal-project-structure -[driver documentation]: ../core/configuration/ diff --git a/manual/core/README.md b/manual/core/README.md deleted file mode 100644 index 5ca4cd7872f..00000000000 --- a/manual/core/README.md +++ /dev/null @@ -1,352 +0,0 @@ - - -## Core driver - -The core module handles cluster connectivity and request execution. It is published under the -following coordinates: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - -``` - -(For more details on setting up your build tool, see the [integration](integration/) page.) 
- -### Quick start - -Here's a short program that connects to Cassandra and executes a query: - -```java -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.*; - -try (CqlSession session = CqlSession.builder().build()) { // (1) - ResultSet rs = session.execute("select release_version from system.local"); // (2) - Row row = rs.one(); - System.out.println(row.getString("release_version")); // (3) -} -``` - -1. [CqlSession] is the main entry point of the driver. It holds the known state of the actual - Cassandra cluster, and is what you use to execute queries. It is thread-safe, you should create a - single instance (per target Cassandra cluster), and share it throughout your application; -2. we use `execute` to send a query to Cassandra. This returns a [ResultSet], which is an iterable - of [Row] objects. On the next line, we extract the first row (which is the only one in this case); -3. we extract the value of the first (and only) column from the row. - -Always close the `CqlSession` once you're done with it, in order to free underlying resources (TCP -connections, thread pools...). In this simple example, we can use a try-with-resources block because -`CqlSession` implements `java.lang.AutoCloseable`; in a real application, you'll probably call one -of the close methods (`close`, `closeAsync`, `forceCloseAsync`) explicitly. - -This example uses the synchronous API. Most methods have asynchronous equivalents (look for `*Async` -variants that return a `CompletionStage`). - - -### Setting up the driver - -#### [CqlSession] - -[CqlSession#builder()] provides a fluent API to create an instance programmatically. Most of the -customization is done through the driver configuration (refer to the -[corresponding section](configuration/) of this manual for full details). 
- -We recommend that you take a look at the [reference configuration](configuration/reference/) for the -list of available options, and cross-reference with the sub-sections in this manual for more -explanations. - -By default, `CqlSession.builder().build()` fails immediately if the cluster is not available. If you -want to retry instead, you can set the [reconnect-on-init](reconnection/#at-init-time) option in the -configuration. - -##### Contact points - -If you don't specify any contact point, the driver defaults to `127.0.0.1:9042`: - -```java -CqlSession session = CqlSession.builder().build(); -``` - -This is fine for a quick start on a developer workstation, but you'll quickly want to provide -specific addresses. There are two ways to do this: - -* via [SessionBuilder.addContactPoint()] or [SessionBuilder.addContactPoints()]; -* in the [configuration](configuration/) via the `basic.contact-points` option. - -As soon as there are explicit contact points, you also need to provide the name of the local -datacenter. All contact points must belong to it (as reported in their system tables: -`system.local.data_center` and `system.peers.data_center`). Again this can be specified either: - -* via [SessionBuilder.withLocalDatacenter()]; -* in the configuration via the `basic.load-balancing-policy.local-datacenter` option. 
- -Here is a full programmatic example: - -```java -CqlSession session = CqlSession.builder() - .addContactPoint(new InetSocketAddress("1.2.3.4", 9042)) - .addContactPoint(new InetSocketAddress("5.6.7.8", 9042)) - .withLocalDatacenter("datacenter1") - .build(); -``` - -And a full configuration example: - -``` -// Add `application.conf` to your classpath with the following contents: -datastax-java-driver { - basic { - contact-points = [ "1.2.3.4:9042", "5.6.7.8:9042" ] - load-balancing-policy.local-datacenter = datacenter1 - } -} -``` - -For more details about the local datacenter, refer to the [load balancing -policy](load_balancing/#local-only) section. - -##### Keyspace - -By default, a session isn't tied to any specific keyspace. You'll need to prefix table names in your -queries: - -```java -session.execute("SELECT * FROM my_keyspace.my_table WHERE id = 1"); -``` - -You can also specify a keyspace at construction time, either through the -[configuration](configuration/): - -``` -datastax-java-driver { - basic.session-keyspace = my_keyspace -} -``` - -Or with the builder: - -```java -CqlSession session = CqlSession.builder() - .withKeyspace(CqlIdentifier.fromCql("my_keyspace")) - .build(); -``` - -That keyspace will be used as the default when table names are not qualified: - -```java -session.execute("SELECT * FROM my_table WHERE id = 1"); -session.execute("SELECT * FROM other_keyspace.other_table WHERE id = 1"); -``` - -You might be tempted to open a separate session for each keyspace used in your application; however, -connection pools are created at the session level, so each new session will consume additional -system resources: - -```java -// Anti-pattern: creating two sessions doubles the number of TCP connections opened by the driver -CqlSession session1 = CqlSession.builder().withKeyspace(CqlIdentifier.fromCql("ks1")).build(); -CqlSession session2 = CqlSession.builder().withKeyspace(CqlIdentifier.fromCql("ks2")).build(); -``` - -If you issue a `USE` 
statement, it will change the default keyspace on that session: - -```java -CqlSession session = CqlSession.builder().build(); -// No default keyspace set, need to prefix: -session.execute("SELECT * FROM my_keyspace.my_table WHERE id = 1"); - -session.execute("USE my_keyspace"); -// Now the keyspace is set, unqualified query works: -session.execute("SELECT * FROM my_table WHERE id = 1"); -``` - -Be very careful though: switching the keyspace at runtime is inherently thread-unsafe, so if the -session is shared by multiple threads (and is usually is), it could easily cause unexpected query -failures. - -Finally, if you're connecting to Cassandra 4 or above, you can specify the keyspace independently -for each request: - -```java -CqlSession session = CqlSession.builder().build(); -session.execute( - SimpleStatement.newInstance("SELECT * FROM my_table WHERE id = 1") - .setKeyspace(CqlIdentifier.fromCql("my_keyspace"))); -``` - -### Running queries - -You run queries with the session's `execute*` methods: - -```java -ResultSet rs = session.execute("SELECT release_version FROM system.local"); -``` - -As shown here, the simplest form is to pass a query string directly. You can also pass a -[Statement](statements/) instance. - -#### Processing rows - -Executing a query produces a [ResultSet], which is an iterable of [Row]. The basic way to process -all rows is to use Java's for-each loop: - -```java -for (Row row : rs) { - // process the row -} -``` - -This will return **all results** without limit (even though the driver might use multiple queries in -the background). To handle large result sets, you might want to use a `LIMIT` clause in your CQL -query, or use one of the techniques described in the [paging](paging/) documentation. 
- -When you know that there is only one row (or are only interested in the first one), the driver -provides a convenience method: - -```java -Row row = rs.one(); -``` - -#### Reading columns - -[Row] provides getters to extract column values; they can be either positional or named: - -```java -Row row = session.execute("SELECT first_name, last_name FROM users WHERE id = 1").one(); - -// The two are equivalent: -String firstName = row.getString(0); -String firstName = row.getString(CqlIdentifier.fromCql("first_name")); -``` - -[CqlIdentifier] is a string wrapper that deals with case-sensitivity. If you don't want to create an -instance for each getter call, the driver also provides convenience methods that take a raw string: - -```java -String firstName = row.getString("first_name"); -``` - -See [AccessibleByName] for an explanation of the conversion rules. - -##### CQL to Java type mapping - -| CQL3 data type | Getter name | Java type | See also | -|--------------------|----------------|----------------------|-----------------------------------| -| ascii | getString | java.lang.String | | -| bigint | getLong | long | | -| blob | getByteBuffer | java.nio.ByteBuffer | | -| boolean | getBoolean | boolean | | -| counter | getLong | long | | -| date | getLocalDate | java.time.LocalDate | [Temporal types](temporal_types/) | -| decimal | getBigDecimal | java.math.BigDecimal | | -| double | getDouble | double | | -| duration | getCqlDuration | [CqlDuration] | [Temporal types](temporal_types/) | -| float | getFloat | float | | -| inet | getInetAddress | java.net.InetAddress | | -| int | getInt | int | | -| list | getList | java.util.List | | -| map | getMap | java.util.Map | | -| set | getSet | java.util.Set | | -| smallint | getShort | short | | -| text | getString | java.lang.String | | -| time | getLocalTime | java.time.LocalTime | [Temporal types](temporal_types/) | -| timestamp | getInstant | java.time.Instant | [Temporal types](temporal_types/) | -| timeuuid | getUuid 
| java.util.UUID | | -| tinyint | getByte | byte | | -| tuple | getTupleValue | [TupleValue] | [Tuples](tuples/) | -| user-defined types | getUDTValue | [UDTValue] | [User-defined types](udts/) | -| uuid | getUuid | java.util.UUID | | -| varchar | getString | java.lang.String | | -| varint | getBigInteger | java.math.BigInteger | | -| vector | getVector | [CqlVector] | [Custom Codecs](custom_codecs/) | - -Sometimes the driver has to infer a CQL type from a Java type (for example when handling the values -of [simple statements](statements/simple/)); for those that have multiple CQL equivalents, it makes -the following choices: - -* `java.lang.String`: `text` -* `long`: `bigint` -* `java.util.UUID`: `uuid` - -In addition to these default mappings, you can register your own types with -[custom codecs](custom_codecs/). - -##### Primitive types - -For performance reasons, the driver uses primitive Java types wherever possible (`boolean`, -`int`...); the CQL value `NULL` is encoded as the type's default value (`false`, `0`...), which can -be ambiguous. To distinguish `NULL` from actual values, use `isNull`: - -```java -Integer age = row.isNull("age") ? null : row.getInt("age"); -``` -##### Collection types - -To ensure type safety, collection getters are generic. You need to provide type parameters matching -your CQL type when calling the methods: - -```java -// Assuming given_names is a list: -List givenNames = row.getList("given_names", String.class); -``` - -For nested collections, element types are generic and cannot be expressed as Java `Class` instances. -Use [GenericType] instead: - -```java -// Assuming teams is a set>: -GenericType>> listOfStrings = new GenericType>>() {}; -Set> teams = row.get("teams", listOfStrings); -``` - -Since generic types are anonymous inner classes, it's recommended to store them as constants in a -utility class instead of re-creating them each time. 
- -##### Row metadata - -[ResultSet] and [Row] expose an API to explore the column metadata at runtime: - -```java -for (ColumnDefinitions.Definition definition : row.getColumnDefinitions()) { - System.out.printf("Column %s has type %s%n", - definition.getName(), - definition.getType()); -} -``` - -[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html -[CqlSession#builder()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html#builder-- -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html -[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/AccessibleByName.html -[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html -[CqlDuration]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/CqlDuration.html -[CqlVector]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/CqlVector.html -[TupleValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/TupleValue.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html -[SessionBuilder.addContactPoint()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoint-java.net.InetSocketAddress- -[SessionBuilder.addContactPoints()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoints-java.util.Collection- 
-[SessionBuilder.withLocalDatacenter()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withLocalDatacenter-java.lang.String- - -[CASSANDRA-10145]: https://issues.apache.org/jira/browse/CASSANDRA-10145 diff --git a/manual/core/address_resolution/README.md b/manual/core/address_resolution/README.md deleted file mode 100644 index 5b2536feb18..00000000000 --- a/manual/core/address_resolution/README.md +++ /dev/null @@ -1,200 +0,0 @@ - - -## Address resolution - -### Quick overview - -The driver uses `system.peers.rpc-address` to connect to newly discovered nodes. For special network -topologies, an address translation component can be plugged in. - -* `advanced.address-translator` in the configuration. -* none by default. Also available: EC2-specific (for deployments that span multiple regions), or - write your own. - ------ - -Each node in the Cassandra cluster is uniquely identified by an IP address that the driver will use -to establish connections. - -* for contact points, these are provided as part of configuring the `CqlSession` object; -* for other nodes, addresses will be discovered dynamically, either by inspecting `system.peers` on - already connected nodes, or via push notifications received on the control connection when new - nodes are discovered by gossip. - - -### Cassandra-side configuration - -The address that each Cassandra node shares with clients is the **broadcast RPC address**; it is -controlled by various properties in [cassandra.yaml]: - -* [rpc_address] or [rpc_interface] is the address that the Cassandra process *binds to*. 
You must - set one or the other, not both (for more details, see the inline comments in the default - `cassandra.yaml` that came with your installation); -* [broadcast_rpc_address] \(introduced in Cassandra 2.1) is the address to share with clients, if it - is different than the previous one (the reason for having a separate property is if the bind - address is not public to clients, because there is a router in between). - -If `broadcast_rpc_address` is not set, it defaults to `rpc_address`/`rpc_interface`. If -`rpc_address`/`rpc_interface` is 0.0.0.0 (all interfaces), then `broadcast_rpc_address` *must* be -set. - -If you're not sure which address a Cassandra node is broadcasting, launch cqlsh locally on the node, -execute the following query and take node of the result: - -``` -cqlsh> select broadcast_address from system.local; - - broadcast_address -------------------- - 172.1.2.3 -``` - -Then connect to *another* node in the cluster and run the following query, injecting the previous -result: - -``` -cqlsh> select rpc_address from system.peers where peer = '172.1.2.3'; - - rpc_address -------------- - 1.2.3.4 -``` - -That last result is the broadcast RPC address. Ensure that it is accessible from the client machine -where the driver will run. - - -### Driver-side address translation - -Sometimes it's not possible for Cassandra nodes to broadcast addresses that will work for each and -every client; for instance, they might broadcast private IPs because most clients are in the same -network, but a particular client could be on another network and go through a router. - -For such cases, you can register a driver-side component that will perform additional address -translation. 
Write a class that implements [AddressTranslator] with the following constructor: - -```java -public class MyAddressTranslator implements AddressTranslator { - - public PassThroughAddressTranslator(DriverContext context, DriverOption configRoot) { - // retrieve any required dependency or extra configuration option, otherwise can stay empty - } - - @Override - public InetSocketAddress translate(InetSocketAddress address) { - // your custom translation logic - } - - @Override - public void close() { - // free any resources if needed, otherwise can stay empty - } -} -``` - -Then reference this class from the [configuration](../configuration/): - -``` -datastax-java-driver.advanced.address-translator.class = com.mycompany.MyAddressTranslator -``` - -Note: the contact points provided while creating the `CqlSession` are not translated, only addresses -retrieved from or sent by Cassandra nodes are. - -### Fixed proxy hostname - -If your client applications access Cassandra through some kind of proxy (eg. with AWS PrivateLink when all Cassandra -nodes are exposed via one hostname pointing to AWS Endpoint), you can configure driver with -`FixedHostNameAddressTranslator` to always translate all node addresses to that same proxy hostname, no matter what IP -address a node has but still using its native transport port. - -To use it, specify the following in the [configuration](../configuration): - -``` -datastax-java-driver.advanced.address-translator.class = FixedHostNameAddressTranslator -advertised-hostname = proxyhostname -``` - -### Fixed proxy hostname per subnet - -When running Cassandra in a private network and accessing it from outside of that private network via some kind of -proxy, we have an option to use `FixedHostNameAddressTranslator`. But for multi-datacenter Cassandra deployments, we -want to have more control over routing queries to a specific datacenter (eg. for optimizing latencies), which requires -setting up a separate proxy per datacenter. 
Normally, each Cassandra datacenter's nodes are deployed to a different subnet
To use it, specify the following in -the [configuration](../configuration/): - -``` -datastax-java-driver.advanced.address-translator.class = Ec2MultiRegionAddressTranslator -``` - -With this configuration, you keep broadcasting public RPC addresses. But each time the driver connects to a new -Cassandra node: - -* if the node is *in the same EC2 region*, the public IP will be translated to the intra-region private IP; -* otherwise, it will not be translated. - -(To achieve this, `Ec2MultiRegionAddressTranslator` performs a reverse DNS lookup of the origin address, to find the -domain name of the target instance. Then it performs a forward DNS lookup of the domain name; the EC2 DNS does the -private/public switch automatically based on location). - -[AddressTranslator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.html - -[cassandra.yaml]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html -[rpc_address]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html?scroll=configCassandra_yaml__rpc_address -[rpc_interface]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html?scroll=configCassandra_yaml__rpc_interface -[broadcast_rpc_address]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html?scroll=configCassandra_yaml__broadcast_rpc_address diff --git a/manual/core/async/README.md b/manual/core/async/README.md deleted file mode 100644 index 5b4bac3dccf..00000000000 --- a/manual/core/async/README.md +++ /dev/null @@ -1,229 +0,0 @@ - - -## Asynchronous programming - -### Quick overview - -Async driver methods return Java 8's [CompletionStage]. - -* don't call synchronous methods from asynchronous callbacks (the driver detects that and throws). 
-* callbacks execute on I/O threads: consider providing your own executor for expensive computations. -* be careful not to accidentally ignore errors thrown from callbacks. - ------ - -The driver exposes an asynchronous API that allows you to write fully non-blocking programs. -Asynchronous methods return instances of the JDK's [CompletionStage], that can be conveniently -chained and composed. - -Here is a short example that opens a session and runs a query asynchronously: - -```java -CompletionStage sessionStage = CqlSession.builder().buildAsync(); - -// Chain one async operation after another: -CompletionStage responseStage = - sessionStage.thenCompose( - session -> session.executeAsync("SELECT release_version FROM system.local")); - -// Apply a synchronous computation: -CompletionStage resultStage = - responseStage.thenApply(resultSet -> resultSet.one().getString("release_version")); - -// Perform an action once a stage is complete: -resultStage.whenComplete( - (version, error) -> { - if (error != null) { - System.out.printf("Failed to retrieve the version: %s%n", error.getMessage()); - } else { - System.out.printf("Server version: %s%n", version); - } - sessionStage.thenAccept(CqlSession::closeAsync); - }); -``` - -### Threading model - -The driver uses two internal thread pools: one for request I/O and one for administrative tasks -(such as metadata refreshes, schema agreement or processing server events). Note that you can -control the size of these pools with the `advanced.netty` options in the -[configuration](../configuration). 
- -When you register a callback on a completion stage, it will execute on a thread in the corresponding -pool: - -```java -CompletionStage sessionStage = CqlSession.builder().buildAsync(); -sessionStage.thenAccept(session -> System.out.println(Thread.currentThread().getName())); -// prints s0-admin-n (admin pool thread) - -CompletionStage resultStage = - session.executeAsync("SELECT release_version FROM system.local"); -resultStage.thenAccept(resultSet -> System.out.println(Thread.currentThread().getName())); -// prints s0-io-n (I/O pool thread) -``` - -As long as you use the asynchronous API, the driver will behave in a non-blocking manner: its -internal threads will almost never block. There are a few exceptions to the rule though: see the -manual page on [non-blocking programming](../non_blocking) for details. - -Because the asynchronous API is non-blocking, you can safely call a driver method from inside a -callback, even when the callback's execution is triggered by a future returned by the driver: - -```java -// Get the department id for a given user: -CompletionStage idStage = - session.executeAsync("SELECT department_id FROM user WHERE id = 1"); - -// Once we have the id, query the details of that department: -CompletionStage dataStage = - idStage.thenCompose( - resultSet -> { - UUID departmentId = resultSet.one().getUuid(0); - return session.executeAsync( - SimpleStatement.newInstance( - "SELECT * FROM department WHERE id = ?", departmentId)); - }); -``` - -However, you can't call a synchronous method from a callback. This would be very unsafe, because the -driver blocks until the response is received; if the request happened to be assigned to the same -I/O thread that is currently running the callback, it would deadlock. 
In fact, the driver detects -this situation, and fails fast with a runtime exception to eliminate any chance of a hard-to-debug -deadlock: - -```java -CompletionStage dataStage = - idStage.thenApply( - resultSet -> { - UUID departmentId = resultSet.one().getUuid(0); - // WRONG: calling a synchronous method from an asynchronous callback. DON'T DO THIS! - return session.execute( - SimpleStatement.newInstance( - "SELECT * FROM department WHERE id = ?", departmentId)); - }); - -// This is just to show the exception: -dataStage.whenComplete( - (resultSet, error) -> { - if (error != null) { - error.printStackTrace(); - } - }); -// java.util.concurrent.CompletionException: -// java.lang.IllegalStateException: Detected a synchronous API call on a driver thread, -// failing because this can cause deadlocks. -``` - -You should also be careful about expensive computations: if your callbacks hold I/O threads for too -long, they will negatively impact the driver's throughput. Consider providing your own executor: - -```java -// Create this as a global resource in your application -Executor computeExecutor = ... - -CompletionStage resultStage = - responseStage.thenApplyAsync( // note: thenApplyAsync instead of thenApply - resultSet -> someVeryExpensiveComputation(resultSet), - computeExecutor); -``` - -Note that an alternate executor can also be used to allow synchronous driver API calls in callbacks, -but the recommended approach is to fully commit to the asynchronous model described above. - -### Error propagation - -One thing to pay attention to when programming asynchronously is error handling (this is not -specific to the driver). 
When all your callback does is a side effect, it's easy to accidentally -swallow an exception: - -```java -CompletionStage responseStage = - sessionStage.thenCompose( - session -> session.executeAsync("SELECT release_version FROM system.local")); -responseStage.thenAccept( - resultSet -> { - String version = resultSet.one().getString(0); - System.out.printf("Server version: %s%n", version); - }); -``` - -If the request fails, `responseStage` is failed, and `thenAccept` doesn't run the callback at all -(it just returns another failed stage). The error won't be surfaced anywhere, just silently ignored. -One way to address this is with `whenComplete`, which explicitly handles the error: - -```java -responseStage.whenComplete( - (resultSet, error) -> { - if (error != null) { - System.out.printf("Failed to retrieve the version: %s%n", error.getMessage()); - } else { - String version = resultSet.one().getString(0); - System.out.printf("Server version: %s%n", version); - } - }); -``` - -Or you can capture the stage returned by `thenAccept` (here `printStage`), and handle the error further down -the chain: - -```java -CompletionStage printStage = - responseStage.thenAccept( - resultSet -> { - String version = resultSet.one().getString(0); - System.out.printf("Server version: %s%n", version); - }); -// Here trivially handled right away for the sake of example, but could be after more operations: -printStage.exceptionally(error -> { - System.out.printf("Failed to retrieve the version: %s%n", error.getMessage()); - return null; -}); -``` - -One more subtle source of errors is if the callback itself throws: - -```java -responseStage.whenComplete( - (resultSet, error) -> { - if (error != null) { - System.out.printf("Request failed: %s%n", error.getMessage()); - } else { - int v = resultSet.one().getInt(0); - System.out.printf("The result is %d%n", 100 / v); - } - }); -``` - -There is a potential division by zero on the last line; the resulting `ArithmeticException` wouldn't -be handled 
anywhere. Either add a `try/catch` block in the callback, or don't ignore the result of -`whenComplete`. - -### Asynchronous paging - -Unlike previous versions of the driver, the asynchronous API never triggers synchronous behavior, -even when iterating through the results of a request. `session.executeAsync` returns a dedicated -[AsyncResultSet] that only iterates the current page, the next pages must be fetched explicitly. -This greatly simplifies asynchronous paging; see the [paging](../paging/#asynchronous-paging) -documentation for more details and an example. - -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html - -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html diff --git a/manual/core/authentication/README.md b/manual/core/authentication/README.md deleted file mode 100644 index 516e47f558f..00000000000 --- a/manual/core/authentication/README.md +++ /dev/null @@ -1,258 +0,0 @@ - - -## Authentication - -### Quick overview - -* `advanced.auth-provider` in the configuration. -* disabled by default. Also available: plain-text credentials, GSSAPI (DSE only), or write your own. -* can also be defined programmatically: - [CqlSession.builder().withAuthCredentials][SessionBuilder.withAuthCredentials] or - [CqlSession.builder().withAuthProvider][SessionBuilder.withAuthProvider]. - ------ - -Cassandra's binary protocol supports [SASL]-based authentication. To use it, you must provide an -*auth provider* that will authenticate with the server every time a new connection gets established. - -This can be done in two ways: - -### In the configuration - -Define an `auth-provider` section in the [configuration](../configuration/): - -``` -datastax-java-driver { - advanced.auth-provider { - class = ... - } -} -``` - -The auth provider must be configured before opening a session, it cannot be changed at runtime. 
- -#### Plain text - -`PlainTextAuthProvider` supports simple username/password authentication (intended to work with the -server-side `PasswordAuthenticator`). The credentials can be changed at runtime, they will be used -for new connection attempts once the configuration gets reloaded. - -``` -datastax-java-driver { - advanced.auth-provider { - class = PlainTextAuthProvider - username = cassandra - password = cassandra - } -} -``` - -When connecting to DSE, an optional `authorization-id` can also be specified. It will be used for -proxy authentication (logging in as another user or role). If you try to use this feature with an -authenticator that doesn't support it, the authorization id will be ignored. - -``` -datastax-java-driver { - advanced.auth-provider { - class = PlainTextAuthProvider - username = user - password = pass - authorization-id = otherUserOrRole - } -} -``` - -Note that, for backward compatibility with previous driver versions, you can also use the class name -`DsePlainTextAuthProvider` to enable this provider. - -#### GSSAPI (DSE only) - -`DseGssApiAuthProvider` supports GSSAPI authentication against a DSE cluster secured with Kerberos: - -``` -datastax-java-driver { - advanced.auth-provider { - class = DseGssApiAuthProvider - login-configuration { - principal = "user principal here ex cassandra@DATASTAX.COM" - useKeyTab = "true" - refreshKrb5Config = "true" - keyTab = "Path to keytab file here" - } - } - } -``` - -See the comments in [reference.conf] for more details. - -#### Custom - -You can also write your own provider; it must implement [AuthProvider] and declare a public -constructor with a [DriverContext] argument. - -``` -datastax-java-driver { - advanced.auth-provider { - class = com.mycompany.MyCustomAuthProvider - ... 
// any custom options your provider might use - } -} -``` - -### Programmatically - -You can also pass an authenticator instance while building the session: - -```java -CqlSession session = - CqlSession.builder() - .withAuthProvider(new MyCustomAuthProvider()) - .build(); -``` - -The driver also offers a simple, built-in plain text authentication provider: -[ProgrammaticPlainTextAuthProvider]. The following is equivalent to using `PlainTextAuthProvider` in -the configuration: - -```java -AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("user", "pass"); - -CqlSession session = - CqlSession.builder() - .withAuthProvider(authProvider) - .build(); -``` - -For convenience, there are shortcuts that take the credentials directly: - -```java -CqlSession session = - CqlSession.builder() - .withAuthCredentials("user", "pass") - .build(); - -// With proxy authentication (DSE only) -CqlSession session = - CqlSession.builder() - .withAuthCredentials("user", "pass", "otherUserOrRole") - .build(); -``` - -One downside of the driver's built-in authentication providers is that the credentials are stored in -clear text in memory; this means they are vulnerable to an attacker who is able to perform memory -dumps. If this is not acceptable for you, consider writing your own [AuthProvider] implementation; -[PlainTextAuthProviderBase] is a good starting point. - -Similarly, [ProgrammaticDseGssApiAuthProvider] lets you configure GSSAPI programmatically: - -```java -import com.datastax.dse.driver.api.core.auth.DseGssApiAuthProviderBase.GssApiOptions; - -javax.security.auth.Subject subject = ...; // do your Kerberos configuration here - -GssApiOptions options = GssApiOptions.builder().withSubject(subject).build(); -CqlSession session = CqlSession.builder() - .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(options)) - .build(); -``` - -For more complex needs (e.g. 
if building the options once and reusing them doesn't work for you), -you can subclass [DseGssApiAuthProviderBase]. - -### Proxy authentication - -DSE allows a user to connect as another user or role: - -``` --- Allow bob to connect as alice: -GRANT PROXY.LOGIN ON ROLE 'alice' TO 'bob' -``` - -Once connected, all authorization checks will be performed against the proxy role (alice in this -example). - -To use proxy authentication with the driver, you need to provide the **authorization-id**, in other -words the name of the role you want to connect as. - -Example for plain text authentication: - -``` -datastax-java-driver { - advanced.auth-provider { - class = PlainTextAuthProvider - username = bob - password = bob's password - authorization-id = alice - } - } -``` - -With the GSSAPI (Kerberos) provider: - -``` -datastax-java-driver { - advanced.auth-provider { - class = DseGssApiAuthProvider - authorization-id = alice - login-configuration { - principal = "user principal here ex bob@DATASTAX.COM" - useKeyTab = "true" - refreshKrb5Config = "true" - keyTab = "Path to keytab file here" - } - } - } -``` - -### Proxy execution - -Proxy execution is similar to proxy authentication, but it applies to a single query, not the whole -session. - -``` --- Allow bob to execute queries as alice: -GRANT PROXY.EXECUTE ON ROLE 'alice' TO 'bob' -``` - -For this scenario, you would **not** add the `authorization-id = alice` to your configuration. 
-Instead, use [ProxyAuthentication.executeAs] to wrap your query with the correct authorization for -the execution: - -```java -import com.datastax.dse.driver.api.core.auth.ProxyAuthentication; - -SimpleStatement statement = SimpleStatement.newInstance("some query"); -// executeAs returns a new instance, you need to re-assign -statement = ProxyAuthentication.executeAs("alice", statement); -session.execute(statement); -``` - -[SASL]: https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer - -[AuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/auth/AuthProvider.html -[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html -[PlainTextAuthProviderBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.html -[ProgrammaticPlainTextAuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.html -[DseGssApiAuthProviderBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.html -[ProgrammaticDseGssApiAuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.html -[ProxyAuthentication.executeAs]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.html#executeAs-java.lang.String-StatementT- -[SessionBuilder.withAuthCredentials]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withAuthCredentials-java.lang.String-java.lang.String- -[SessionBuilder.withAuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withAuthProvider-com.datastax.oss.driver.api.core.auth.AuthProvider- -[reference.conf]: 
../configuration/reference/ diff --git a/manual/core/bom/README.md b/manual/core/bom/README.md deleted file mode 100644 index 235edcf632c..00000000000 --- a/manual/core/bom/README.md +++ /dev/null @@ -1,126 +0,0 @@ - - -## Bill of Materials (BOM) - -A "Bill Of Materials" is a special Maven descriptor that defines the versions of a set of related -artifacts. - -To import the driver's BOM, add the following section in your application's own POM: - -```xml - - ... - - - - org.apache.cassandra - java-driver-bom - 4.17.0 - pom - import - - - -``` - -This allows you to omit the version when you later reference the driver artifacts: - -```xml - - ... - - - org.apache.cassandra - java-driver-query-builder - - -``` - -The advantage is that this also applies to transitive dependencies. For example, if there is a -third-party library X that depends on `java-driver-core`, and you add a dependency to X in this -project, `java-driver-core` will be set to the BOM version, regardless of which version X declares -in its POM. The driver artifacts are always in sync, however they were pulled into the project. - -### BOM and mapper processor - -If you are using the driver's [object mapper](../../mapper), our recommendation is to declare the -mapper processor in the [annotationProcessorPaths](../../mapper/config/#maven) section of the -compiler plugin configuration. Unfortunately, `` versions don't work there, -this is a known Maven issue ([MCOMPILER-391]). - -As a workaround, you can either declare the mapper processor as a regular dependency in the provided -scope: - -```xml - - - org.apache.cassandra - java-driver-mapper-processor - provided - - -``` - -Or keep it in the compiler plugin, but repeat the version explicitly. 
In that case, it's probably a -good idea to extract a property to keep it in sync with the BOM: - -```xml - - - 4.17.0 - - - - - org.apache.cassandra - java-driver-bom - ${java-driver.version} - pom - import - - - - - - - org.apache.cassandra - java-driver-mapper-runtime - - - - - - maven-compiler-plugin - - - - - org.apache.cassandra - java-driver-mapper-processor - ${java-driver.version} - - - - - - -``` - -[MCOMPILER-391]: https://issues.apache.org/jira/browse/MCOMPILER-391 diff --git a/manual/core/compression/README.md b/manual/core/compression/README.md deleted file mode 100644 index 9f7ae3c4854..00000000000 --- a/manual/core/compression/README.md +++ /dev/null @@ -1,104 +0,0 @@ - - -## Compression - -### Quick overview - -Compress request and response bodies to save bandwidth. - -* `advanced.protocol.compression` in the configuration. -* disabled by default. Also available: LZ4, Snappy. -* your application **must** re-declare an explicit dependency to the compression library. - ------ - -Cassandra's binary protocol supports optional compression of requests and responses. This reduces -network traffic at the cost of a slight CPU overhead, therefore it will likely be beneficial when -you have larger payloads, such as: - -* requests with many values, or very large values; -* responses with many rows, or many columns per row, or very large columns. - -To enable compression, set the following option in the [configuration](../configuration): - -``` -datastax-java-driver { - advanced.protocol.compression = lz4 // or snappy -} -``` - -Compression must be set before opening a session, it cannot be changed at runtime. - -Two algorithms are supported out of the box: [LZ4](https://github.com/yawkat/lz4-java) and -[Snappy](http://google.github.io/snappy/). 
The LZ4 implementation is a good first choice; it offers -fallback implementations in case native libraries fail to load and -[benchmarks](http://java-performance.info/performance-general-compression/) suggest that it offers -better performance and compression ratios over Snappy. - -Both implementations rely on third-party libraries, declared by the driver as *optional* -dependencies; if you enable compression, you need to explicitly depend on the corresponding library -to pull it into your project (see the [Integration>Driver -dependencies](../integration/#driver-dependencies) section for more details). - -### LZ4 - -Dependency: - -```xml - - at.yawk.lz4 - lz4-java - 1.10.1 - -``` - -Always double-check the exact LZ4 version needed; you can find it in the driver's [parent POM]. - -LZ4-java has three internal implementations (from fastest to slowest): - -* JNI; -* pure Java using `sun.misc.Unsafe`; -* pure Java using only "safe" classes. - -It will pick the best implementation depending on what's possible on your platform. To find out -which one was chosen, [enable INFO logs](../logging/) on the category -`com.datastax.oss.driver.internal.core.protocol.Lz4Compressor` and look for the following message: - -``` -INFO com.datastax.oss.driver.internal.core.protocol.Lz4Compressor - Using LZ4Factory:JNI -``` - -### Snappy - -Dependency: - -```xml - - org.xerial.snappy - snappy-java - 1.1.2.6 - -``` - -**Important: Snappy is not supported when building a [GraalVM native image](../graalvm).** - -Always double-check the exact Snappy version needed; you can find it in the driver's [parent POM]. 
- -[parent POM]: https://search.maven.org/search?q=g:com.datastax.oss%20AND%20a:java-driver-parent&core=gav diff --git a/manual/core/configuration/README.md b/manual/core/configuration/README.md deleted file mode 100644 index deefadbe3d4..00000000000 --- a/manual/core/configuration/README.md +++ /dev/null @@ -1,557 +0,0 @@ - - -## Configuration - -### Quick overview - -The driver's configuration is composed of options, organized in a hierarchical manner. Optionally, -it can define *profiles* that customize a set of options for a particular kind of request. - -* the default implementation is based on the Typesafe Config framework: - * the driver JAR comes with a [reference.conf] file that defines the defaults. - * you can add an `application.conf` file in the classpath (or an absolute path, or an URL). It - only needs to contain the options that you override. - * hot reloading is supported out of the box. -* the config mechanism can be completely overridden by implementing a set of driver interfaces - ([DriverConfig], [DriverExecutionProfile] and [DriverConfigLoader]) - ------ - -### Concepts - -#### Options - -Essentially, an option is a path in the configuration with an expected type, for example -`basic.request.timeout`, representing a duration. - -#### Execution profiles - -Imagine an application that does both transactional and analytical requests. Transactional requests -are simpler and must return quickly, so they will typically use a short timeout, let's say 100 -milliseconds; analytical requests are more complex and less frequent so a higher SLA is acceptable, -for example 5 seconds. In addition, maybe you want to use a different consistency level. 
- -Instead of manually adjusting the options on every request, you can create execution profiles: - -``` -datastax-java-driver { - profiles { - oltp { - basic.request.timeout = 100 milliseconds - basic.request.consistency = ONE - } - olap { - basic.request.timeout = 5 seconds - basic.request.consistency = QUORUM - } - } -} -``` - -Now each request only needs a profile name: - -```java -SimpleStatement s = - SimpleStatement.builder("SELECT name FROM user WHERE id = 1") - .setExecutionProfileName("oltp") - .build(); -session.execute(s); -``` - -The configuration has an anonymous *default profile* that is always present. It can define an -arbitrary number of named profiles. They inherit from the default profile, so you only need to -override the options that have a different value. - - -### Default implementation: Typesafe Config - -Out of the box, the driver uses [Typesafe Config]. - -It looks at the following locations, according to the [standard behavior][config standard behavior] -of that library: - -* system properties -* `application.conf` (all resources on the classpath with this name) -* `application.json` (all resources on the classpath with this name) -* `application.properties` (all resources on the classpath with this name) -* `reference.conf` (all resources on the classpath with this name) - -The driver ships with a [reference.conf] that defines sensible defaults for all the options. That -file is heavily documented, so refer to it for details about each option. It is included in the core -driver JAR, so it is in your application's classpath. If you need to customize something, add an -`application.conf` to the classpath. There are various ways to do it: - -* place the file in a directory that is on your application or application server's classpath - ([example for Apache Tomcat](https://stackoverflow.com/questions/1300780/adding-a-directory-to-tomcat-classpath)); -* if you use Maven, place it in the `src/main/resources` directory. 
- -Since `application.conf` inherits from `reference.conf`, you only need to redeclare what you -override: - -``` -# Sample application.conf: overrides one option and adds a profile -datastax-java-driver { - advanced.protocol.version = V4 - profiles { - slow { - basic.request.timeout = 10 seconds - } - } -} -``` - -`.conf` files are in the *HOCON* format, an improved superset of JSON; refer to the -[HOCON spec][HOCON] for details. - -By default, configuration files are reloaded regularly, and the driver will adjust to the new values -(on a "best effort" basis: some options, like protocol version and policy configurations, cannot be -changed at runtime and will be ignored). The reload interval is defined in the configuration: - -``` -# To disable periodic reloading, set this to 0. -datastax-java-driver.basic.config-reload-interval = 5 minutes -``` - -As mentioned previously, system properties can also be used to override individual options. This is -great for temporary changes, for example in your development environment: - -``` -# Increase heartbeat interval to limit the amount of debug logs: -java -Ddatastax-java-driver.advanced.heartbeat.interval="5 minutes" ... -``` - -For array options, provide each element separately by appending an index to the path: - -``` --Ddatastax-java-driver.basic.contact-points.0="127.0.0.1:9042" --Ddatastax-java-driver.basic.contact-points.1="127.0.0.2:9042" -``` - -We recommend reserving system properties for the early phases of the project; in production, having -all the configuration in one place will make it easier to manage and review. - -As shown so far, all options live under a `datastax-java-driver` prefix. This can be changed, for -example if you need multiple driver instances in the same VM with different configurations. See the -[Advanced topics](#changing-the-config-prefix) section. 
- -#### Alternate application config locations - -If loading `application.conf` from the classpath doesn't work for you, other loader implementations -are available: - -* [DriverConfigLoader.fromClasspath]: still load from the classpath, but use a different resource - name. For example "config" will try to load `config.conf`, `config.json` or `config.properties`. -* [DriverConfigLoader.fromFile]: load from a file on the local filesystem. -* [DriverConfigLoader.fromUrl]: load from a URL. - -To use any of those loaders, pass it to the session builder: - -```java -File file = new File("/path/to/application.conf"); -CqlSession session = CqlSession.builder() - .withConfigLoader(DriverConfigLoader.fromFile(file)) - .build(); -``` - -Apart from application-specific configuration, they work exactly like the default loader: they -fall back to the driver's built-in `reference.conf` for defaults, accept overrides via system -properties, and reload at the interval specified by the `basic.config-reload-interval` option. - -#### Programmatic application config - -Alternatively, you can use [DriverConfigLoader.programmaticBuilder] to specify configuration options -programmatically instead of loading them from a static resource: - -```java -DriverConfigLoader loader = - DriverConfigLoader.programmaticBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5)) - .startProfile("slow") - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .endProfile() - .build(); -CqlSession session = CqlSession.builder().withConfigLoader(loader).build(); -``` - -This is useful for frameworks and tools that already have their own configuration mechanism. - -### The configuration API - -You don't need the configuration API for everyday usage of the driver, but it can be useful if: - -* you're writing custom policies or a custom config implementation; -* use dynamic profiles (see below); -* or simply want to read configuration options at runtime. 
- -#### Basics - -The driver's context exposes a [DriverConfig] instance: - -```java -DriverConfig config = session.getContext().getConfig(); -DriverExecutionProfile defaultProfile = config.getDefaultProfile(); -DriverExecutionProfile olapProfile = config.getProfile("olap"); - -config.getProfiles().forEach((name, profile) -> ...); -``` - -[DriverExecutionProfile] has typed option getters: - -```java -Duration requestTimeout = defaultProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); -int maxRequestsPerConnection = defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); -``` - -#### Manual reloading - -In addition to periodic reloading, you can trigger a reload programmatically. This returns a -`CompletionStage` that you can use for example to register a callback when the reload is complete: - -```java -DriverConfigLoader loader = session.getContext().getConfigLoader(); -if (loader.supportsReloading()) { - CompletionStage reloaded = loader.reload(); - reloaded.whenComplete( - (configChanged, error) -> { - if (error != null) { - // handle error - } else if (configChanged) { - // do something after the config change - } - }); -} -``` - -Manual reloading is optional, this can be checked with `supportsReloading()`; the driver's built-in -loader supports it. - -#### Derived profiles - -Execution profiles are hard-coded in the configuration, and can't be changed at runtime (except -by modifying and reloading the files). What if you want to adjust an option for a single request, -without having a dedicated profile for it? 
- -To allow this, you start from an existing profile in the configuration and build a *derived profile* -that overrides a subset of options: - -```java -DriverExecutionProfile defaultProfile = session.getContext().getConfig().getDefaultProfile(); -DriverExecutionProfile dynamicProfile = - defaultProfile.withString( - DefaultDriverOption.REQUEST_CONSISTENCY, DefaultConsistencyLevel.EACH_QUORUM.name()); -SimpleStatement s = - SimpleStatement.builder("SELECT name FROM user WHERE id = 1") - .setExecutionProfile(dynamicProfile) - .build(); -session.execute(s); -``` - -A derived profile keeps a reference to its base profile, and reflects the change if the -configuration gets reloaded. - -Do not overuse derived profiles, as they can have an impact on performance: each `withXxx` method -creates a new copy, and propagating the changes from the base profile also has an overhead. We -strongly suggest defining all your profiles ahead of time in the configuration file; at the very -least, try to cache derived profiles if you reuse them multiple times. - - -### Advanced topics - -*Note: all the features described in this section use the driver's internal API, which is subject to -the restrictions explained in [API conventions]*. - -#### Changing the config prefix - -As mentioned earlier, all configuration options are looked up under the `datastax-java-driver` -prefix. This might be a problem if you have multiple instances of the driver executing in the same -VM, but with different configurations. What you want instead is separate option trees, like this: - -``` -# application.conf -session1 { - basic.session-name = "session1" - advanced.protocol-version = V4 - // etc. -} -session2 { - basic.session-name = "session2" - advanced.protocol-version = V3 - // etc. 
-} -``` - -To achieve that, first write a method that loads the configuration under your prefix, and uses the -driver's `reference.conf` as a fallback: - -```java -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; - -private static Config loadConfig(String prefix) { - // Make sure we see the changes when reloading: - ConfigFactory.invalidateCaches(); - - // Every config file in the classpath, without stripping the prefixes - Config root = ConfigFactory.load(); - - // The driver's built-in defaults, under the default prefix in reference.conf: - Config reference = root.getConfig("datastax-java-driver"); - - // Everything under your custom prefix in application.conf: - Config application = root.getConfig(prefix); - - return application.withFallback(reference); -} -``` - -Next, create a `DriverConfigLoader`. This is the component that abstracts the configuration -implementation to the rest of the driver. Here we use the built-in class, but tell it to load the -Typesafe Config object with the previous method: - -```java -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; - -DriverConfigLoader session1ConfigLoader = - new DefaultDriverConfigLoader( - () -> loadConfig("session1"), DefaultDriverOption.values()); -``` - -Finally, pass the config loader when building the driver: - -```java -CqlSession session1 = - CqlSession.builder() - .withConfigLoader(session1ConfigLoader) - .build(); -``` - -#### Loading from a different source - -If you don't want to use a config file, you can write custom code to create the Typesafe `Config` -object (refer to the [documentation][Typesafe Config] for more details). - -Then reuse the examples from the previous section to merge it with the driver's reference file, and -pass it to the driver. 
Here's a contrived example that loads the configuration from a string: - -```java -String configSource = "protocol.version = V3"; -DriverConfigLoader loader = - new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config reference = ConfigFactory.load().getConfig("datastax-java-driver"); - Config application = ConfigFactory.parseString(configSource); - return application.withFallback(reference); - }, - DefaultDriverOption.values()); - -CqlSession session = CqlSession.builder().withConfigLoader(loader).build(); -``` - -#### Bypassing Typesafe Config - -If Typesafe Config doesn't work for you, it is possible to get rid of it entirely. - -Start by excluding Typesafe Config from the list of dependencies required by the driver; if you are -using Maven, this can be achieved as follows: - -```xml - - - org.apache.cassandra - java-driver-core - ... - - - com.typesafe - config - - - - - -``` -Next, you will need to provide your own implementations of [DriverConfig] and -[DriverExecutionProfile]. Then write a [DriverConfigLoader] and pass it to the session at -initialization, as shown in the previous sections. Study the built-in implementation (package -`com.datastax.oss.driver.internal.core.config.typesafe`) for reference. - -Reloading is not mandatory: you can choose not to implement it, and the driver will simply keep -using the initial configuration. - -Note that the option getters (`DriverExecutionProfile.getInt` and similar) are invoked very -frequently on the hot code path; if your implementation is slow, consider caching the results -between reloads. - -#### Configuration change event - -If you're writing your own policies, you might want them to be reactive to configuration changes. 
-You can register a callback to `ConfigChangeEvent`, which gets emitted any time a manual or periodic
-reload detects changes since the last reload:
-
-```java
-import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
-import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent;
-
-InternalDriverContext context = (InternalDriverContext) session.getContext();
-
-Object key =
-    context.getEventBus().register(
-        ConfigChangeEvent.class, (e) -> {
-          System.out.println("The configuration changed");
-          // re-read the config option(s) you're interested in, and apply changes if needed
-        });
-
-// If your component has a shorter lifecycle than the driver, make sure to unregister when it closes
-context.getEventBus().unregister(key, ConfigChangeEvent.class);
-```
-
-For example, the driver uses this mechanism internally to resize connection pools if you change the
-options in `advanced.connection.pool`.
-
-The event is emitted by the config loader. If you write a custom loader, study the source of
-`DefaultDriverConfigLoader` to reproduce the behavior.
-
-#### Policies
-
-The preferred way to instantiate policies (load balancing policy, retry policy, etc.)
is via the -configuration: - -``` -datastax-java-driver { - basic.load-balancing-policy.class = DefaultLoadBalancingPolicy - advanced.reconnection-policy { - class = ExponentialReconnectionPolicy - base-delay = 1 second - max-delay = 60 seconds - } -} -``` - -When the driver encounters such a declaration, it will load the class and use reflection to invoke a -constructor with the following signature: - -* for policies that can be overridden in a profile (load balancing policy, retry policy, speculative - execution policy): - - ```java - public DefaultLoadBalancingPolicy(DriverContext context, String profileName) - ``` - -* for session-wide policies (all the others): - - ```java - public ExponentialReconnectionPolicy(DriverContext context) - ``` - -Where [DriverContext] is the object returned by `session.getContext()`, which allows the policy to -access other driver components (for example the configuration). - -If you write custom policy implementations, you should follow that same pattern; it provides an -elegant way to switch policies without having to recompile the application (if your policy needs -custom options, see the next section). Study the built-in implementations for reference. - -If for some reason you really can't use reflection, there is a way out; subclass -`DefaultDriverContext` and override the corresponding method: - -```java -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; - -public class MyDriverContext extends DefaultDriverContext { - - public MyDriverContext(DriverConfigLoader configLoader, List> typeCodecs) { - super(configLoader, typeCodecs); - } - - @Override - protected ReconnectionPolicy buildReconnectionPolicy() { - return myReconnectionPolicy; - } -} -``` - -Then you'll need to pass an instance of this context to `DefaultSession.init`. You can either do so -directly, or subclass `SessionBuilder` and override the `buildContext` method. 
- -#### Custom options - -You can add your own options to the configuration. This is useful for custom components, or even as -a way to associate arbitrary key/value pairs with the session instance. - -First, write an enum that implements [DriverOption]: - -```java -public enum MyCustomOption implements DriverOption { - - ADMIN_NAME("admin.name"), - ADMIN_EMAIL("admin.email"), - AWESOMENESS_FACTOR("awesomeness-factor"), - ; - - private final String path; - - MyCustomOption(String path) { - this.path = path; - } - - @Override - public String getPath() { - return path; - } -} -``` - -You can now add the options to your configuration: - -``` -datastax-java-driver { - admin { - name = "Bob" - email = "bob@example.com" - } - awesomeness-factor = 11 -} -``` - -And access them from the code: - -```java -DriverConfig config = session.getContext().getConfig(); -config.getDefaultProfile().getString(MyCustomOption.ADMIN_EMAIL); -config.getDefaultProfile().getInt(MyCustomOption.AWESOMENESS_FACTOR); -``` - -[DriverConfig]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfig.html -[DriverExecutionProfile]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.html -[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html -[DriverOption]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverOption.html -[DefaultDriverOption]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DefaultDriverOption.html -[DriverConfigLoader]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html -[DriverConfigLoader.fromClasspath]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromClasspath-java.lang.String- 
-[DriverConfigLoader.fromFile]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromFile-java.io.File- -[DriverConfigLoader.fromUrl]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromUrl-java.net.URL- -[DriverConfigLoader.programmaticBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#programmaticBuilder-- - -[Typesafe Config]: https://github.com/typesafehub/config -[config standard behavior]: https://github.com/typesafehub/config#standard-behavior -[reference.conf]: reference/ -[HOCON]: https://github.com/typesafehub/config/blob/master/HOCON.md -[API conventions]: ../../api_conventions diff --git a/manual/core/configuration/reference/README.rst b/manual/core/configuration/reference/README.rst deleted file mode 100644 index d4989ecf641..00000000000 --- a/manual/core/configuration/reference/README.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, - software distributed under the License is distributed on an - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied. See the License for the - specific language governing permissions and limitations - under the License. - -Reference configuration ------------------------ - -The following is a copy of the ``reference.conf`` file matching the version of this documentation. 
-It is packaged in the ``java-driver-core`` JAR artifact, and used at runtime to provide the default -values for all configuration options (in the sources, it can be found under -``core/src/main/resources``). - -See the `configuration page <../>`_ for more explanations. - -.. raw:: html - - - -.. include:: core/src/main/resources/reference.conf - :code: properties diff --git a/manual/core/control_connection/README.md b/manual/core/control_connection/README.md deleted file mode 100644 index 38544797aed..00000000000 --- a/manual/core/control_connection/README.md +++ /dev/null @@ -1,45 +0,0 @@ - - -## Control connection - -The control connection is a dedicated connection used for administrative tasks: - -* querying system tables to learn about the cluster's [topology](../metadata/node/) and - [schema](../metadata/schema/); -* checking [schema agreement](../metadata/schema/#schema-agreement); -* reacting to server events, which are used to notify the driver of external topology or schema - changes. - -When the driver starts, the control connection is established to the first contacted node. If that -node goes down, a [reconnection](../reconnection/) is started to find another node; it is governed -by the same policy as regular connections (`advanced.reconnection-policy` options in the -[configuration](../configuration/)), and tries the nodes according to a query plan from the -[load balancing policy](../load_balancing/). - -The control connection is managed independently from [regular pooled connections](../pooling/), and -used exclusively for administrative requests. It shows up in [Node.getOpenConnections], as well as -the `pool.open-connections` [metric](../metrics); for example, if you've configured a pool size of -2, the control node will show 3 connections. 
- -There are a few options to fine tune the control connection behavior in the -`advanced.control-connection` and `advanced.metadata` sections; see the [metadata](../metadata/) -pages and the [reference configuration](../configuration/reference/) for all the details. - -[Node.getOpenConnections]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getOpenConnections-- diff --git a/manual/core/custom_codecs/README.md b/manual/core/custom_codecs/README.md deleted file mode 100644 index 3a32164c3a4..00000000000 --- a/manual/core/custom_codecs/README.md +++ /dev/null @@ -1,749 +0,0 @@ - - -## Custom codecs - -### Quick overview - -Define custom Java to CQL mappings. - -* implement the [TypeCodec] interface, or use one of the alternative codecs in `ExtraTypeCodecs`. -* registering a codec: - * at init time: [CqlSession.builder().addTypeCodecs()][SessionBuilder.addTypeCodecs] - * at runtime: - - ```java - MutableCodecRegistry registry = - (MutableCodecRegistry) session.getContext().getCodecRegistry(); - registry.register(myCodec); - ``` - -* using a codec: - * if already registered: `row.get("columnName", MyCustomType.class)` - * otherwise: `row.get("columnName", myCodec)` - ------ - -Out of the box, the driver comes with [default CQL to Java mappings](../#cql-to-java-type-mapping). -For example, if you read a CQL `text` column, it is mapped to its natural counterpart -`java.lang.String`: - -```java -// cqlsh:ks> desc table test; -// CREATE TABLE ks.test (k int PRIMARY KEY, v text)... -ResultSet rs = session.execute("SELECT * FROM ks.test WHERE k = 1"); -String v = rs.one().getString("v"); -``` - -Sometimes you might want to use different mappings, for example: - -* read a text column as a Java enum; -* map an `address` UDT to a custom `Address` class in your application; -* manipulate CQL collections as arrays in performance-intensive applications. 
- -Custom codecs allow you to define those dedicated mappings, and plug them into your session. - -### Using alternative codecs provided by the driver - -The first thing you can do is use one of the many alternative codecs shipped with the driver. They -are exposed on the [ExtraTypeCodecs] class. In this section we are going to introduce these codecs, -then you will see how to register and use them in the next sections. - -#### Mapping CQL blobs to Java arrays - -The driver default is [TypeCodecs.BLOB], which maps CQL `blob` to Java's [java.nio.ByteBuffer]. -Check out our [CQL blob example] to understand how to manipulate the `ByteBuffer` API correctly. - -If the `ByteBuffer` API is too cumbersome for you, an alternative is to use -[ExtraTypeCodecs.BLOB_TO_ARRAY] which maps CQL blobs to Java's `byte[]`. - -#### Mapping CQL lists to Java arrays - -By default, the driver maps CQL `list` to Java's [java.util.List]. If you prefer to deal with -arrays, the driver offers the following codecs: - -1. For primitive types: - - | Codec | CQL type | Java type | - |---|---|---| - | [ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY] | `list` | `boolean[]` | - | [ExtraTypeCodecs.BYTE_LIST_TO_ARRAY] | `list` | `byte[]` | - | [ExtraTypeCodecs.SHORT_LIST_TO_ARRAY] | `list` | `short[]` | - | [ExtraTypeCodecs.INT_LIST_TO_ARRAY] | `list` | `int[]` | - | [ExtraTypeCodecs.LONG_LIST_TO_ARRAY] | `list` | `long[]` | - | [ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY] | `list` | `float[]` | - | [ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY] | `list` | `double[]` | - -2. For other types, you should use [ExtraTypeCodecs.listToArrayOf(TypeCodec)]; for example, to map - CQL `list` to `String[]`: - - ```java - TypeCodec stringArrayCodec = ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT); - ``` - -#### Mapping CQL timestamps to Java "instant" types - -By default, the driver maps CQL `timestamp` to Java's [java.time.Instant] \(using -[TypeCodecs.TIMESTAMP]). 
This is the most natural mapping, since neither type contains any time zone -information: they just represent absolute points in time. - -The driver also provides codecs to map to a Java `long` representing the number of milliseconds -since the epoch (this is the raw form return by `Instant.toEpochMilli`, and also how Cassandra -stores the value internally). - -In either case, you can pick the time zone that the codec will use for its [format()] and [parse()] -methods. Note that this is only relevant for these two methods (follow the links for more -explanations on how the driver uses them); for regular encoding and decoding, like setting a value -on a bound statement or reading a column from a row, the time zone does not matter. - -| Codec | CQL type | Java type | Time zone used by `format()` and `parse()` | -|---|---|---|---| -| [TypeCodecs.TIMESTAMP] | `timestamp` | `Instant` | System default | -| [ExtraTypeCodecs.TIMESTAMP_UTC] | `timestamp` | `Instant` | UTC | -| [ExtraTypeCodecs.timestampAt(ZoneId)] | `timestamp` | `Instant` | User-provided | -| [ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM] | `timestamp` | `long` | System default | -| [ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC] | `timestamp` | `long` | UTC | -| [ExtraTypeCodecs.timestampMillisAt(ZoneId)] | `timestamp` | `long` | User-provided | - -For example, given the schema: - -``` -CREATE TABLE example (k int PRIMARY KEY, ts timestamp); -INSERT INTO example(k, ts) VALUES (1, 0); -``` - -When reading column `ts`, all `Instant` codecs return `Instant.ofEpochMilli(0)`. 
But if asked to
-format it, they behave differently:
-
-* `ExtraTypeCodecs.TIMESTAMP_UTC` returns `'1970-01-01T00:00:00.000Z'`
-* `ExtraTypeCodecs.timestampAt(ZoneId.of("Europe/Paris"))` returns `'1970-01-01T01:00:00.000+01:00'`
-
-#### Mapping CQL timestamps to `ZonedDateTime`
-
-If your application works with one single, pre-determined time zone, then you probably would like
-the driver to map `timestamp` to [java.time.ZonedDateTime] with a fixed zone. Use one of the
-following codecs:
-
-| Codec | CQL type | Java type | Time zone used by all codec operations |
-|---|---|---|---|
-| [ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM] | `timestamp` | `ZonedDateTime` | System default |
-| [ExtraTypeCodecs.ZONED_TIMESTAMP_UTC] | `timestamp` | `ZonedDateTime` | UTC |
-| [ExtraTypeCodecs.zonedTimestampAt(ZoneId)] | `timestamp` | `ZonedDateTime` | User-provided |
-
-This time, the zone matters for all codec operations, including encoding and decoding. For example,
-given the schema:
-
-```
-CREATE TABLE example (k int PRIMARY KEY, ts timestamp);
-INSERT INTO example(k, ts) VALUES (1, 0);
-```
-
-When reading column `ts`:
-
-* `ExtraTypeCodecs.ZONED_TIMESTAMP_UTC` returns the same value as
-  `ZonedDateTime.parse("1970-01-01T00:00Z")`
-* `ExtraTypeCodecs.zonedTimestampAt(ZoneId.of("Europe/Paris"))` returns the same value as
-  `ZonedDateTime.parse("1970-01-01T01:00+01:00[Europe/Paris]")`
-
-These are two distinct `ZonedDateTime` instances: although they represent the same absolute point in
-time, they do not compare as equal.
-
-#### Mapping CQL timestamps to `LocalDateTime`
-
-If your application works with one single, pre-determined time zone, but only exposes local
-date-times, then you probably would like the driver to map timestamps to [java.time.LocalDateTime]
-obtained from a fixed zone. 
Use one of the following codecs:
-
-| Codec | CQL type | Java type | Time zone used by all codec operations |
-|---|---|---|---|
-| [ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM] | `timestamp` | `LocalDateTime` | System default |
-| [ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC] | `timestamp` | `LocalDateTime` | UTC |
-| [ExtraTypeCodecs.localTimestampAt(ZoneId)] | `timestamp` | `LocalDateTime` | User-provided |
-
-
-Again, the zone matters for all codec operations, including encoding and decoding. For example,
-given the schema:
-
-```
-CREATE TABLE example (k int PRIMARY KEY, ts timestamp);
-INSERT INTO example(k, ts) VALUES (1, 0);
-```
-
-When reading column `ts`:
-
-* `ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC` returns `LocalDateTime.of(1970, 1, 1, 0, 0)`
-* `ExtraTypeCodecs.localTimestampAt(ZoneId.of("Europe/Paris"))` returns `LocalDateTime.of(1970, 1,
-  1, 1, 0)`
-
-#### Storing the time zone in Cassandra
-
-If your application needs to remember the time zone that each date was entered with, you need to
-store it in the database. We suggest using a `tuple<timestamp, text>`, where the second component
-holds the [zone id][java.time.ZoneId].
-
-If you follow this guideline, then you can use [ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED] to map
-the CQL tuple to [java.time.ZonedDateTime].
-
-For example, given the schema:
-
-```
-CREATE TABLE example(k int PRIMARY KEY, zts tuple<timestamp, text>);
-INSERT INTO example (k, zts) VALUES (1, (0, 'Z'));
-INSERT INTO example (k, zts) VALUES (2, (-3600000, 'Europe/Paris'));
-```
-
-When reading column `zts`, `ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED` returns:
-
-* `ZonedDateTime.parse("1970-01-01T00:00Z")` for the first row
-* `ZonedDateTime.parse("1970-01-01T00:00+01:00[Europe/Paris]")` for the second row
-
-Each value is read back in the time zone that it was written with. But note that you can still
-compare rows on an absolute timeline with the `timestamp` component of the tuple. 
-
-#### Mapping to `Optional` instead of `null`
-
-If you prefer to deal with [java.util.Optional] in your application instead of nulls, then you can
-use [ExtraTypeCodecs.optionalOf(TypeCodec)]:
-
-```java
-TypeCodec<Optional<UUID>> optionalUuidCodec = ExtraTypeCodecs.optionalOf(TypeCodecs.UUID);
-```
-
-Note that because the CQL native protocol does not distinguish empty collections from null
-collection references, this codec will also map empty collections to [Optional.empty()].
-
-#### Mapping Java Enums
-
-Java [Enums] can be mapped to CQL in two ways:
-
-1. By name: [ExtraTypeCodecs.enumNamesOf(Class)] will create a codec for a given `Enum` class that
-maps its constants to their [programmatic names][Enum.name()]. The corresponding CQL column must be
-of type `text`. Note that this codec relies on the enum constant names; it is therefore vital that
-enum names never change.
-1. By ordinal: [ExtraTypeCodecs.enumOrdinalsOf(Class)] will create a codec for a given `Enum` class
-that maps its constants to their [ordinal value][Enum.ordinal()]. The corresponding CQL column must
-be of type `int`.
-
-   **We strongly recommend against this approach.** It is provided for compatibility with driver 3,
-   but relying on ordinals is a bad practice: any reordering of the enum constants, or insertion
-   of a new constant before the end, will change the ordinals. The codec won't fail, but it will
-   insert different codes and corrupt your data.
-
-   If you really want to use integer codes for storage efficiency, implement an explicit mapping
-   (for example with a `toCode()` method on your enum type). It is then fairly straightforward to
-   implement a codec with [MappingCodec](#creating-custom-java-to-cql-mappings-with-mapping-codec),
-   using `TypeCodecs#INT` as the "inner" codec. 
-
-For example, assuming the following enum:
-
-```java
-public enum WeekDay {
-  MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY
-}
-```
-
-You can define codecs for it the following ways:
-
-```java
-// MONDAY will be persisted as "MONDAY", TUESDAY as "TUESDAY", etc.
-TypeCodec<WeekDay> weekDaysByNameCodec = ExtraTypeCodecs.enumNamesOf(WeekDay.class);
-
-// MONDAY will be persisted as 0, TUESDAY as 1, etc.
-TypeCodec<WeekDay> weekDaysByOrdinalCodec = ExtraTypeCodecs.enumOrdinalsOf(WeekDay.class);
-```
-
-#### Mapping Json
-
-The driver provides out-of-the-box support for mapping Java objects to CQL `text` using the popular
-Jackson library. The method [ExtraTypeCodecs.json(Class)] will create a codec for a given Java class
-that maps instances of that class to Json strings, using a newly-allocated, default [ObjectMapper].
-It is also possible to pass a custom `ObjectMapper` instance using [ExtraTypeCodecs.json(Class,
-ObjectMapper)] instead.
-
-#### Mapping CQL vectors to Java array
-
-By default, the driver maps CQL `vector` to the [CqlVector] value type. If you prefer to deal with
-arrays, the driver offers the following codec:
-
-| Codec | CQL type | Java type |
-|-------------------------------------------|-----------------|-----------|
-| [ExtraTypeCodecs.floatVectorToArray(int)] | `vector` | `float[]` |
-
-This release only provides a codec for vectors containing float values.
-
-### Writing codecs
-
-If none of the driver built-in codecs above suits you, it is also possible to roll your own.
-
-To write a custom codec, implement the [TypeCodec] interface. 
Here is an example that maps a CQL
-`int` to a Java string containing its textual representation:
-
-```java
-public class CqlIntToStringCodec implements TypeCodec<String> {
-
-  @Override
-  public GenericType<String> getJavaType() {
-    return GenericType.STRING;
-  }
-
-  @Override
-  public DataType getCqlType() {
-    return DataTypes.INT;
-  }
-
-  @Override
-  public ByteBuffer encode(String value, ProtocolVersion protocolVersion) {
-    if (value == null) {
-      return null;
-    } else {
-      int intValue = Integer.parseInt(value);
-      return TypeCodecs.INT.encode(intValue, protocolVersion);
-    }
-  }
-
-  @Override
-  public String decode(ByteBuffer bytes, ProtocolVersion protocolVersion) {
-    Integer intValue = TypeCodecs.INT.decode(bytes, protocolVersion);
-    return intValue.toString();
-  }
-
-  @Override
-  public String format(String value) {
-    int intValue = Integer.parseInt(value);
-    return TypeCodecs.INT.format(intValue);
-  }
-
-  @Override
-  public String parse(String value) {
-    Integer intValue = TypeCodecs.INT.parse(value);
-    return intValue == null ? null : intValue.toString();
-  }
-}
-```
-
-Admittedly, this is a trivial -- and maybe not very realistic -- example, but it illustrates a few
-important points:
-
-* which methods to override. Refer to the [TypeCodec] javadocs for additional information about each
-  of them;
-* how to piggyback on a built-in codec, in this case `TypeCodecs.INT`. Very often, this is the best
-  approach to keep the code simple. If you want to handle the binary encoding yourself (maybe to
-  squeeze the last bit of performance), study the driver's
-  [built-in codec implementations](https://github.com/datastax/java-driver/tree/4.x/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec).
-
-### Using codecs
-
-Once you have your codec, register it when building your session. 
The following example registers
-`CqlIntToStringCodec` along with a few driver-supplied alternative codecs:
-
-```java
-enum WeekDay { MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY };
-class Price {
-  ... // a custom POJO that will be serialized as JSON
-}
-
-CqlSession session =
-    CqlSession.builder()
-        .addTypeCodecs(
-            new CqlIntToStringCodec(),                     // user-created codec
-            ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED,     // tuple<timestamp,text> <-> ZonedDateTime
-            ExtraTypeCodecs.BLOB_TO_ARRAY,                 // blob <-> byte[]
-            ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT), // list<text> <-> String[]
-            ExtraTypeCodecs.enumNamesOf(WeekDay.class),    // text <-> WeekDay
-            ExtraTypeCodecs.json(Price.class),             // text <-> Price
-            ExtraTypeCodecs.optionalOf(TypeCodecs.UUID)    // uuid <-> Optional<UUID>
-        )
-        .build();
-```
-
-You may also add codecs to an existing session at runtime:
-
-```java
-// The cast is required for backward compatibility reasons (registry mutability was introduced in
-// 4.3.0). It is safe as long as you didn't write a custom registry implementation.
-MutableCodecRegistry registry =
-    (MutableCodecRegistry) session.getContext().getCodecRegistry();
-
-registry.register(new CqlIntToStringCodec());
-```
-
-You can now use the new mappings in your code:
-
-```java
-// cqlsh:ks> desc table test2;
-// CREATE TABLE ks.test2 (k int PRIMARY KEY, v int)...
-ResultSet rs = session.execute("SELECT * FROM ks.test2 WHERE k = 1");
-String v = rs.one().getString("v"); // read a CQL int as a java.lang.String
-
-PreparedStatement ps = session.prepare("INSERT INTO ks.test2 (k, v) VALUES (?, ?)");
-session.execute(
-    ps.boundStatementBuilder()
-        .setInt("k", 2)
-        .setString("v", "12") // write a java.lang.String as a CQL int
-        .build());
-```
-
-In the above example, the driver will look up in the codec registry a codec for CQL `int` and Java
-String, and will transparently pick `CqlIntToStringCodec` for that. 
- -So far our examples have used a Java type with dedicated accessors in the driver: `getString` and -`setString`. But sometimes you won't find suitable accessor methods; for example, there is no -accessor for `ZonedDateTime` or for `Optional`, and yet we registered codecs for these types. - -When you want to retrieve such objects, you need a way to tell the driver which Java type you want. -You do so by using one of the generic `get` and `set` methods: - -```java -// Assuming that ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED was registered -// Assuming that ExtraTypeCodecs.BLOB_TO_ARRAY was registered -// Assuming that ExtraTypeCodecs.arrayOf(TypeCodecs.TEXT) was registered - -// Reading -ZonedDateTime v1 = row.get("v1", ZonedDateTime.class); // assuming column is of type timestamp -byte[] v2 = row.get("v2", byte[].class); // assuming column is of type blob -String[] v3 = row.get("v3", String[].class); // assuming column is of type list - - -// Writing -boundStatement.set("v1", v1, ZonedDateTime.class); -boundStatement.set("v2", v2, byte[].class); -boundStatement.set("v3", v3, String[].class); -``` - -This is also valid for arbitrary Java types. 
This is particularly useful when dealing with Enums and
-JSON mappings, for example our `WeekDay` and `Price` types:
-
-```java
-// Assuming that ExtraTypeCodecs.enumNamesOf(WeekDay.class) was registered
-// Assuming that ExtraTypeCodecs.json(Price.class) was registered
-
-// Reading
-WeekDay v1 = row.get("v1", WeekDay.class); // assuming column is of type text
-Price v2 = row.get("v2", Price.class); // assuming column is of type text
-
-// Writing
-boundStatement.set("v1", v1, WeekDay.class);
-boundStatement.set("v2", v2, Price.class);
-```
-
-Note that, because the underlying CQL type is `text` you can still retrieve the column's contents
-as a plain string:
-
-```java
-// Reading
-String enumName = row.getString("v1");
-String priceJson = row.getString("v2");
-
-// Writing
-boundStatement.setString("v1", enumName);
-boundStatement.setString("v2", priceJson);
-```
-
-And finally, for `Optional`, you will need the `get` and `set` methods with an extra *type
-token* argument, because `Optional` is a parameterized type:
-
-```java
-// Assuming that ExtraTypeCodecs.optionalOf(TypeCodecs.UUID) was registered
-
-// Reading
-Optional<UUID> opt = row.get("v", GenericType.optionalOf(UUID.class));
-
-// Writing
-boundStatement.set("v", opt, GenericType.optionalOf(UUID.class));
-```
-
-Type tokens are instances of [GenericType]. They are immutable and thread-safe, you should store
-them as reusable constants. The `GenericType` class itself has constants and factory methods to help
-creating `GenericType` objects for common types. If you don't see the type you are looking for, a
-type token for any Java type can be created using the following pattern:
-
-```java
-// Notice the '{}': this is an anonymous inner class
-GenericType<Foo<Bar>> fooBarType = new GenericType<Foo<Bar>>(){};
-
-Foo<Bar> v = row.get("v", fooBarType);
-```
-
-Custom codecs are used not only for their base type, but also recursively in collections, tuples and
For example, once your JSON codec for the `Price` class is registered, you can also read a CQL
-`list<text>` as a Java `List<Price>`:
-
-```java
-// Assuming that ExtraTypeCodecs.json(Price.class) was registered
-// Assuming that each element of the list column is a valid JSON string
-
-// Reading
-List<Price> prices1 = row.getList("v", Price.class);
-// alternative method using the generic get method with type token argument:
-List<Price> prices2 = row.get("v", GenericType.listOf(Price.class));
-
-// Writing
-boundStatement.setList("v", prices1, Price.class);
-// alternative method using the generic set method with type token argument:
-boundStatement.set("v", prices2, GenericType.listOf(Price.class));
-```
-
-Whenever you read or write a value, the driver tries all the built-in mappings first, followed by
-custom codecs. If two codecs can process the same mapping, the one that was registered first is
-used. Note that this means that built-in mappings can't be overridden.
-
-In rare cases, you might have a codec registered in your application, but have a legitimate reason
-to use a different mapping in one particular place. In that case, you can pass a codec instance
-to `get` / `set` instead of a type token:
-
-```java
-TypeCodec<String> defaultCodec = new CqlIntToStringCodec();
-TypeCodec<String> specialCodec = ...; // a different implementation
-
-CqlSession session =
-    CqlSession.builder().addTypeCodecs(defaultCodec).build();
-
-String s1 = row.getString("anIntColumn"); // int -> String, will decode with defaultCodec
-String s2 = row.get("anIntColumn", specialCodec); // int -> String, will decode with specialCodec
-```
-
-By doing so, you bypass the codec registry completely and instruct the driver to use the given
-codec. Note that it is your responsibility to ensure that the codec can handle the underlying CQL
-type (this cannot be enforced at compile-time). 
-
-### Creating custom Java-to-CQL mappings with `MappingCodec`
-
-The above example, `CqlIntToStringCodec`, could be rewritten to leverage [MappingCodec], an abstract
-class that ships with the driver. This class has been designed for situations where we want to
-represent a CQL type with a different Java type than the Java type natively supported by the driver,
-and the conversion between the former and the latter is straightforward.
-
-All you have to do is extend `MappingCodec` and implement two methods that perform the conversion
-between the supported Java type -- or "inner" type -- and the target Java type -- or "outer" type:
-
-```java
-public class CqlIntToStringCodec extends MappingCodec<Integer, String> {
-
-  public CqlIntToStringCodec() {
-    super(TypeCodecs.INT, GenericType.STRING);
-  }
-
-  @Nullable
-  @Override
-  protected String innerToOuter(@Nullable Integer value) {
-    return value == null ? null : value.toString();
-  }
-
-  @Nullable
-  @Override
-  protected Integer outerToInner(@Nullable String value) {
-    return value == null ? null : Integer.parseInt(value);
-  }
-}
-```
-
-This technique is especially useful when mapping user-defined types to Java objects. 
For example,
-let's assume the following user-defined type:
-
-```
-CREATE TYPE coordinates (x int, y int);
-```
-
-And let's suppose that we want to map it to the following Java class:
-
-```java
-public class Coordinates {
-  public final int x;
-  public final int y;
-  public Coordinates(int x, int y) { this.x = x; this.y = y; }
-}
-```
-
-All you have to do is create a `MappingCodec` subclass that piggybacks on an existing
-`TypeCodec<UdtValue>` for the above user-defined type:
-
-```java
-public class CoordinatesCodec extends MappingCodec<UdtValue, Coordinates> {
-
-  public CoordinatesCodec(@NonNull TypeCodec<UdtValue> innerCodec) {
-    super(innerCodec, GenericType.of(Coordinates.class));
-  }
-
-  @NonNull @Override public UserDefinedType getCqlType() {
-    return (UserDefinedType) super.getCqlType();
-  }
-
-  @Nullable @Override protected Coordinates innerToOuter(@Nullable UdtValue value) {
-    return value == null ? null : new Coordinates(value.getInt("x"), value.getInt("y"));
-  }
-
-  @Nullable @Override protected UdtValue outerToInner(@Nullable Coordinates value) {
-    return value == null ? null : getCqlType().newValue().setInt("x", value.x).setInt("y", value.y);
-  }
-}
-```
-
-Then the new mapping codec could be registered as follows:
-
-```java
-CqlSession session = ... 
-
-CodecRegistry codecRegistry = session.getContext().getCodecRegistry();
-// The target user-defined type
-UserDefinedType coordinatesUdt =
-    session
-        .getMetadata()
-        .getKeyspace("...")
-        .flatMap(ks -> ks.getUserDefinedType("coordinates"))
-        .orElseThrow(IllegalStateException::new);
-// The "inner" codec that handles the conversions between the CQL type and UdtValue
-TypeCodec<UdtValue> innerCodec = codecRegistry.codecFor(coordinatesUdt);
-// The mapping codec that will handle the conversions between UdtValue and Coordinates
-CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec);
-// Register the new codec
-((MutableCodecRegistry) codecRegistry).register(coordinatesCodec);
-```
-
-...and used just like explained above:
-
-```java
-BoundStatement stmt = ...;
-stmt.set("coordinates", new Coordinates(10,20), Coordinates.class);
-
-Row row = ...;
-Coordinates coordinates = row.get("coordinates", Coordinates.class);
-```
-
-Note: if you need even more advanced mapping capabilities, consider adopting
-the driver's [object mapping framework](../../mapper/).
-
-### Subtype polymorphism
-
-Suppose the following class hierarchy:
-
-```java
-class Animal {}
-class Cat extends Animal {}
-```
-
-By default, a codec will serialize any object that extends or implements its declared Java
-type: a codec such as `AnimalCodec extends TypeCodec<Animal>` will accept `Cat` instances as well.
-
-This allows a codec to handle interfaces and superclasses in a generic way, regardless of the actual
-implementation being used by client code; for example, the driver has a built-in codec that handles
-`List` instances, and this codec is capable of serializing any concrete `List` implementation.
-
-But this has one caveat: when setting or retrieving values with `get()` and `set()`, *you must pass
-the exact Java type the codec handles*:
-
-```java
-BoundStatement bs = ... 
-
-bs.set(0, new Cat(), Animal.class); // works
-bs.set(0, new Cat(), Cat.class); // throws CodecNotFoundException
-
-Row row = ...
-Animal animal = row.get(0, Animal.class); // works
-Cat cat = row.get(0, Cat.class); // throws CodecNotFoundException
-```
-
-### The codec registry
-
-The driver stores all codecs (built-in and custom) in an internal [CodecRegistry]:
-
-```java
-CodecRegistry codecRegistry = session.getContext().getCodecRegistry();
-
-// Get the custom codec we registered earlier:
-TypeCodec<String> cqlIntToString = codecRegistry.codecFor(DataTypes.INT, GenericType.STRING);
-```
-
-If all you're doing is executing requests and reading responses, you probably won't ever need to
-access the registry directly. But it's useful if you do some kind of generic processing, for
-example printing out an arbitrary row when the schema is not known at compile time:
-
-```java
-private static String formatRow(Row row) {
-  StringBuilder result = new StringBuilder();
-  for (int i = 0; i < row.size(); i++) {
-    String name = row.getColumnDefinitions().get(i).getName().asCql(true);
-    Object value = row.getObject(i);
-    DataType cqlType = row.getType(i);
-
-    // Find the best codec to format this CQL type:
-    TypeCodec<Object> codec = row.codecRegistry().codecFor(cqlType);
-
-    if (i != 0) {
-      result.append(", ");
-    }
-    result.append(name).append(" = ").append(codec.format(value));
-  }
-  return result.toString();
-}
-```
-
-[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html
-[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html
-[TypeCodec]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html
-[format()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html#format-JavaTypeT-
-[parse()]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html#parse-java.lang.String- -[MappingCodec]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/MappingCodec.html -[SessionBuilder.addTypeCodecs]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addTypeCodecs-com.datastax.oss.driver.api.core.type.codec.TypeCodec...- - -[Enums]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html -[Enum.name()]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#name-- -[Enum.ordinal()]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#ordinal-- -[java.nio.ByteBuffer]: https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html -[java.util.List]: https://docs.oracle.com/javase/8/docs/api/java/util/List.html -[java.util.Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html -[Optional.empty()]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html#empty-- -[java.time.Instant]: https://docs.oracle.com/javase/8/docs/api/java/time/Instant.html -[java.time.ZonedDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/ZonedDateTime.html -[java.time.LocalDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDateTime.html -[java.time.ZoneId]: https://docs.oracle.com/javase/8/docs/api/java/time/ZoneId.html - -[ExtraTypeCodecs]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html -[ExtraTypeCodecs.BLOB_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#BLOB_TO_ARRAY -[ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#BOOLEAN_LIST_TO_ARRAY -[ExtraTypeCodecs.BYTE_LIST_TO_ARRAY]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#BYTE_LIST_TO_ARRAY -[ExtraTypeCodecs.SHORT_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#SHORT_LIST_TO_ARRAY -[ExtraTypeCodecs.INT_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#INT_LIST_TO_ARRAY -[ExtraTypeCodecs.LONG_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#LONG_LIST_TO_ARRAY -[ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#FLOAT_LIST_TO_ARRAY -[ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#DOUBLE_LIST_TO_ARRAY -[ExtraTypeCodecs.listToArrayOf(TypeCodec)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#listToArrayOf-com.datastax.oss.driver.api.core.type.codec.TypeCodec- -[ExtraTypeCodecs.TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#TIMESTAMP_UTC -[ExtraTypeCodecs.timestampAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#timestampAt-java.time.ZoneId- -[ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#TIMESTAMP_MILLIS_SYSTEM -[ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#TIMESTAMP_MILLIS_UTC -[ExtraTypeCodecs.timestampMillisAt(ZoneId)]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#timestampMillisAt-java.time.ZoneId- -[ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#ZONED_TIMESTAMP_SYSTEM -[ExtraTypeCodecs.ZONED_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#ZONED_TIMESTAMP_UTC -[ExtraTypeCodecs.zonedTimestampAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#zonedTimestampAt-java.time.ZoneId- -[ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#LOCAL_TIMESTAMP_SYSTEM -[ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#LOCAL_TIMESTAMP_UTC -[ExtraTypeCodecs.localTimestampAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#localTimestampAt-java.time.ZoneId- -[ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#ZONED_TIMESTAMP_PERSISTED -[ExtraTypeCodecs.optionalOf(TypeCodec)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#optionalOf-com.datastax.oss.driver.api.core.type.codec.TypeCodec- -[ExtraTypeCodecs.enumNamesOf(Class)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#enumNamesOf-java.lang.Class- -[ExtraTypeCodecs.enumOrdinalsOf(Class)]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#enumOrdinalsOf-java.lang.Class- -[ExtraTypeCodecs.json(Class)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#json-java.lang.Class- -[ExtraTypeCodecs.json(Class, ObjectMapper)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#json-java.lang.Class-com.fasterxml.jackson.databind.ObjectMapper- -[ExtraTypeCodecs.floatVectorToArray(int)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#floatVectorToArray-int- - -[TypeCodecs.BLOB]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#BLOB -[TypeCodecs.TIMESTAMP]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#TIMESTAMP - - -[ObjectMapper]: http://fasterxml.github.io/jackson-databind/javadoc/2.10/com/fasterxml/jackson/databind/ObjectMapper.html - -[CQL blob example]: https://github.com/datastax/java-driver/blob/4.x/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java diff --git a/manual/core/detachable_types/README.md b/manual/core/detachable_types/README.md deleted file mode 100644 index 7968835dd8a..00000000000 --- a/manual/core/detachable_types/README.md +++ /dev/null @@ -1,168 +0,0 @@ - - -## Detachable types - -### Quick overview - -Advanced topic, only needed if you use Java serialization with driver rows or data types, or create -tuple or UDT types manually. - ------ - -Some driver components need to keep an internal reference to their originating [Session]. Under -specific circumstances, they can lose that reference, and you might need to reattach them. 
- -Namely, these components are: - -* all [DataType] instances, in particular [tuples](../tuples/) and [UDTs](../udts/); -* [result rows][Row], and their [column definitions][ColumnDefinition]. - -Detachable types are an advanced topic, that should only be a concern for 3rd-party tool developers. -If you're simply executing requests and reading results, you probably won't need to worry about -them. See the [bottom line](#bottom-line) at the end of this page for details. - -### Rationale - -Detachable components are those that encode or decode their fields themselves. For example, when you -set a field on a [tuple value](../tuples): - -```java -tupleValue = tupleValue.setString(0, "foo"); -``` - -The string "foo" is encoded immediately, and the `TupleValue` object holds a reference to the binary -data. It is done that way in order to fail fast on encoding errors, and avoid duplicate work if you -reuse the tuple instance in multiple requests. - -Encoding requires session-specific information: - -* the [CodecRegistry] instance (in case it contains [custom codecs](../custom_codecs/)); -* the [protocol version](../native_protocol/) (because the binary format can change across - versions). - -Therefore the tuple value needs a reference to the session to access those two objects. - -### Detached objects - -Detachable types implement the [Detachable] interface, which has an `isDetached()` method to check -the current status. Whenever you get an object from the driver, it is attached: - -* reading a row from a result set: - - ```java - ResultSet rs = session.execute("SELECT * FROM foo"); - Row row = rs.one(); - assert !row.isDetached(); - ``` - -* reading a data type from schema metadata: - - ```java - UserDefinedType udt = session.getMetadata().getKeyspace("ks").getUserDefinedType("type1"); - assert !udt.isDetached(); - ``` - -There is no way to detach an object explicitly. 
This can only happen when: - -* deserializing a previously serialized instance (we're referring here to [Java serialization]); -* attaching an object to another session; -* creating a [tuple](../tuples/) or [UDT](../udts/) definition manually: - - ```java - TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT, DataTypes.FLOAT); - assert tupleType.isDetached(); - ``` - -When an object is detached, it uses a [default codec registry][CodecRegistry#DEFAULT] that only -handles built-in types, and the latest non-beta protocol version supported by the driver. This might -be good enough for you if you don't use any custom codec (the binary format has been stable across -modern protocol versions). - -### Reattaching - -Use `attach()` to reattach an object to the session: - -```java -TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT, DataTypes.FLOAT); -assert tupleType.isDetached(); - -tupleType.attach(session.getContext()); -assert !tupleType.isDetached(); - -// Now this will use the session's custom codecs if field 0 isn't a text CQL type: -TupleValue tupleValue = tupleType.newValue().setString(0, "foo"); -``` - -When you pass a detached type to the session (for example by executing a request with a tuple value -based on a detached tuple type), it will automatically be reattached. 
- -### Sharing data across sessions - -If you're reading data from one session and writing it into another, you should take a few extra -precautions: - -* if you use custom codecs, they should obviously be registered with both sessions; - -* if the protocol version is different, you should avoid sharing UDT and tuple types; keep a - separate set of definitions for each session, and copy the values field by field: - - ```java - Row row = session1.execute("SELECT QUERY...").one(); - UdtValue user1 = row.getUdtValue("user"); - - // Don't pass user1 to session2: create a new copy from userType2 instead - UserDefinedType userType2 = - session2.getMetadata().getKeyspace("ks").flatMap(ks -> ks.getUserDefinedType("user")).get(); - UdtValue user2 = userType2.newValue(); - user2.setString("first_name", user1.getString("first_name")); - user2.setString("last_name", user1.getString("last_name")); - - session2.execute(SimpleStatement.newInstance("INSERT QUERY...", user2)); - ``` - - This will ensure that UDT definition are not accidentally reattached to the wrong session, and - use the correct protocol version to encode values. - - -### Bottom line - -You only need to worry about detachable types if you serialize driver rows or data types, or if you -create tuple or UDT types manually. - -Even then, the defaults used by detached objects might be good enough for you: - -* the default codec registry works if you don't have any [custom codec](../custom_codecs/); -* the binary encoding format is stable across modern protocol versions. The last changes were for - collection encoding from v2 to v3; Java Driver 4 only supports v3 and above. When in doubt, check - the "Changes" section of the [protocol specifications]. - -Otherwise, just make sure you reattach objects any time you deserialize them or create them from -scratch. 
- -[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html -[CodecRegistry#DEFAULT]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html#DEFAULT -[DataType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/DataType.html -[Detachable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/detach/Detachable.html -[Session]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html -[ColumnDefinition]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ColumnDefinition.html -[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html - -[Java serialization]: https://docs.oracle.com/javase/tutorial/jndi/objects/serial.html -[protocol specifications]: https://github.com/datastax/native-protocol/tree/1.x/src/main/resources diff --git a/manual/core/dse/.nav b/manual/core/dse/.nav deleted file mode 100644 index c53a353fd33..00000000000 --- a/manual/core/dse/.nav +++ /dev/null @@ -1,2 +0,0 @@ -graph -geotypes \ No newline at end of file diff --git a/manual/core/dse/README.md b/manual/core/dse/README.md deleted file mode 100644 index 75abeafb3d7..00000000000 --- a/manual/core/dse/README.md +++ /dev/null @@ -1,30 +0,0 @@ - - -## DSE-specific features - -Some driver features only work with DataStax Enterprise: - -* [Graph](graph/); -* [Geospatial types](geotypes/); -* Proxy and GSSAPI authentication (covered in the [Authentication](../authentication/) page). - -Note that, if you don't use these features, you might be able to exclude certain dependencies in -order to limit the number of JARs in your classpath. See the -[Integration](../integration/#driver-dependencies) page. 
diff --git a/manual/core/dse/geotypes/README.md b/manual/core/dse/geotypes/README.md deleted file mode 100644 index eb414de4f8d..00000000000 --- a/manual/core/dse/geotypes/README.md +++ /dev/null @@ -1,194 +0,0 @@ - - -## Geospatial types - -The driver comes with client-side representations of the DSE geospatial data types: [Point], -[LineString] and [Polygon]. - -Note: geospatial types require the [ESRI] library version 1.2 to be present on the classpath. The -DSE driver has a non-optional dependency on that library, but if your application does not use -geotypes at all, it is possible to exclude it to minimize the number of runtime dependencies (see -the [Integration>Driver dependencies](../../integration/#driver-dependencies) section for -more details). If the library cannot be found at runtime, geospatial types won't be available and a -warning will be logged, but the driver will otherwise operate normally (this is also valid for OSGi -deployments). - -### Usage in requests - -Geospatial types can be retrieved from query results like any other value; use the "typed" getter -that takes the class as a second argument: - -```java -// Schema: CREATE TABLE poi(id int PRIMARY KEY, location 'PointType', description text); - -CqlSession session = CqlSession.builder().build() - -Row row = session.execute("SELECT location FROM poi WHERE id = 1").one(); -Point location = row.get(0, Point.class); -``` - -The corresponding setter can be used for insertions: - -```java -PreparedStatement pst = - session.prepare("INSERT INTO poi (id, location, description) VALUES (?, ?, ?)"); -session.execute( - pst.boundStatementBuilder() - .setInt("id", 2) - .set("location", Point.fromCoordinates(2.2945, 48.8584), Point.class) - .setString("description", "Eiffel Tower") - .build()); -``` - -This also works with the vararg syntax where target CQL types are inferred: - -```java -session.execute(pst.bind(2, Point.fromCoordinates(2.2945, 48.8584), "Eiffel Tower")); -``` - -### Client-side API - 
-The driver provides methods to create instances or inspect existing ones. - -[Point] is a trivial pair of coordinates: - -```java -Point point = Point.fromCoordinates(2.2945, 48.8584); -System.out.println(point.X()); -System.out.println(point.Y()); -``` - -[LineString] is a series of 2 or more points: - -```java -LineString lineString = - LineString.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 30), - Point.fromCoordinates(40, 40)); - -for (Point point : lineString.getPoints()) { - System.out.println(point); -} -``` - -[Polygon] is a planar surface in a two-dimensional XY-plane. You can build a simple polygon from a -list of points: - -```java -Polygon polygon = - Polygon.fromPoints( - Point.fromCoordinates(30, 10), - Point.fromCoordinates(10, 20), - Point.fromCoordinates(20, 40), - Point.fromCoordinates(40, 40)); -``` - -In addition to its exterior boundary, a polygon can have an arbitrary number of interior rings, -possibly nested (the first level defines "lakes" in the shape, the next level "islands" in those -lakes, etc). To create such complex polygons, use the builder: - -```java -Polygon polygon = - Polygon.builder() - .addRing( - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)) - .addRing( - Point.fromCoordinates(1, 1), - Point.fromCoordinates(1, 2), - Point.fromCoordinates(2, 2), - Point.fromCoordinates(2, 1)) - .addRing( - Point.fromCoordinates(3, 1), - Point.fromCoordinates(3, 2), - Point.fromCoordinates(4, 2), - Point.fromCoordinates(4, 1)) - .build(); -``` - -You can then retrieve all the points with the following methods: - -```java -List exteriorRing = polygon.getExteriorRing(); - -for (List interiorRing : polygon.getInteriorRings()) { - ... -} -``` - -Note that all rings (exterior or interior) are defined with the same builder method: you can provide -them in any order, the implementation will figure out which is the exterior one. 
In addition, points -are always ordered counterclockwise for the exterior ring, clockwise for the first interior level, -counterclockwise for the second level, etc. Again, this is done automatically, so you don't need to -sort them beforehand; however, be prepared to get a different order when you read them back: - -```java -Polygon polygon = - Polygon.fromPoints( - // Clockwise: - Point.fromCoordinates(0, 0), - Point.fromCoordinates(0, 3), - Point.fromCoordinates(5, 3), - Point.fromCoordinates(5, 0)); - -System.out.println(polygon); -// Counterclockwise: -// POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0)) -``` - -All geospatial types interoperate with three standard formats: - -* [Well-known text]\: - - ```java - Point point = Point.fromWellKnownText("POINT (0 1)"); - System.out.println(point.asWellKnownText()); - ``` - -* [Well-known binary]\: - - ```java - import com.datastax.oss.protocol.internal.util.Bytes; - - Point point = - Point.fromWellKnownBinary( - Bytes.fromHexString("0x01010000000000000000000000000000000000f03f")); - System.out.println(Bytes.toHexString(point.asWellKnownBinary())); - ``` - -* [GeoJSON]\: - - ```java - Point point = Point.fromGeoJson("{\"type\":\"Point\",\"coordinates\":[0.0,1.0]}"); - System.out.println(point.asGeoJson()); - ``` - -[ESRI]: https://github.com/Esri/geometry-api-java - -[LineString]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/data/geometry/LineString.html -[Point]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/data/geometry/Point.html -[Polygon]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/data/geometry/Polygon.html - -[Well-known text]: https://en.wikipedia.org/wiki/Well-known_text -[Well-known binary]: https://en.wikipedia.org/wiki/Well-known_text#Well-known_binary -[GeoJSON]: https://tools.ietf.org/html/rfc7946 diff --git a/manual/core/dse/graph/.nav b/manual/core/dse/graph/.nav deleted file mode 100644 index 
d7f30c149fc..00000000000 --- a/manual/core/dse/graph/.nav +++ /dev/null @@ -1,5 +0,0 @@ -script -fluent -fluent -options -results \ No newline at end of file diff --git a/manual/core/dse/graph/README.md b/manual/core/dse/graph/README.md deleted file mode 100644 index 6bcacd44c4e..00000000000 --- a/manual/core/dse/graph/README.md +++ /dev/null @@ -1,100 +0,0 @@ - - -## Graph - -The driver provides full support for DSE graph, the distributed graph database available in DataStax -Enterprise. The [CqlSession] interface extends [GraphSession], which adds specialized methods to -execute requests expressed in the [Gremlin] graph traversal language. - -*This manual only covers driver usage; for more information about server-side configuration and data -modeling, refer to the [DSE developer guide].* - -Note: graph capabilities require the [Apache TinkerPop™] library to be present on the classpath. The -driver has a non-optional dependency on that library, but if your application does not use graph at -all, it is possible to exclude it to minimize the number of runtime dependencies (see the -[Integration>Driver dependencies](../../integration/#driver-dependencies) section for more -details). If the library cannot be found at runtime, graph queries won't be available and a warning -will be logged, but the driver will otherwise operate normally (this is also valid for OSGi -deployments). - -If you do use graph, it is important to keep the precise TinkerPop version that the driver depends -on: unlike the driver, TinkerPop does not follow semantic versioning, so even a patch version change -(e.g. 3.3.0 vs 3.3.3) could introduce incompatibilities. So do not declare an explicit dependency in -your application, let the driver pull it transitively. - -### Overview - -There are 3 ways to execute graph requests: - -1. Passing a Gremlin script directly in a plain Java string. 
We'll refer to this as the - [script API](script/): - - ```java - CqlSession session = CqlSession.builder().build(); - - String script = "g.V().has('name', name)"; - ScriptGraphStatement statement = - ScriptGraphStatement.builder(script) - .withQueryParam("name", "marko") - .build(); - - GraphResultSet result = session.execute(statement); - for (GraphNode node : result) { - System.out.println(node.asVertex()); - } - ``` - -2. Building a traversal with the [TinkerPop fluent API](fluent/), and [executing it - explicitly](fluent/explicit/) with the session: - - ```java - import static com.datastax.dse.driver.api.core.graph.DseGraph.g; - - GraphTraversal traversal = g.V().has("name", "marko"); - FluentGraphStatement statement = FluentGraphStatement.newInstance(traversal); - - GraphResultSet result = session.execute(statement); - for (GraphNode node : result) { - System.out.println(node.asVertex()); - } - ``` - -3. Building a connected traversal with the fluent API, and [executing it - implicitly](fluent/implicit/) by invoking a terminal step: - - ```java - GraphTraversalSource g = DseGraph.g - .withRemote(DseGraph.remoteConnectionBuilder(session).build()); - - List vertices = g.V().has("name", "marko").toList(); - ``` - -All executions modes rely on the same set of [configuration options](options/). - -The script and explicit fluent API return driver-specific [result sets](results/). The implicit -fluent API returns Apache TinkerPop™ types directly. 
- -[Apache TinkerPop™]: http://tinkerpop.apache.org/ - -[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html -[GraphSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/GraphSession.html - -[DSE developer guide]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/graphTOC.html -[Gremlin]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/dseGraphAbout.html#dseGraphAbout__what-is-cql diff --git a/manual/core/dse/graph/fluent/.nav b/manual/core/dse/graph/fluent/.nav deleted file mode 100644 index 4be448834af..00000000000 --- a/manual/core/dse/graph/fluent/.nav +++ /dev/null @@ -1,2 +0,0 @@ -explicit -implicit \ No newline at end of file diff --git a/manual/core/dse/graph/fluent/README.md b/manual/core/dse/graph/fluent/README.md deleted file mode 100644 index c1645fdb234..00000000000 --- a/manual/core/dse/graph/fluent/README.md +++ /dev/null @@ -1,137 +0,0 @@ - - -## Fluent API - -The driver depends on [Apache TinkerPop™], a graph computing framework that provides a fluent API to -build Gremlin traversals. This allows you to write your graph requests directly in Java, like you -would in a Gremlin-groovy script: - -```java -// How this is initialized will depend on the execution model, see details below -GraphTraversalSource g = ... - -GraphTraversal traversal = g.V().has("name", "marko"); -``` - -### Execution models - -There are two ways to execute fluent traversals: - -* [explicitly](explicit/) by wrapping a traversal into a statement and passing it to - `session.execute`; -* [implicitly](implicit/) by building the traversal from a connected source, and calling a - terminal step. - -### Common topics - -The following apply regardless of the execution model: - -#### Limitations - -At the time of writing (DSE 6.0 / driver 4.0), some types of queries cannot be executed through the -fluent API: - -* system queries (e.g. 
creating / dropping a graph); -* configuration; -* DSE graph schema queries. - -You'll have to use the [script API](../script) for those use cases. - -#### Performance considerations - -Before sending a fluent graph statement over the network, the driver serializes the Gremlin -traversal into a byte array. **Traversal serialization happens on the client thread, even in -asynchronous mode**. In other words, it is done on: - -* the thread that calls `session.execute` or `session.executeAsync` for explicit execution; -* the thread that calls the terminal step for implicit execution. - -In practice, this shouldn't be an issue, but we've seen it become problematic in some corner cases -of our performance benchmarks: if a single thread issues a lot of `session.executeAsync` calls in a -tight loop, traversal serialization can dominate CPU usage on that thread, and become a bottleneck -for request throughput. - -If you believe that you're running into that scenario, start by profiling your application to -confirm that the client thread maxes out its CPU core; to solve the problem, distribute your -`session.executeAsync` calls onto more threads. - -#### Domain specific languages - -Gremlin can be extended with domain specific languages to make traversals more natural to write. For -example, considering the following query: - -```java -g.V().hasLabel("person").has("name", "marko"). - out("knows").hasLabel("person").has("name", "josh"); -``` - -A "social" DSL could be written to simplify it as: - -```java -socialG.persons("marko").knows("josh"); -``` - -TinkerPop provides an annotation processor to generate a DSL from an annotated interface. This is -covered in detail in the [TinkerPop documentation][TinkerPop DSL]. 
- -Once your custom traversal source is generated, here's how to use it: - -```java -// Non-connected source for explicit execution: -SocialTraversalSource socialG = DseGraph.g.getGraph().traversal(SocialTraversalSource.class); - -// Connected source for implicit execution: -SocialTraversalSource socialG = - DseGraph.g - .withRemote(DseGraph.remoteConnectionBuilder(session).build()) - .getGraph() - .traversal(SocialTraversalSource.class); -``` - -#### Search and geospatial predicates - -All the DSE predicates are available on the driver side: - -* for [search][DSE search], use the [Search] class: - - ```java - GraphTraversal traversal = - g.V().has("recipe", "instructions", Search.token("Saute")).values("name"); - ``` - -* for [geospatial queries][DSE geo], use the [Geo] class: - - ```java - GraphTraversal traversal = - g.V() - .has( - "location", - "point", - Geo.inside(Geo.point(2.352222, 48.856614), 4.2, Geo.Unit.DEGREES)) - .values("name"); - ``` - -[Search]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/predicates/Search.html -[Geo]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/predicates/Geo.html - -[Apache TinkerPop™]: http://tinkerpop.apache.org/ -[TinkerPop DSL]: http://tinkerpop.apache.org/docs/current/reference/#dsl -[DSE search]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/using/useSearchIndexes.html -[DSE geo]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/using/queryGeospatial.html diff --git a/manual/core/dse/graph/fluent/explicit/README.md b/manual/core/dse/graph/fluent/explicit/README.md deleted file mode 100644 index 163180a4a8a..00000000000 --- a/manual/core/dse/graph/fluent/explicit/README.md +++ /dev/null @@ -1,132 +0,0 @@ - - -## Explicit execution - -Fluent traversals can be wrapped into a [FluentGraphStatement] and passed to the session: - -```java -// A "dummy", non-connected traversal source that is not meant 
to be iterated directly, but instead -// serves as the basis to build fluent statements: -import static com.datastax.dse.driver.api.core.graph.DseGraph.g; - -GraphTraversal traversal = g.V().has("name", "marko"); -FluentGraphStatement statement = FluentGraphStatement.newInstance(traversal); - -GraphResultSet result = session.execute(statement); -for (GraphNode node : result) { - System.out.println(node.asVertex()); -} -``` - -### Creating fluent statements - -#### Factory method - -As shown above, [FluentGraphStatement.newInstance] creates a statement from a traversal directly. - -The default implementation returned by the driver is **immutable**; if you call additional methods -on the statement -- for example to set [options](../../options/) -- each method call will create a -new copy: - -```java -FluentGraphStatement statement = FluentGraphStatement.newInstance(traversal); -FluentGraphStatement statement2 = statement.setTimeout(Duration.ofSeconds(10)); - -assert statement2 != statement; -``` - -Immutability is good because it makes statements inherently **thread-safe**: you can share them in -your application and access them concurrently without any risk. - -On the other hand, it means a lot of intermediary copies if you often call methods on your -statements. Modern VMs are normally good at dealing with such short-lived objects, but if you're -worried about the performance impact, consider using a builder instead. - -Note: contrary to driver statements, Tinkerpop's `GraphTraversal` is mutable and therefore not -thread-safe. This is fine if you just wrap a traversal into a statement and never modify it -afterwards, but be careful not to share traversals and modify them concurrently. 
- -#### Builder - -Instead of creating a statement directly, you can pass your traversal to -[FluentGraphStatement.builder], chain method calls to set options, and finally call `build()`: - -```java -FluentGraphStatement statement1 = - FluentGraphStatement.builder(traversal) - .withTimeout(Duration.ofSeconds(10)) - .withIdempotence(true) - .build(); -``` - -The builder implementation is **mutable**: every method call returns the same object, only one -builder instance will be created no matter how many methods you call on it. As a consequence, the -builder object is **not thread-safe**. - -You can also initialize a builder from an existing statement: it will inherit all of its options. - -```java -FluentGraphStatement statement2 = - FluentGraphStatement.builder(statement1).withTimeout(Duration.ofSeconds(20)).build(); - -assert statement2.getTraversal().equals(statement1.getTraversal()); -assert statement2.getTimeout().equals(Duration.ofSeconds(20)); // overridden by the builder -assert statement2.isIdempotent(); // because statement1 was -``` - -### Batching traversals - -[BatchGraphStatement] allows you to execute multiple mutating traversals in the same transaction. -Like other types of statements, it is immutable and thread-safe, and can be created either with a -[factory method][BatchGraphStatement.newInstance] or a [builder][BatchGraphStatement.builder]: - -```java -GraphTraversal traversal1 = g.addV("person").property("name", "batch1").property("age", 1); -GraphTraversal traversal2 = g.addV("person").property("name", "batch2").property("age", 2); - -// Each method call creates a copy: -BatchGraphStatement batch1 = BatchGraphStatement.newInstance() - .addTraversal(traversal1) - .addTraversal(traversal2); - -// Uses a single, mutable builder instance: -BatchGraphStatement batch2 = BatchGraphStatement.builder() - .addTraversal(traversal1) - .addTraversal(traversal2) - .build(); -``` - -Traversal batches are only available with DSE 6.0 or above. 
- -### Prepared statements - -At the time of writing (DSE 6.0), prepared graph statements are not supported yet; they will be -added in a future version. - ------ - -See also the [parent page](../) for topics common to all fluent traversals. - -[FluentGraphStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.html -[FluentGraphStatement.newInstance]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.html#newInstance-org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal- -[FluentGraphStatement.builder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.html#builder-org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal- -[BatchGraphStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.html -[BatchGraphStatement.newInstance]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.html#newInstance-- -[BatchGraphStatement.builder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.html#builder-- diff --git a/manual/core/dse/graph/fluent/implicit/README.md b/manual/core/dse/graph/fluent/implicit/README.md deleted file mode 100644 index f838c376022..00000000000 --- a/manual/core/dse/graph/fluent/implicit/README.md +++ /dev/null @@ -1,71 +0,0 @@ - - -## Implicit execution - -Instead of passing traversals to the driver, you can create a *remote traversal source* connected to -the DSE cluster: - -```java -CqlSession session = CqlSession.builder().build(); - -GraphTraversalSource g = - AnonymousTraversalSource.traversal().withRemote(DseGraph.remoteConnectionBuilder(session).build()); -``` - -Then build traversals from that source. 
Whenever you reach a [terminal step] \(such as `next()`, -`toList()`...), the DSE driver will be invoked under the covers: - -```java -List vertices = g.V().has("name", "marko").toList(); -``` - -This lets you use the traversal as if it were working against a local graph; all the communication -with DSE is done transparently. Note however that the returned objects (vertices, edges...) are -completely *detached*: even though they contain the complete data, modifications made to them will -not be reflected on the server side. - -Traversal sources with different configurations can easily be created through execution profiles in -the [configuration](../../../../configuration/): - -``` -datastax-java-driver { - profiles { - graph-oltp { - basic.graph.traversal-source = a - basic.graph.timeout = 30 seconds - } - } -} -``` - -Pass the profile name to the remote connection builder: - -```java -GraphTraversalSource a = AnonymousTraversalSource.traversal().withRemote( - DseGraph.remoteConnectionBuilder(session) - .withExecutionProfileName("graph-oltp") - .build()); -``` - ------ - -See also the [parent page](../) for topics common to all fluent traversals. - -[terminal step]: http://tinkerpop.apache.org/docs/current/reference/#terminal-steps diff --git a/manual/core/dse/graph/options/README.md b/manual/core/dse/graph/options/README.md deleted file mode 100644 index e4649ff34f3..00000000000 --- a/manual/core/dse/graph/options/README.md +++ /dev/null @@ -1,179 +0,0 @@ - - -## Graph options - -There are various [configuration](../../../configuration/) options that control the execution of -graph statements. They can also be overridden programmatically on individual statements. 
- -### Setting options - -Given the following configuration: - -``` -datastax-java-driver { - - basic.graph.timeout = 3 seconds - - profiles { - graph-oltp { - basic.graph.timeout = 30 seconds - } - } -} -``` - -This statement inherits the timeout from the default profile: - -```java -ScriptGraphStatement statement = ScriptGraphStatement.newInstance("g.V().next()"); -assert statement.getTimeout().equals(Duration.ofSeconds(3)); -``` - -This statement inherits the timeout from a named profile: - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("g.V().next()").setExecutionProfileName("graph-oltp"); -assert statement.getTimeout().equals(Duration.ofSeconds(30)); -``` - -This statement overrides the timeout programmatically; that takes precedence over the configuration: - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("g.V().next()").setTimeout(Duration.ofSeconds(5)); -``` - -Programmatic overrides are also available in statement builders: - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.builder("g.V().next()").withTimeout(Duration.ofSeconds(5)).build(); -``` - -Whether you use the configuration or programmatic API depends on the use case; in general, we -recommend trying execution profiles first, if you can identify static categories of statements that -share the same options. Resort to the API for specific options that only apply to a single -statement, or if the value is only known at runtime. - -### Available options - -#### Graph name - -The `basic.graph.name` option defines the name of the graph you're querying. - -This doesn't have to be set all the time. In fact, some queries explicitly require no graph name, -for example those that access the `system` query. 
If you try to execute them with a graph name set, -you'll get an error: - -```java -// Don't do this: executing a system query with the graph name set -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()") - .setGraphName("test"); -session.execute(statement); -// InvalidQueryException: No such property: system for class: Script2 -``` - -If you set the graph name globally in the configuration, you'll need to unset it for system queries. -To do that, set it to `null`, or use the more explicit equivalent `is-system-query`: - -``` -datastax-java-driver { - basic.graph.name = my_graph - - profiles { - graph-system { - # Don't inherit the graph name here - basic.graph.is-system-query = true - } - } -} -``` - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()") - .setExecutionProfileName("graph-system"); - -// Programmatic alternative: -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()") - .setSystemQuery(true); -``` - -#### Traversal source - -`basic.graph.traversal-source` defines the underlying engine used to create traversals. - -Set this to `g` for regular OLTP queries, or `a` for OLAP queries. - -#### Consistency level - -Graph statements use the same option as CQL: `basic.request.consistency`. - -However, DSE graph also provides a finer level of tuning: a single traversal may produce multiple -internal storage queries, some of which are reads, and others writes. The read and write consistency -levels can be configured independently with `basic.graph.read-consistency` and -`basic.graph.write-consistency`. - -If any of these is set, it overrides the consistency level for that type of query; otherwise, the -global option is used. - -#### Timeout - -Graph statements have a dedicated timeout option: `basic.graph.timeout`. 
This is because the timeout -behaves a bit differently with DSE graph: by default, it is unset and the driver will wait until the -server replies (there are server-side timeouts that limit how long the request will take). - -If a timeout is defined on the client, the driver will fail the request after that time, without -waiting for a reply. But the timeout is also sent alongside the initial request, and the server will -adjust its own timeout to ensure that it doesn't keep working for a result that the client is no -longer waiting for. - -#### Graph protocol version - -DSE graph relies on the Cassandra native protocol, but it extends it with a sub-protocol that has -its own versioning scheme. - -`advanced.graph.sub-protocol` controls the graph protocol version to use for each statement. It is -unset by default, and you should almost never have to change it: the driver sets it automatically -based on the information it knows about the server. - -There is one exception: if you use the [script API](../script/) against a legacy DSE version (5.0.3 -or older), the driver infers the wrong protocol version. This manifests as a `ClassCastException` -when you try to deserialize complex result objects, such as vertices: - -```java -GraphResultSet result = - session.execute(ScriptGraphStatement.newInstance("g.V().next()")); -result.one().asVertex(); -// ClassCastException: java.util.LinkedHashMap cannot be cast to org.apache.tinkerpop.gremlin.structure.Vertex -``` - -If you run into that situation, force the sub-protocol to `graphson-1.0` for script statements -(that's not necessary for fluent statements). - -Currently, if the Graph sub-protocol version is not specified on a given GraphStatement, and it's -not explicitly set through `advanced.graph.sub-protocol` in configuration, the version of DSE to -which the driver is connected will determine the default sub-protocol version used by the driver. 
-For DSE 6.8.0 and later, the driver will pick "graph-binary-1.0" as the default sub-protocol -version. For DSE 6.7.x and older (or in cases where the driver can't determine the DSE version), the -driver will pick "graphson-2.0" as the default sub-protocol version. diff --git a/manual/core/dse/graph/results/README.md b/manual/core/dse/graph/results/README.md deleted file mode 100644 index 3b4d25fa012..00000000000 --- a/manual/core/dse/graph/results/README.md +++ /dev/null @@ -1,163 +0,0 @@ - - -## Handling graph results - -[Script queries](../script/) and [explicit fluent traversals](../fluent/explicit/) return graph -result sets, which are essentially iterables of [GraphNode]. - -### Synchronous / asynchronous result - -Like their CQL counterparts, graph result sets come in two forms, depending on the way the query -was executed. - -* `session.execute` returns a [GraphResultSet]. It can be iterated directly, and will return the - whole result set, triggering background fetches if the query is paged: - - ```java - for (GraphNode n : resultSet) { - System.out.println(n); - } - ``` - -* `session.executeAsync` returns an [AsyncGraphResultSet]. It only holds the current page of - results, accessible via the `currentPage()` method. If the query is paged, the next pages must be - fetched explicitly using the `hasMorePages()` and `fetchNextPage()` methods. See [Asynchronous - paging](../../../paging/#asynchronous-paging) for more details about how to work with async - types. - -*Note: at the time of writing (DSE 6.0), graph queries are never paged. Results are always returned -as a single page. 
However, paging is on the roadmap for a future DSE version; the driver APIs -reflect that, to avoid breaking changes when the feature is introduced.* - -Both types have a `one()` method, to use when you know there is exactly one node, or are only -interested in the first one: - -```java -GraphNode n = resultSet.one(); -``` - -### Working with graph nodes - -[GraphNode] wraps the responses returned by the server. Use the `asXxx()` methods to coerce a node -to a specific type: - -```java -FluentGraphStatement statement = FluentGraphStatement.newInstance(g.V().count()); -GraphNode n = session.execute(statement).one(); -System.out.printf("The graph has %s vertices%n", n.asInt()); -``` - -If the result is an array or "object" (in the JSON sense: a collection of named fields), you can -iterate its children: - -```java -if (n.isList()) { - for (int i = 0; i < n.size(); i++) { - GraphNode child = n.getByIndex(i); - System.out.printf("Element at position %d: %s%n", i, child); - } - - // Alternatively, convert to a list: - List l = n.asList(); -} - -if (n.isMap()) { - for (Object key : n.keys()) { - System.out.printf("Element at key %s: %s%n", key, n.getByKey(key)); - } - - // Alternatively, convert to a map: - Map m = n.asMap(); -} -``` - -#### Graph structural types - -If the traversal returns graph elements (like vertices and edges), the results can be converted to -the corresponding TinkerPop types: - -```java -GraphNode n = session.execute(FluentGraphStatement.newInstance( - g.V().hasLabel("test_vertex") -)).one(); -Vertex vertex = n.asVertex(); - -n = session.execute(FluentGraphStatement.newInstance( - g.V().hasLabel("test_vertex").outE() -)).one(); -Edge edge = n.asEdge(); - -n = session.execute(FluentGraphStatement.newInstance( - g.V().hasLabel("test_vertex") - .outE() - .inV() - .path() -)).one(); -Path path = n.asPath(); - -n = session.execute(FluentGraphStatement.newInstance( - g.V().hasLabel("test_vertex") - .properties("name") -)).one(); -// .properties() 
returns a list of properties, so we get the first one and transform it as a -// VertexProperty -VertexProperty vertexProperty = n.getByIndex(0).asVertexProperty(); -``` - -#### Data type compatibility matrix - -Dse graph exposes several [data types][DSE data types] when defining a schema for a graph. They -translate into specific Java classes when the data is returned from the server. - -Here is an exhaustive compatibility matrix (for DSE 6.0): - -| DSE graph | Java Driver | -|------------|---------------------| -| bigint | Long | -| blob | byte[] | -| boolean | Boolean | -| date | java.time.LocalDate | -| decimal | BigDecimal | -| double | Double | -| duration | java.time.Duration | -| float | Float | -| inet | InetAddress | -| int | Integer | -| linestring | LineString | -| point | Point | -| polygon | Polygon | -| smallint | Short | -| text | String | -| time | java.time.LocalTime | -| timestamp | java.time.Instant | -| uuid | UUID | -| varint | BigInteger | - -If a type doesn't have a corresponding `asXxx()` method, use the variant that takes a type token: - -```java -UUID uuid = graphNode.as(UUID.class); -``` - -[GraphNode]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/GraphNode.html -[GraphResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/GraphResultSet.html -[AsyncGraphResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.html - -[DSE data types]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/reference/refDSEGraphDataTypes.html diff --git a/manual/core/dse/graph/script/README.md b/manual/core/dse/graph/script/README.md deleted file mode 100644 index cec8e4e94ef..00000000000 --- a/manual/core/dse/graph/script/README.md +++ /dev/null @@ -1,125 +0,0 @@ - - -## Script API - -The script API handles Gremlin-groovy requests provided as plain Java strings. 
To execute a script, -wrap it into a [ScriptGraphStatement] and pass it to the session: - -```java -CqlSession session = CqlSession.builder().build(); - -String groovyScript = "system.graph('demo').ifNotExists().create()"; -ScriptGraphStatement statement = ScriptGraphStatement.newInstance(groovyScript); -session.execute(statement); -``` - -### Creating script statements - -#### Factory method - -As demonstrated above, the simplest way to create a script statement is to pass the Gremlin-groovy -string to [ScriptGraphStatement.newInstance]. - -The default implementation returned by the driver is **immutable**; if you call additional methods -on the statement -- for example to set [options](../options/) -- each method call will create a new -copy: - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()"); -ScriptGraphStatement statement2 = statement.setTimeout(Duration.ofSeconds(10)); - -assert statement2 != statement; -``` - -Immutability is good because it makes statements inherently **thread-safe**: you can share them in -your application and access them concurrently without any risk. - -On the other hand, it means a lot of intermediary copies if you often call methods on your -statements. Modern VMs are normally good at dealing with such short-lived objects, but if you're -worried about the performance impact, consider using a builder instead. 
- -#### Builder - -Instead of creating a statement directly, you can pass your Gremlin-groovy string to -[ScriptGraphStatement.builder], chain method calls to set options, and finally call `build()`: - -```java -ScriptGraphStatement statement1 = - ScriptGraphStatement.builder("system.graph('demo').ifNotExists().create()") - .withTimeout(Duration.ofSeconds(10)) - .withIdempotence(true) - .build(); -``` - -The builder implementation is **mutable**: every method call returns the same object, only one -builder instance will be created no matter how many methods you call on it. As a consequence, the -builder object is **not thread-safe**. - -You can also initialize a builder from an existing statement: it will inherit all of its options. - -```java -ScriptGraphStatement statement2 = - ScriptGraphStatement.builder(statement1).withTimeout(Duration.ofSeconds(20)).build(); - -assert statement2.getScript().equals(statement1.getScript()); -assert statement2.getTimeout().equals(Duration.ofSeconds(20)); // overridden by the builder -assert statement2.isIdempotent(); // because statement1 was -``` - -### Parameters - -Gremlin-groovy scripts accept parameters, which are always named. Note that, unlike in CQL, -placeholders are not prefixed with ":". - -To manage parameters on an existing statement, use `setQueryParam` / `removeQueryParam`: - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.newInstance("g.addV(label, vertexLabel)") - .setQueryParam("vertexLabel", "test_vertex_2"); -``` - -On the builder, use `withQueryParam` / `withoutQueryParams`: - -```java -ScriptGraphStatement statement = - ScriptGraphStatement.builder("g.addV(label, vertexLabel)") - .withQueryParam("vertexLabel", "test_vertex_2") - .build(); -``` - -Alternatively, `withQueryParams` takes multiple parameters as a map. - -### Use cases for the script API - -Building requests as Java strings can be unwieldy, especially for long scripts. 
Besides, the script -API is a bit less performant on the server side. Therefore we recommend the -[Fluent API](../fluent/) instead for graph traversals. - -Note however that some types of queries can only be performed through the script API: - -* system queries (e.g. creating / dropping a graph); -* configuration; -* DSE graph schema queries. - -[ScriptGraphStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.html -[ScriptGraphStatement.newInstance]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.html#newInstance-java.lang.String- -[ScriptGraphStatement.builder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.html#builder-java.lang.String- diff --git a/manual/core/graalvm/README.md b/manual/core/graalvm/README.md deleted file mode 100644 index 5fcaaff9178..00000000000 --- a/manual/core/graalvm/README.md +++ /dev/null @@ -1,334 +0,0 @@ - - -## GraalVM native images - -### Quick overview - -* [GraalVM native images](https://www.graalvm.org/reference-manual/native-image/) can be built with - no additional configuration starting with driver 4.13.0. -* But extra configurations are required in a few cases: - * When using [reactive programming](../reactive); - * When using [Jackson](../integration#Jackson); - * When using LZ4 [compression](../compression/); - * Depending on the [logging backend](../logging) in use. -* DSE-specific features: - * [Geospatial types](../dse/geotypes) are supported. - * [DSE Graph](../dse/graph) is not officially supported, although it may work. -* The [shaded jar](../shaded_jar) is not officially supported, although it may work. 
- ------ - -### Concepts - -Starting with version 4.13.0, the driver ships with [embedded GraalVM configuration files] that -allow GraalVM native images including the driver to be built without hassle, barring a few -exceptions and caveats listed below. - -[embedded GraalVM configuration files]:https://www.graalvm.org/reference-manual/native-image/BuildConfiguration/#embedding-a-configuration-file - -### Classes instantiated by reflection - -The driver instantiates its components by reflection. The actual classes that will be instantiated -in this way need to be registered for reflection. All built-in implementations of various driver -components, such as `LoadBalancingPolicy` or `TimestampGenerator`, are automatically registered for -reflection, along with a few other internal components tha are also instantiated by reflection. -_You don't need to manually register any of these built-in implementations_. - -But if you intend to use a custom implementation in lieu of a driver built-in class, then it is your -responsibility to register that custom implementation for reflection. 
- -For example, assuming that you have the following load balancing policy implementation: - -```java - -package com.example.app; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -public class CustomLoadBalancingPolicy extends DefaultLoadBalancingPolicy { - - public CustomLoadBalancingPolicy(DriverContext context, String profileName) { - super(context, profileName); - } - // rest of class omitted for brevity -} -``` - -And assuming that you declared the above class in your application.conf file as follows: - -```hocon -datastax-java-driver.basic{ - load-balancing-policy.class = com.example.app.CustomLoadBalancingPolicy -} -``` - -Then you will have to register that class for reflection: - -1. Create the following reflection.json file, or add the entry to an existing file: - -```json -[ - { "name": "com.example.app.CustomLoadBalancingPolicy", "allPublicConstructors": true } -] -``` - -2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` - flag and point it to the file created above. - -Note: some frameworks allow you to simplify the registration process. For example, Quarkus offers -the `io.quarkus.runtime.annotations.RegisterForReflection` annotation that you can use to annotate -your class: - -```java -@RegisterForReflection -public class CustomLoadBalancingPolicy extends DefaultLoadBalancingPolicy { - //... -} -``` - -In this case, no other manual configuration is required for the above class to be correctly -registered for reflection. - -### Configuration resources - -The default driver [configuration](../configuration) mechanism is based on the TypeSafe Config -library. 
TypeSafe Config looks for a few classpath resources when initializing the configuration: -`reference.conf`, `application.conf`, `application.json`, `application.properties`. _These classpath -resources are all automatically included in the native image: you should not need to do it -manually_. See [Accessing Resources in Native Images] for more information on how classpath -resources are handled in native images. - -[Accessing Resources in Native Images]: https://www.graalvm.org/reference-manual/native-image/Resources/ - -### Configuring the logging backend - -When configuring [logging](../logging), the choice of a backend must be considered carefully, as -most logging backends resort to reflection during their configuration phase. - -By default, GraalVM native images provide support for the java.util.logging (JUL) backend. See -[this page](https://www.graalvm.org/reference-manual/native-image/Logging/) for more information. - -For other logging backends, please refer to the logging library documentation to find out if GraalVM -native images are supported. - -### Using reactive-style programming - -The [reactive execution model](../reactive) is compatible with GraalVM native images, but the -following configurations must be added: - -1. Create the following reflection.json file, or add the entry to an existing file: - -```json -[ - { "name": "org.reactivestreams.Publisher" } -] -``` - -2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` - flag and point it to the file created above. - -### Using the Jackson JSON library - -[Jackson](https://github.com/FasterXML/jackson) is used in [a few places](../integration#jackson) in -the driver, but is an optional dependency; if you intend to use Jackson, the following -configurations must be added: - -1. 
Create the following reflection.json file, or add these entries to an existing file: - -```json -[ - { "name": "com.fasterxml.jackson.core.JsonParser" }, - { "name": "com.fasterxml.jackson.databind.ObjectMapper" } -] -``` - -**Important**: when using the shaded jar – which is not officially supported on GraalVM native -images, see below for more details – replace the above entries with the below ones: - -```json -[ - { "name": "com.datastax.oss.driver.shaded.fasterxml.jackson.core.JsonParser" }, - { "name": "com.datastax.oss.driver.shaded.fasterxml.jackson.databind.ObjectMapper" } -] -``` -2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` - flag and point it to the file created above. - -### Enabling compression - -When using [compression](../compression/), only LZ4 can be enabled in native images. **Snappy -compression is not supported.** - -In order for LZ4 compression to work in a native image, the following additional GraalVM -configuration is required: - -1. 
Create the following reflection.json file, or add these entries to an existing file: - -```json -[ - { "name" : "net.jpountz.lz4.LZ4Compressor" }, - { - "name" : "net.jpountz.lz4.LZ4JNICompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4JavaSafeCompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4JavaUnsafeCompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4HCJavaSafeCompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4HCJavaUnsafeCompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4JavaSafeSafeDecompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4JavaSafeFastDecompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4JavaUnsafeSafeDecompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - }, - { - "name" : "net.jpountz.lz4.LZ4JavaUnsafeFastDecompressor", - "allDeclaredConstructors": true, - "allPublicFields": true - } -] -``` - -2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` - flag and point it to the file created above. - -### Native calls - -The driver performs a few [native calls](../integration#native-libraries) using -[JNR](https://github.com/jnr). - -Starting with driver 4.7.0, native calls are also possible in a GraalVM native image, without any -extra configuration. - -### Using DataStax Enterprise (DSE) features - -#### DSE Geospatial types - -DSE [Geospatial types](../dse/geotypes) are supported on GraalVM native images; the following -configurations must be added: - -1. 
Create the following reflection.json file, or add the entry to an existing file: - -```json -[ - { "name": "com.esri.core.geometry.ogc.OGCGeometry" } -] -``` - -**Important**: when using the shaded jar – which is not officially supported on GraalVM native -images, as stated above – replace the above entry with the below one: - -```json -[ - { "name": "com.datastax.oss.driver.shaded.esri.core.geometry.ogc.OGCGeometry" } -] -``` - -2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` - flag and point it to the file created above. - -#### DSE Graph - -**[DSE Graph](../dse/graph) is not officially supported on GraalVM native images.** - -The following configuration can be used as a starting point for users wishing to build a native -image for a DSE Graph application. DataStax does not guarantee however that the below configuration -will work in all cases. If the native image build fails, a good option is to use GraalVM's -[Tracing Agent](https://www.graalvm.org/reference-manual/native-image/Agent/) to understand why. - -1. 
Create the following reflection.json file, or add these entries to an existing file: - -```json -[ - { "name": "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0" }, - { "name": "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal" }, - { "name": "org.apache.tinkerpop.gremlin.structure.Graph", - "allDeclaredConstructors": true, - "allPublicConstructors": true, - "allDeclaredMethods": true, - "allPublicMethods": true - }, - { "name": "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph", - "allDeclaredConstructors": true, - "allPublicConstructors": true, - "allDeclaredMethods": true, - "allPublicMethods": true - }, - { "name": "org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph", - "allDeclaredConstructors": true, - "allPublicConstructors": true, - "allDeclaredMethods": true, - "allPublicMethods": true - }, - { "name": "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", - "allDeclaredConstructors": true, - "allPublicConstructors": true, - "allDeclaredMethods": true, - "allPublicMethods": true - } -] -``` - -2. When invoking the native image builder, add the following flags: - -``` --H:ReflectionConfigurationFiles=reflection.json ---initialize-at-build-time=org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0 ---initialize-at-build-time=org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer -``` - -### Using the shaded jar - -**The [shaded jar](../shaded_jar) is not officially supported in a GraalVM native image.** - -However, it has been reported that the shaded jar can be included in a GraalVM native image as a -drop-in replacement for the regular driver jar for simple applications, without any extra GraalVM -configuration. 
diff --git a/manual/core/idempotence/README.md b/manual/core/idempotence/README.md deleted file mode 100644 index be784dfa40b..00000000000 --- a/manual/core/idempotence/README.md +++ /dev/null @@ -1,83 +0,0 @@ - - -## Query idempotence - -### Quick overview - -A request is *idempotent* if executing it multiple times leaves the database in the same state as -executing it only once. - -* `basic.request.default-idempotence` in the configuration (defaults to false). -* can be overridden per statement [Statement.setIdempotent] or [StatementBuilder.setIdempotence]. -* retries and speculative executions only happen for idempotent statements. - ------ - -For example: - -* `update my_table set list_col = [1] where pk = 1` is idempotent: no matter how many times it gets - executed, `list_col` will always end up with the value `[1]`; -* `update my_table set list_col = [1] + list_col where pk = 1` is not idempotent: if `list_col` was - initially empty, it will contain `[1]` after the first execution, `[1, 1]` after the second, etc. - -Idempotence matters because the driver sometimes re-runs requests automatically: - -* [retries](../retries): if we're waiting for a response from a node and the connection gets - dropped, the default retry policy automatically retries on another node. But we can't know what - went wrong with the first node: maybe it went down, or maybe it was just a network issue; in any - case, it might have applied the changes already. Therefore non-idempotent requests are never - retried. - -* [speculative executions](../speculative_execution): if they are enabled and a node takes too long - to respond, the driver queries another node to get the response faster. But maybe both nodes will - eventually apply the changes. Therefore non-idempotent requests are never speculatively executed. 
- -In most cases, you need to flag your statements manually: - -```java -SimpleStatement statement = - SimpleStatement.newInstance("SELECT first_name FROM user WHERE id=1") - .setIdempotent(true); - -// Or with a builder: -SimpleStatement statement = - SimpleStatement.builder("SELECT first_name FROM user WHERE id=1") - .setIdempotence(true) - .build(); -``` - -If you don't, they default to the value defined in the [configuration](../configuration/) by the -`basic.request.default-idempotence` option; out of the box, it is set to `false`. - -When you prepare a statement, its idempotence carries over to bound statements: - -```java -PreparedStatement pst = session.prepare( - SimpleStatement.newInstance("SELECT first_name FROM user WHERE id=?") - .setIdempotent(true)); -BoundStatement bs = pst.bind(1); -assert bs.isIdempotent(); -``` - -The query builder tries to infer idempotence automatically; refer to -[its manual](../../query_builder/idempotence/) for more details. - -[Statement.setIdempotent]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setIdempotent-java.lang.Boolean- -[StatementBuilder.setIdempotence]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/StatementBuilder.html#setIdempotence-java.lang.Boolean- diff --git a/manual/core/integration/README.md b/manual/core/integration/README.md deleted file mode 100644 index e2c7bc218ee..00000000000 --- a/manual/core/integration/README.md +++ /dev/null @@ -1,688 +0,0 @@ - - -## Integration - -### Quick overview - -* sample project structures for Maven and Gradle. -* explanations about [driver dependencies](#driver-dependencies) and when they can be manually - excluded. - -Note: guidelines to build a GraalVM native image can be found [here](../graalvm). - ------ - -### Which artifact(s) should I use? - -There are multiple driver artifacts under the group id -[com.datastax.oss](https://search.maven.org/search?q=g:com.datastax.oss). 
Here's how to pick the -right dependencies: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FeatureArtifact(s)Comments
    - Core functionality: executing queries with CqlSession.execute(), processing the - results with ResultSet, etc. - java‑driver‑core
    - Same as the above, but without explicit dependencies to Netty, - Jackson or ESRI. - java‑driver‑core‑shaded - Replaces java‑driver‑core.
    - See this page. -
    - Query builder: generating CQL query strings programmatically. - java‑driver‑query‑builder
    - Object mapper: generating the boilerplate to execute queries and - convert the results into your own domain classes. - - java‑driver‑mapper‑processor
    - java‑driver‑mapper‑runtime -
    - Both artifacts are needed.
    - See this page. -
    - Instrumenting the driver and gathering metrics using the Micrometer metrics library. - java‑driver‑metrics‑micrometerSee this page.
    - Instrumenting the driver and gathering metrics using the MicroProfile Metrics library. - java‑driver‑metrics‑microprofileSee this page.
    - "Bill Of Materials": can help manage versions if you use multiple driver artifacts. - java‑driver‑bomSee this page.
    - Writing integration tests that run the driver against Cassandra or Simulacron. - java‑driver‑test‑infra - Those APIs are not covered in this manual, but you can look at the driver's contribution - guidelines and internal tests for - guidance. -
    - -### Minimal project structure - -We publish the driver to [Maven central][central_oss]. Most modern build tools can download the -dependency automatically. - -#### Maven - -Create the following 4 files: - -``` -$ find . -type f -./pom.xml -./src/main/resources/application.conf -./src/main/resources/logback.xml -./src/main/java/Main.java -``` - -##### Project descriptor - -`pom.xml` is the [Project Object Model][maven_pom] that describes your application. We declare the -dependencies, and tell Maven that we're going to use Java 8: - -```xml - - - 4.0.0 - - com.example.yourcompany - yourapp - 1.0.0-SNAPSHOT - - - - org.apache.cassandra - java-driver-core - ${driver.version} - - - ch.qos.logback - logback-classic - 1.2.3 - - - - - - - maven-compiler-plugin - - 1.8 - 1.8 - - - - - -``` - -##### Application configuration - -`application.conf` is not stricly necessary, but it illustrates an important point about the -driver's [configuration](../configuration/): you override any of the driver's default options here. - -``` -datastax-java-driver { - basic.session-name = poc -} -``` - -In this case, we just specify a custom name for our session, it will appear in the logs. - -##### Logging configuration - -For this example, we choose Logback as our [logging framework](../logging/) (we added the dependency -in `pom.xml`). `logback.xml` configures it to send the driver's `INFO` logs to the console. - -```xml - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - -``` - -Again, this is not strictly necessary: a truly minimal example could run without the Logback -dependency, or this file; but the default behavior is a bit verbose. 
- -##### Main class - -`Main.java` is the canonical example introduced in our [quick start](../#quick-start); it connects -to Cassandra, queries the server version and prints it: - -```java -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; - -public class Main { - public static void main(String[] args) { - try (CqlSession session = CqlSession.builder().build()) { - ResultSet rs = session.execute("SELECT release_version FROM system.local"); - System.out.println(rs.one().getString(0)); - } - } -} -``` - -Make sure you have a Cassandra instance running on 127.0.0.1:9042 (otherwise, you use -[CqlSession.builder().addContactPoint()][SessionBuilder.addContactPoint] to use a different -address). - -##### Running - -To launch the program from the command line, use: - -``` -$ mvn compile exec:java -Dexec.mainClass=Main -``` - -You should see output similar to: - -``` -... -[INFO] ------------------------------------------------------------------------ -[INFO] Building yourapp 1.0.0-SNAPSHOT -[INFO] ------------------------------------------------------------------------ -... (at this point, Maven will download the dependencies the first time) -[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ yourapp --- -[WARNING] Using platform encoding (UTF-8 actually) to copy filtered resources, i.e. build is platform dependent! 
-[INFO] Copying 1 resource -[INFO] -[INFO] --- maven-compiler-plugin:2.5.1:compile (default-compile) @ yourapp --- -[INFO] Nothing to compile - all classes are up to date -[INFO] -[INFO] --- exec-maven-plugin:1.3.1:java (default-cli) @ yourapp --- -11:39:45.355 [Main.main()] INFO c.d.o.d.i.c.DefaultMavenCoordinates - Apache Cassandra Java Driver (com.datastax.oss:java-driver-core) version 4.0.1 -11:39:45.648 [poc-admin-0] INFO c.d.o.d.internal.core.time.Clock - Using native clock for microsecond precision -11:39:45.649 [poc-admin-0] INFO c.d.o.d.i.c.metadata.MetadataManager - [poc] No contact points provided, defaulting to /127.0.0.1:9042 -3.11.2 -[INFO] ------------------------------------------------------------------------ -[INFO] BUILD SUCCESS -[INFO] ------------------------------------------------------------------------ -[INFO] Total time: 11.777 s -[INFO] Finished at: 2018-06-18T11:32:49-08:00 -[INFO] Final Memory: 16M/277M -[INFO] ------------------------------------------------------------------------ -``` - -#### Gradle - -[Initialize a new project][gradle_init] with Gradle. 
- -Modify `build.gradle` to add the dependencies: - -```groovy -group 'com.example.yourcompany' -version '1.0.0-SNAPSHOT' - -apply plugin: 'java' - -sourceCompatibility = 1.8 - -repositories { - mavenCentral() -} - -dependencies { - compile group: 'com.datastax.oss', name: 'java-driver-core', version: '${driver.version}' - compile group: 'ch.qos.logback', name: 'logback-classic', version: '1.2.3' -} -``` - -Then place [application.conf](#application-configuration), [logback.xml](#logging-configuration) and -[Main.java](#main-class) in the same locations, and with the same contents, as in the Maven example: - -``` -./src/main/resources/application.conf -./src/main/resources/logback.xml -./src/main/java/Main.java -``` - -Optionally, if you want to run from the command line, add the following at the end of -`build.gradle`: - -```groovy -task execute(type:JavaExec) { - main = 'Main' - classpath = sourceSets.main.runtimeClasspath -} -``` - -Then launch with: - -``` -$ ./gradlew execute -``` - -You should see output similar to: - -``` -$ ./gradlew execute -:compileJava -:processResources -:classes -:execute -13:32:25.339 [main] INFO c.d.o.d.i.c.DefaultMavenCoordinates - Apache Cassandra Java Driver (com.datastax.oss:java-driver-core) version 4.0.1-alpha4-SNAPSHOT -13:32:25.682 [poc-admin-0] INFO c.d.o.d.internal.core.time.Clock - Using native clock for microsecond precision -13:32:25.683 [poc-admin-0] INFO c.d.o.d.i.c.metadata.MetadataManager - [poc] No contact points provided, defaulting to /127.0.0.1:9042 -3.11.2 - -BUILD SUCCESSFUL -``` - -#### Manually (from the binary tarball) - -If your build tool can't fetch dependencies from Maven central, we publish a binary tarball on the -[DataStax download server][downloads]. - -The driver and its dependencies must be in the compile-time classpath. Application resources, such -as `application.conf` and `logback.xml` in our previous examples, must be in the runtime classpath. 
- -### JPMS support - -All the driver's artifacts are JPMS automatic modules. - -### Driver dependencies - -The driver depends on a number of third-party libraries; some of those dependencies are opt-in, -while others are present by default, but may be excluded under specific circumstances. - -Here's a rundown of what you can customize: - -#### Netty - -[Netty](https://netty.io/) is the NIO framework that powers the driver's networking layer. - -It is a required dependency, but we provide a a [shaded JAR](../shaded_jar/) that relocates it to a -different Java package; this is useful to avoid dependency hell if you already use Netty in another -part of your application. - -#### Typesafe config - -[Typesafe config](https://lightbend.github.io/config/) is used for our file-based -[configuration](../configuration/). - -It is a required dependency if you use the driver's built-in configuration loader, but this can be -[completely overridden](../configuration/#bypassing-typesafe-config) with your own implementation, -that could use a different framework or an ad-hoc solution. - -In that case, you can exclude the dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - com.typesafe - config - - - -``` - -#### Native libraries - -The driver performs native calls with [JNR](https://github.com/jnr). This is used in two cases: - -* to access a microsecond-precision clock in [timestamp generators](../query_timestamps/); -* to get the process ID when generating [UUIDs][Uuids]. - -In both cases, this is completely optional; if system calls are not available on the current -platform, or the library fails to load for any reason, the driver falls back to pure Java -workarounds. 
- -If you don't want to use system calls, or already know (from looking at the driver's logs) that they -are not available on your platform, you can exclude the following dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - com.github.jnr - jnr-posix - - - -``` - -#### Compression libraries - -The driver supports compression with either [LZ4](https://github.com/yawkat/lz4-java) or -[Snappy](http://google.github.io/snappy/). - -These dependencies are optional; you have to add them explicitly in your application in order to -enable compression. See the [Compression](../compression/) page for more details. - -#### Metrics - -The driver exposes [metrics](../metrics/) through the -[Dropwizard](http://metrics.dropwizard.io/4.1.2/) library. - -The dependency is declared as required, but metrics are optional. If you've disabled all metrics, or -if you are using a different metrics library, and you never call [Session.getMetrics] anywhere in -your application, then you can remove the dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - io.dropwizard.metrics - metrics-core - - - -``` - -In addition, when using Dropwizard, "timer" metrics use -[HdrHistogram](http://hdrhistogram.github.io/HdrHistogram/) to record latency percentiles. At the -time of writing, these metrics are: `cql-requests`, `throttling.delay` and `cql-messages`; you can -also identify them by reading the comments in the [configuration -reference](../configuration/reference/) (look for "exposed as a Timer"). 
- -If all of these metrics are disabled, or if you use a different metrics library, you can remove the -dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - org.hdrhistogram - HdrHistogram - - - -``` - -#### Jackson - -[Jackson](https://github.com/FasterXML/jackson) is used: - -* when connecting to [DataStax Astra](../../cloud/); -* when Insights monitoring is enabled; -* when [Json codecs](../custom_codecs) are being used. - -Jackson is declared as a required dependency, but the driver can operate normally without it. If you -don't use any of the above features, you can safely exclude the dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - com.fasterxml.jackson.core - * - - - -``` - -#### Esri - -Our [geospatial types](../dse/geotypes/) implementation is based on the [Esri Geometry -API](https://github.com/Esri/geometry-api-java). - -For driver versions >= 4.4.0 and < 4.14.0 Esri is declared as a required dependency, -although the driver can operate normally without it. If you don't use geospatial types -anywhere in your application you can exclude the dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - com.esri.geometry - * - - - -``` - -Starting with driver 4.14.0 Esri has been changed to an optional dependency. You no longer have to -explicitly exclude the dependency if it's not used, but if you do wish to make use of the Esri -library you must now explicitly specify it as a dependency : - -```xml - - com.esri.geometry - esri-geometry-api - ${esri.version} - -``` - -In the dependency specification above you should use any 1.2.x version of Esri (we recommend -1.2.1). These versions are older than the current 2.x versions of the library but they are -guaranteed to be fully compatible with DSE. 
- -#### TinkerPop - -[Apache TinkerPop™](http://tinkerpop.apache.org/) is used in our [graph API](../dse/graph/), -introduced in the OSS driver in version 4.4.0 (it was previously a feature only available in the -now-retired DSE driver). - -For driver versions ranging from 4.4.0 to 4.9.0 inclusive, TinkerPop is declared as a required -dependency, but the driver can operate normally without it. If you don't use the graph API at all, -you can exclude the TinkerPop dependencies: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - org.apache.tinkerpop - * - - - -``` - -Starting with driver 4.10 however, TinkerPop switched to an optional dependency. Excluding TinkerPop -explicitly is not required anymore if you don't use it. _If you do use the graph API though, you now -need to explicitly include the dependencies below in your application_: - -```xml - - org.apache.tinkerpop - gremlin-core - ${tinkerpop.version} - - - org.apache.tinkerpop - tinkergraph-gremlin - ${tinkerpop.version} - -``` - -If you do use graph, it is important to keep the precise TinkerPop version that the driver depends -on: unlike the driver, TinkerPop does not follow semantic versioning, so even a patch version change -(e.g. 3.3.0 vs 3.3.3) could introduce incompatibilities. - -Here are the recommended TinkerPop versions for each driver version: - - - - - - - - - - - - - - - - - - -
    Driver versionTinkerPop version
    4.17.03.5.3
    4.16.03.5.3
    4.15.03.5.3
    4.14.13.5.3
    4.14.03.4.10
    4.13.03.4.10
    4.12.03.4.10
    4.11.03.4.10
    4.10.03.4.9
    4.9.03.4.8
    4.8.03.4.5
    4.7.03.4.5
    4.6.03.4.5
    4.5.03.4.5
    4.4.03.3.3
    - -#### Reactive Streams - -[Reactive Streams](https://www.reactive-streams.org/) types are referenced in our [reactive -API](../reactive/). - -The Reactive Streams API is declared as a required dependency, but the driver can operate normally -without it. If you never call any of the `executeReactive` methods, you can exclude the dependency: - -```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - org.reactivestreams - reactive-streams - - - -``` - -#### Documenting annotations - -The driver team uses annotations to document certain aspects of the code: - -* thread safety with [Java Concurrency in Practice](http://jcip.net/annotations/doc/index.html) - annotations `@Immutable`, `@ThreadSafe`, `@NotThreadSafe` and `@GuardedBy`; -* nullability with [SpotBugs](https://spotbugs.github.io/) annotations `@Nullable` and `@NonNull`. - -This is mostly used during development; while these annotations are retained in class files, they -serve no purpose at runtime. This class is an optional dependency of the driver. If you wish to -make use of these annotations in your own code you have to explicitly depend on these jars: - -```xml - - - com.github.stephenc.jcip - jcip-annotations - 1.0-1 - - - com.github.spotbugs - spotbugs-annotations - 3.1.12 - - -``` - -However, there is one case when excluding those dependencies won't work: if you use [annotation -processing] in your build, the Java compiler scans the entire classpath -- including the driver's -classes -- and tries to load all declared annotations. If it can't find the class for an annotation, -you'll get a compiler error: - -``` -error: cannot access ThreadSafe - class file for net.jcip.annotations.ThreadSafe not found -1 error -``` - -The workaround is to keep the dependencies. - -Sometimes annotation scanning can be triggered involuntarily, if one of your dependencies declares -a processor via the service provider mechanism (check the `META-INF/services` directory in the -JARs). 
If you are sure that you don't need any annotation processing, you can compile with the -`-proc:none` option and still exclude the dependencies. - -#### Mandatory dependencies - -The remaining core driver dependencies are the only ones that are truly mandatory: - -* the [native protocol](https://github.com/datastax/native-protocol) layer. This is essentially part - of the driver code, but was externalized for reuse in other projects; -* `java-driver-guava-shaded`, a shaded version of [Guava](https://github.com/google/guava). It is - relocated to a different package, and only used by internal driver code, so it should be - completely transparent to third-party code; -* the [SLF4J](https://www.slf4j.org/) API for [logging](../logging/). - -[central_oss]: https://search.maven.org/#search%7Cga%7C1%7Ccom.datastax.oss -[maven_pom]: https://maven.apache.org/guides/introduction/introduction-to-the-pom.html -[gradle_init]: https://guides.gradle.org/creating-new-gradle-builds/ -[downloads]: http://downloads.datastax.com/java-driver/ -[guava]: https://github.com/google/guava/issues/2721 -[annotation processing]: https://docs.oracle.com/javase/8/docs/technotes/tools/windows/javac.html#sthref65 - -[Session.getMetrics]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#getMetrics-- -[SessionBuilder.addContactPoint]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoint-java.net.InetSocketAddress- -[Uuids]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/uuid/Uuids.html diff --git a/manual/core/load_balancing/README.md b/manual/core/load_balancing/README.md deleted file mode 100644 index 3f391c14f56..00000000000 --- a/manual/core/load_balancing/README.md +++ /dev/null @@ -1,456 +0,0 @@ - - -## Load balancing - -### Quick overview - -Which nodes the driver talks to, and in which order they are tried. 
- -* `basic.load-balancing-policy` in the configuration. -* defaults to `DefaultLoadBalancingPolicy` (opinionated best practices). -* can have per-profile policies. - ------ - -A Cassandra cluster is typically composed of multiple nodes; the *load balancing policy* (sometimes -abbreviated LBP) is a central component that determines: - -* which nodes the driver will communicate with; -* for each new query, which coordinator to pick, and which nodes to use as failover. - -It is defined in the [configuration](../configuration/): - -``` -datastax-java-driver.basic.load-balancing-policy { - class = DefaultLoadBalancingPolicy -} -``` - -### Concepts - -#### Node distance - -For each node, the policy computes a *distance* that determines how connections will be established: - -* `LOCAL` and `REMOTE` are "active" distances, meaning that the driver will keep open connections to - this node. [Connection pools](../pooling/) can be sized independently for each distance. -* `IGNORED` means that the driver will never attempt to connect. - -Typically, the distance will reflect network topology (e.g. local vs. remote datacenter), although -that is entirely up to each policy implementation. It can also change over time. - -The driver built-in policies only ever assign the `LOCAL` or `IGNORED` distance, to avoid cross- -datacenter traffic (see below to understand how to change this behavior). - -#### Query plan - -Each time the driver executes a query, it asks the policy to compute a *query plan*, in other words -a list of nodes. The driver then tries each node in sequence, moving down the plan according to the -[retry policy](../retries/) and [speculative execution policy](../speculative_execution/). - -The contents and order of query plans are entirely implementation-specific, but policies typically -return plans that: - -* are different for each query, in order to balance the load across the cluster; -* only contain nodes that are known to be able to process queries, i.e. 
neither ignored nor down; -* favor local nodes over remote ones. - -### Built-in policies - -In previous versions, the driver provided a wide variety of built-in load balancing policies; in -addition, they could be nested into each other, yielding an even higher number of choices. In our -experience, this has proven to be too complicated: it's not obvious which policy(ies) to choose for -a given use case, and nested policies can sometimes affect each other's effects in subtle and hard- -to-predict ways. - -In driver 4+, we are taking a different approach: we provide only a handful of load balancing -policies, that we consider the best choices for most cases: - -- `DefaultLoadBalancingPolicy` should almost always be used; it requires a local datacenter to be - specified either programmatically when creating the session, or via the configuration (see below). - It can also use a highly efficient slow replica avoidance mechanism, which is by default enabled. -- `DcInferringLoadBalancingPolicy` is similar to `DefaultLoadBalancingPolicy`, but does not require - a local datacenter to be defined, in which case it will attempt to infer the local datacenter from - the provided contact points. If that's not possible, it will throw an error during session - initialization. This policy is intended mostly for ETL tools and is not recommended for normal - applications. -- `BasicLoadBalancingPolicy` is similar to `DefaultLoadBalancingPolicy`, but does not have the slow - replica avoidance mechanism. More importantly, it is the only policy capable of operating without - local datacenter defined, in which case it will consider nodes in the cluster in a datacenter- - agnostic way. Beware that this could cause spikes in cross-datacenter traffic! This policy is - provided mostly as a starting point for users wishing to implement their own load balancing - policy; it should not be used as is in normal applications. 
- -You can still write a [custom implementation](#custom-implementation) if you have special -requirements. - -#### Datacenter locality - -By default, both `DefaultLoadBalancingPolicy` and `DcInferringLoadBalancingPolicy` **only connect to -a single datacenter**. The rationale is that a typical multi-region deployment will collocate one or -more application instances with each Cassandra datacenter: - -```ditaa - /----+----\ - | client | - \----+----/ - | - v - /---------------\ - | load balancer | - \-------+-------/ - | - +------------+------------+ - | | -+---------|---------+ +---------|---------+ -| Region1 v | | Region2 v | -| /---------\ | | /---------\ | -| | app1 | | | | app2 | | -| \----+----/ | | \----+----/ | -| | | | | | -| v | | v | -| +-----------+ | | +-----------+ | -| | {s} | | | | {s} | | -| | Cassandra +------=------+ Cassandra | | -| | DC1 | | | | DC2 | | -| +-----------+ | | +-----------+ | -| | | | -+-------------------+ +-------------------+ -``` - -When using these policies you **must** provide a local datacenter name, either in the configuration: - -``` -datastax-java-driver.basic.load-balancing-policy { - local-datacenter = datacenter1 -} -``` - -Or programmatically when building the session: - -```java -CqlSession session = CqlSession.builder() - .withLocalDatacenter("datacenter1") - .build(); -``` - -If both are provided, the programmatic value takes precedence. - -For convenience, the local datacenter name may be omitted if no contact points were provided: in -that case, the driver will connect to 127.0.0.1:9042, and use that node's datacenter. This is just -for a better out-of-the-box experience for users who have just downloaded the driver; beyond that -initial development phase, you should provide explicit contact points and a local datacenter. - -##### Finding the local datacenter - -To check which datacenters are defined in a given cluster, you can run [`nodetool status`]. 
It will -print information about each node in the cluster, grouped by datacenters. Here is an example: - -``` -$ nodetool status -Datacenter: DC1 -=============== -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns Host ID Rack -UN 1.5 TB 256 ? rack1 -UN 1.5 TB 256 ? rack2 -UN 1.5 TB 256 ? rack3 - -Datacenter: DC2 -=============== -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns Host ID Rack -UN 1.5 TB 256 ? rack1 -UN 1.5 TB 256 ? rack2 -UN 1.5 TB 256 ? rack3 -``` - -To find out which datacenter should be considered local, you need to first determine which nodes the -driver is going to be co-located with, then choose their datacenter as local. In case of doubt, you -can also use [cqlsh]; if cqlsh is co-located too in the same datacenter, simply run the command -below: - -``` -cqlsh> select data_center from system.local; - -data_center -------------- -DC1 -``` - -#### Cross-datacenter failover - -Since the driver by default only contacts nodes in the local datacenter, what happens if the whole -datacenter is down? Resuming the example shown in the diagram above, shouldn't the driver -temporarily allow app1 to connect to the nodes in DC2? - -We believe that, while appealing by its simplicity, such ability is not the right way to handle a -datacenter failure: resuming our example above, if the whole DC1 datacenter went down at once, it -probably means a catastrophic failure happened in Region1, and the application node is down as well. -Failover should be cross-region instead (handled by the load balancer in the above example). - -However, due to popular demand, starting with driver 4.10, we re-introduced cross-datacenter -failover in the driver built-in load balancing policies. 
- -Cross-datacenter failover is enabled with the following configuration option: - -``` -datastax-java-driver.advanced.load-balancing-policy.dc-failover { - max-nodes-per-remote-dc = 2 -} -``` - -The default for `max-nodes-per-remote-dc` is zero, which means that failover is disabled. Setting -this option to any value greater than zero will have the following effects: - -- The load balancing policies will assign the `REMOTE` distance to that many nodes *in each remote - datacenter*. -- The driver will then attempt to open connections to those nodes. The actual number of connections - to open to each one of those nodes is configurable, see [Connection pools](../pooling/) for - more details. By default, the driver opens only one connection to each node. -- Those remote nodes (and only those) will then become eligible for inclusion in query plans, - effectively enabling cross-datacenter failover. - -Beware that enabling such failover can result in cross-datacenter network traffic spikes, if the -local datacenter is down or experiencing high latencies! - -Cross-datacenter failover can also have unexpected consequences when using local consistency levels -(LOCAL_ONE, LOCAL_QUORUM and LOCAL_SERIAL). Indeed, a local consistency level may have different -semantics depending on the replication factor (RF) in use in each datacenter: if the local DC has -RF=3 for a given keyspace, but the remote DC has RF=1 for it, achieving LOCAL_QUORUM in the local DC -means 2 replicas required, but in the remote DC, only one will be required. - -For this reason, cross-datacenter failover for local consistency levels is disabled by default. 
If -you want to enable this and understand the consequences, then set the following option to true: - -``` -datastax-java-driver.advanced.load-balancing-policy.dc-failover { - allow-for-local-consistency-levels = true -} -``` - -##### Alternatives to driver-level cross-datacenter failover - -Before you jump into the failover technique explained above, please also consider the following -alternatives: - -1. **Application-level failover**: instead of letting the driver do the failover, implement the -failover logic in your application. Granted, this solution wouldn't be much better if the -application servers are co-located with the Cassandra datacenter itself. It's also a bit more work, -but at least, you would have full control over the failover procedure: you could for example decide, -based on the exact error that prevented the local datacenter from fulfilling a given request, -whether a failover would make sense, and which remote datacenter to use for that specific request. -Such a fine-grained logic is not possible with a driver-level failover. Besides, if you opt for this -approach, execution profiles can come in handy. See "Using multiple policies" below and also check -our [application-level failover example] for a good starting point. - -2. **Infrastructure-level failover**: in this scenario, the failover is handled by the -infrastructure. To resume our example above, if Region1 goes down, the load balancers in your -infrastructure would transparently switch all the traffic intended for that region to Region2, -possibly scaling up its bandwidth to cope with the network traffic spike. This is by far the best -solution for the cross-datacenter failover issue in general, but we acknowledge that it also -requires a purpose-built infrastructure. To help you explore this option, read our [white paper]. 
- -[application-level failover example]: https://github.com/datastax/java-driver/blob/4.x/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java -[white paper]: https://www.datastax.com/sites/default/files/content/whitepaper/files/2019-09/Designing-Fault-Tolerant-Applications-DataStax.pdf - -#### Token-aware - -The default policy is **token-aware** by default: requests will be routed in priority to the -replicas that own the data being queried. - -##### Providing routing information - -First make sure that [token metadata](../metadata/token/#configuration) is enabled. - -Then your statements need to provide: - -* a keyspace: if you use a [per-query keyspace](../statements/per_query_keyspace/), then it will be - used for routing as well. Otherwise, the driver relies on [getRoutingKeyspace()]; -* a routing key: it can be provided either by [getRoutingKey()] \(raw binary data) or - [getRoutingToken()] \(already hashed as a token). - -Depending on the type of statement, some of this information may be computed automatically, -otherwise you have to set it manually. 
The examples below assume the following CQL schema: - -``` -CREATE TABLE testKs.sensor_data(id int, year int, ts timestamp, data double, - PRIMARY KEY ((id, year), ts)); -``` - -For [simple statements](../statements/simple/), routing information is never computed -automatically: - -```java -SimpleStatement statement = - SimpleStatement.newInstance( - "SELECT * FROM testKs.sensor_data WHERE id = 1 and year = 2016"); - -// No routing info available: -assert statement.getRoutingKeyspace() == null; -assert statement.getRoutingKey() == null; - -// Set the keyspace manually (skip this if using a per-query keyspace): -statement = statement.setRoutingKeyspace("testKs"); - -// Set the routing key manually: serialize each partition key component to its target CQL type -statement = statement.setRoutingKey( - TypeCodecs.INT.encodePrimitive(1, session.getContext().getProtocolVersion()), - TypeCodecs.INT.encodePrimitive(2016, session.getContext().getProtocolVersion())); - -session.execute(statement); -``` - -For [bound statements](../statements/prepared/), the keyspace is always available; the routing key -is only available if all components of the partition key are bound as variables: - -```java -// All components bound: all info available -PreparedStatement pst1 = - session.prepare("SELECT * FROM testKs.sensor_data WHERE id = :id and year = :year"); -BoundStatement statement1 = pst1.bind(1, 2016); - -assert statement1.getRoutingKeyspace() != null; -assert statement1.getRoutingKey() != null; - -// 'id' hard-coded, only 'year' is bound: only keyspace available -PreparedStatement pst2 = - session.prepare("SELECT * FROM testKs.sensor_data WHERE id = 1 and year = :year"); -BoundStatement statement2 = pst2.bind(2016); - -assert statement2.getRoutingKeyspace() != null; -assert statement2.getRoutingKey() == null; -``` - -For [batch statements](../statements/batch/), the routing information of each child statement is -inspected; the first non-null keyspace is used as the keyspace of 
 the batch, and the first non-null
-routing key as its routing key (the idea is that all children should have the same routing
-information, since batches are supposed to operate on a single partition). If no child has any
-routing information, you need to provide it manually.
-
-##### Policy behavior
-
-When the policy computes a query plan, it first inspects the statement's routing information. If
-there isn't any, the query plan is a simple round-robin shuffle of all connected nodes that are
-located in the local datacenter.
-
-If the statement has routing information, the policy uses it to determine the *local* replicas that
-hold the corresponding data. Then it returns a query plan containing these replicas shuffled in
-random order, followed by a round-robin shuffle of the rest of the nodes.
-
-If cross-datacenter failover has been activated as explained above, some remote nodes may appear in
-query plans as well. With the driver built-in policies, remote nodes always come after local nodes
-in query plans: this way, if the local datacenter is up, local nodes will be tried first, and remote
-nodes are unlikely to ever be queried. If the local datacenter goes down however, all the local
-nodes in query plans will likely fail, causing the query plans to eventually try remote nodes
-instead. If the local datacenter unavailability persists, local nodes will be eventually marked down
-and will be removed completely from query plans, until they are back up again.
-
-#### Customizing node distance assignment
-
-Finally, all the driver built-in policies accept an optional node distance evaluator that gets
-invoked each time a node is added to the cluster or comes back up. If the evaluator returns a
-non-null distance for the node, that distance will be used, otherwise the driver will use its
-built-in logic to assign a default distance to it. This is a good way to exclude nodes or to adjust
-their distance according to custom, dynamic criteria. 
-
-You can pass the node distance evaluator through the configuration:
-
-```
-datastax-java-driver.basic.load-balancing-policy {
-  class = DefaultLoadBalancingPolicy
-  local-datacenter = datacenter1
-  evaluator.class = com.acme.MyNodeDistanceEvaluator
-}
-```
-
-The node distance evaluator class must implement [NodeDistanceEvaluator], and have a public
-constructor that takes a [DriverContext] argument: `public MyNodeDistanceEvaluator(DriverContext
-context)`.
-
-Sometimes it's more convenient to pass the evaluator programmatically; you can do that with
-`SessionBuilder.withNodeDistanceEvaluator`:
-
-```java
-Map<Node, NodeDistance> distances = ...
-CqlSession session = CqlSession.builder()
-    .withNodeDistanceEvaluator((node, dc) -> distances.get(node))
-    .build();
-```
-
-If a programmatic node distance evaluator is provided, the configuration option is
-ignored.
-
-### Custom implementation
-
-You can use your own implementation by specifying its fully-qualified name in the configuration.
-
-Study the [LoadBalancingPolicy] interface and the built-in [BasicLoadBalancingPolicy] for the
-low-level details. Feel free to extend `BasicLoadBalancingPolicy` and override only the methods
-that you wish to modify – but keep in mind that it may be simpler to just start from scratch.
-
-### Using multiple policies
-
-The load balancing policy can be overridden in [execution profiles](../configuration/#profiles):
-
-```
-datastax-java-driver {
-  basic.load-balancing-policy {
-    class = DefaultLoadBalancingPolicy
-  }
-  profiles {
-    custom-lbp {
-      basic.load-balancing-policy {
-        class = CustomLoadBalancingPolicy
-      }
-    }
-    slow {
-      request.timeout = 30 seconds
-    }
-  }
-}
-```
-
-The `custom-lbp` profile uses a dedicated policy. The `slow` profile inherits the default profile's. 
-Note that this goes beyond configuration inheritance: the driver only creates a single -`DefaultLoadBalancingPolicy` instance and reuses it (this also occurs if two sibling profiles have -the same configuration). - -For query plans, each request uses its declared profile's policy. If it doesn't declare any profile, -or if the profile doesn't have a dedicated policy, then the default profile's policy is used. - -For node distances, the driver remembers the last distance suggested by each policy for each node. -Then it uses the "closest" distance for any given node. For example: - -* for node1, policy1 suggests distance LOCAL and policy2 suggests REMOTE. node1 is set to LOCAL; -* policy1 changes its suggestion to IGNORED. node1 is set to REMOTE; -* policy1 changes its suggestion to REMOTE. node1 stays at REMOTE. - -[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html -[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.html -[BasicLoadBalancingPolicy]: https://github.com/datastax/java-driver/blob/4.x/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java -[getRoutingKeyspace()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKeyspace-- -[getRoutingToken()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingToken-- -[getRoutingKey()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKey-- -[NodeDistanceEvaluator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.html -[`nodetool status`]: https://docs.datastax.com/en/dse/6.7/dse-dev/datastax_enterprise/tools/nodetool/toolsStatus.html -[cqlsh]: 
https://docs.datastax.com/en/dse/6.7/cql/cql/cql_using/startCqlshStandalone.html diff --git a/manual/core/logging/README.md b/manual/core/logging/README.md deleted file mode 100644 index e3f8bfa7777..00000000000 --- a/manual/core/logging/README.md +++ /dev/null @@ -1,237 +0,0 @@ - - -## Logging - -### Quick overview - -* based on SLF4J. -* config file examples for Logback and Log4J. - -**If you're looking for information about the request logger, see the [request -tracker](../request_tracker/#request-logger) page.** - ------ - -The driver uses [SLF4J] as a logging facade. This allows you to plug in your preferred logging -framework (java.util.logging, logback, log4j...) at deployment time. - -### Setup - -To connect SLF4J to your logging framework, add a [binding] JAR in your classpath. If you use a -build tool such as Maven or Gradle, this usually involves adding a runtime dependency to your -application descriptor (`pom.xml` or `build.gradle`). For example, here is a Maven snippet for -[Logback]: - -```xml - - ch.qos.logback - logback-classic - ... - -``` - -And the same for [Log4J]: - -```xml - - org.slf4j - slf4j-log4j12 - ... - -``` - -Check [SLF4J's documentation](http://www.slf4j.org/manual.html#projectDep) for examples for other -logging frameworks, and for troubleshooting dependency resolution problems. - -Each logging framework has its own configuration rules, but all of them provide different levels -(DEBUG, INFO, WARN, ERROR...), different *loggers* or *categories* (messages from different -categories or loggers can be filtered out separately or printed out differently), and different -*appenders* (message receptacles such as the standard console, the error console, a file on disk, a -socket...). - -Check your logging framework documentation for more information about how to properly configure it. -You can also find some configuration examples at the end of this page. 
- -Performance tips: - -* Use asynchronous appenders; both - [Log4J](http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/AsyncAppender.html) and - [Logback](http://logback.qos.ch/manual/appenders.html#AsyncAppender) provide asynchronous - appenders which reduce the impact of logging in latency-sensitive applications. -* While the driver does not provide such capability, it is possible for client applications to - hot-reload the log configuration without stopping the application. This usually involves JMX and - is available for [Logback](http://logback.qos.ch/manual/jmxConfig.html); Log4J provides a - `configureAndWatch()` method but it is not recommended to use it inside J2EE containers (see - [FAQ](https://logging.apache.org/log4j/1.2/faq.html#a3.6)). - -### Taxonomy of driver logs - -The driver has a well-defined use for each log level. As an application developer/administrator, you -should be focusing mostly on the `ERROR`, `WARN` and `INFO` levels. - -#### ERROR - -Something that renders the driver -- or a part of it -- completely unusable. An action is required -to fix it: bouncing the client, applying a patch, etc. - -#### WARN - -Something that the driver can recover from automatically, but indicates a configuration or -programming error that should be addressed. For example: - -``` -WARN c.d.o.d.i.core.session.PoolManager - [s0] Detected a keyspace change at runtime ( => -test). This is an anti-pattern that should be avoided in production (see -'request.warn-if-set-keyspace' in the configuration). - -WARN c.d.o.d.i.c.c.CqlPrepareHandlerBase - Re-preparing already prepared query. This is generally -an anti-pattern and will likely affect performance. The cached version of the PreparedStatement -will be returned, which may use different bound statement execution parameters (CL, timeout, etc.) -from the current session.prepare call. Consider preparing the statement only once. Query='...' 
-``` - -#### INFO - -Something that is part of the normal operation of the driver, but might be useful to know for an -administrator. For example: - -``` -INFO c.d.o.d.i.c.metadata.MetadataManager - [s0] No contact points provided, defaulting to -/127.0.0.1:9042 - -INFO c.d.o.d.internal.core.time.Clock - Using native clock for microsecond precision - -INFO c.d.o.d.i.c.c.t.DefaultDriverConfigLoader - [s0] Detected a configuration change -``` - -#### DEBUG and TRACE - -These levels are intended primarily for driver developers; we might ask you to enable them to -investigate an issue. - -Keep in mind that they are quite verbose, in particular TRACE. It's a good idea to only enable them -on a limited set of categories. - -### Configuration examples - -#### Logback - -Here is a sample configuration file for Logback. - -It logs driver messages of level INFO and above, and all other libraries at level ERROR only. - -The appenders send all messages of level INFO and above to the console, and all messages to a -rolling file (with the current configuration, the console and log file have the same contents, but -if you were to enable DEBUG logs for a category, those logs would go to the file but not the -console). - -```xml - - - - - - - %-5p %msg%n - - - - - - driver.log - - %-5p [%d{ISO8601}] [%t] %F:%L - %msg%n - - - - driver.%d{yyyy-MM-dd}.log - - 30 - - - - - - - - - - - - - - -``` - -#### Log4J - -Here is a sample configuration file for Log4J. - -It logs driver messages of level INFO and above, and all other libraries at level ERROR only. - -The appenders send all messages of level INFO and above to the console, and all messages to a -rolling file (with the current configuration, the console and log file have the same contents, but -if you were to enable DEBUG logs for a category, those logs would go to the file but not the -console). 
- -```xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -[SLF4J]: https://www.slf4j.org/ -[binding]: https://www.slf4j.org/manual.html#swapping -[Logback]: http://logback.qos.ch -[Log4J]: https://logging.apache.org/log4j diff --git a/manual/core/metadata/README.md b/manual/core/metadata/README.md deleted file mode 100644 index 73609ee0542..00000000000 --- a/manual/core/metadata/README.md +++ /dev/null @@ -1,80 +0,0 @@ - - -## Metadata - -### Quick overview - -[session.getMetadata()][Session#getMetadata]: node states, schema and token map. - -* immutable, provides a consistent view at a given point in time (e.g. token map always matches - schema). -* pitfall: holding onto a stale instance; must call `session.getMetadata()` again to observe - changes. - ------ - -The driver exposes metadata about the Cassandra cluster via the [Session#getMetadata] method. It -returns a [Metadata] object, which contains three types of information: - -* [node metadata](node/) -* [schema metadata](schema/) -* [token metadata](token/) - -Metadata is mostly **immutable** (except for the fields of the [Node] class, see the "node metadata" -link above for details). Each call to `getMetadata()` will return a **new copy** if something has -changed since the last call. Do not cache the result across usages: - -```java -Metadata metadata = session.getMetadata(); - -session.execute("CREATE TABLE test.foo (k int PRIMARY KEY)"); - -// WRONG: the metadata was retrieved before the CREATE TABLE call, it does not reflect the new table -TableMetadata fooMetadata = - metadata - .getKeyspace(CqlIdentifier.fromCql("test")) - .getTable(CqlIdentifier.fromCql("foo")); -assert fooMetadata == null; -``` - -On the other hand, the advantage of immutability is that a `Metadata` instance provides a -**consistent view** of the cluster at a given point in time. 
In other words, the token map is -guaranteed to be in sync with the node and schema metadata: - -```java -Metadata metadata = session.getMetadata(); -// Pick up any node and keyspace: -Node node = metadata.getNodes().values().iterator().next(); -KeyspaceMetadata keyspace = metadata.getKeyspaces().values().iterator().next(); - -TokenMap tokenMap = metadata.getTokenMap().get(); -// The token map is guaranteed to have the corresponding data: -Set tokenRanges = tokenMap.getTokenRanges(keyspace.getName(), node); -``` - -This is a big improvement over previous versions of the driver, where it was possible to observe a -new keyspace in the schema metadata before the token metadata was updated. - -Schema and node state events are debounced. This allows you to control how often the metadata gets -refreshed. See the [Performance](../performance/#debouncing) page for more details. - -[Session#getMetadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#getMetadata-- -[Metadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html -[Node]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html diff --git a/manual/core/metadata/node/README.md b/manual/core/metadata/node/README.md deleted file mode 100644 index fea04e5f262..00000000000 --- a/manual/core/metadata/node/README.md +++ /dev/null @@ -1,164 +0,0 @@ - - -## Node metadata - -### Quick overview - -[session.getMetadata().getNodes()][Metadata#getNodes]: all nodes known to the driver (even if not -actively connected). - -* [Node] instances are mutable, the fields will update in real time. -* getting notifications: - [CqlSession.builder().addNodeStateListener][SessionBuilder.addNodeStateListener]. 
- ------ - -[Metadata#getNodes] returns all the nodes known to the driver when the metadata was retrieved; this -includes down and ignored nodes (see below), so the fact that a node is in this list does not -necessarily mean that the driver is connected to it. - -```java -Map nodes = session.getMetadata().getNodes(); -System.out.println("Nodes in the cluster:"); -for (Node node : nodes.values()) { - System.out.printf( - " %s is %s and %s (%d connections)%n", - node.getConnectAddress().getAddress(), - node.getState(), - node.getDistance(), - node.getOpenConnections()); -} -``` - -The returned map is immutable: it does not reflect additions or removals since the metadata was -retrieved. On the other hand, the [Node] object is mutable; you can hold onto an instance across -metadata refreshes and see updates to the fields. - -A few notable fields are explained below; for the full details, refer to the Javadocs. - -[Node#getState()] indicates how the driver sees the node (see the Javadocs of [NodeState] for the -list of possible states with detailed explanations). In general, the driver tries to be resilient to -spurious DOWN notifications, and will try to use a node as long as it seems up, even if some events -seem to indicate otherwise: for example, if the Cassandra gossip detects a node as down because of -cross-node connectivity issues, but the driver still has active connections to that node, the node -will stay up. Two related properties are [Node#getOpenConnections()] and [Node#isReconnecting()]. - -[Node#getDatacenter()] and [Node#getRack()] represent the location of the node. This information is -used by some load balancing policies to prioritize coordinators that are physically close to the -client. - -[Node#getDistance()] is set by the load balancing policy. The driver does not connect to `IGNORED` -nodes. 
The exact definition of `LOCAL` and `REMOTE` is left to the interpretation of each policy, -but in general it represents the proximity to the client, and `LOCAL` nodes will be prioritized as -coordinators. They also influence pooling options. - -[Node#getExtras()] contains additional free-form properties. This is intended for future evolution -or custom driver extensions. In particular, if the driver is connected to DataStax Enterprise, the -map will contain additional information under the keys defined in [DseNodeProperties]: - -```java -Object rawDseVersion = node.getExtras().get(DseNodeProperties.DSE_VERSION); -Version dseVersion = (rawDseVersion == null) ? null : (Version) rawDseVersion; -``` - -### Notifications - -If you need to follow node state changes, you don't need to poll the metadata manually; instead, -you can register one or more listeners to get notified when changes occur: - -```java -NodeStateListener listener = - new NodeStateListenerBase() { - @Override - public void onUp(@NonNull Node node) { - System.out.printf("%s went UP%n", node); - } - }; -CqlSession session = CqlSession.builder() - .addNodeStateListener(listener) - .build(); -``` - -See [NodeStateListener] for the list of available methods. [NodeStateListenerBase] is a -convenience implementation with empty methods, for when you only need to override a few of them. - -It is also possible to register one or more listeners via the configuration: - -```hocon -datastax-java-driver { - advanced { - node-state-listener.classes = [com.example.app.MyNodeStateListener1,com.example.app.MyNodeStateListener2] - } -} -``` - -Listeners registered via configuration will be instantiated with reflection; they must have a public -constructor taking a `DriverContext` argument. - -The two registration methods (programmatic and via the configuration) can be used simultaneously. 
- -### Advanced topics - -#### Forcing a node down - -It is possible to temporarily or permanently close all connections to a node and disable -reconnection. The driver does that internally for certain unrecoverable errors (such as a protocol -version mismatch), but this could also be useful for maintenance, or for a custom component (load -balancing policy, etc). - -```java -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; - -InternalDriverContext context = (InternalDriverContext) session.getContext(); -context.getEventBus().fire(TopologyEvent.forceDown(node1.getConnectAddress())); -context.getEventBus().fire(TopologyEvent.forceUp(node1.getConnectAddress())); -``` - -As shown by the imports above, forcing a node down requires the *internal* driver API, which is -reserved for expert usage and subject to the disclaimers in -[API conventions](../../../api_conventions/). - -#### Using a custom topology monitor - -By default, the driver relies on Cassandra's gossip protocol to receive notifications about the -node states. It opens a control connection to one of the nodes, and registers for server-sent state -events. - -Some organizations have their own way of monitoring Cassandra nodes, and prefer to use it instead. -It is possible to completely override the default behavior to bypass gossip. The full details are -beyond the scope of this document; if you're interested, study the `TopologyMonitor` interface in -the source code. 
- - -[Metadata#getNodes]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html#getNodes-- -[Node]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html -[Node#getState()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getState-- -[Node#getDatacenter()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getDatacenter-- -[Node#getRack()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getRack-- -[Node#getDistance()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getDistance-- -[Node#getExtras()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getExtras-- -[Node#getOpenConnections()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getOpenConnections-- -[Node#isReconnecting()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#isReconnecting-- -[NodeState]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/NodeState.html -[NodeStateListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/NodeStateListener.html -[NodeStateListenerBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.html -[SessionBuilder.addNodeStateListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addNodeStateListener-com.datastax.oss.driver.api.core.metadata.NodeStateListener- -[DseNodeProperties]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.html diff --git 
a/manual/core/metadata/schema/README.md b/manual/core/metadata/schema/README.md deleted file mode 100644 index 20521d1def4..00000000000 --- a/manual/core/metadata/schema/README.md +++ /dev/null @@ -1,355 +0,0 @@ - - -## Schema metadata - -### Quick overview - -[session.getMetadata().getKeyspaces()][Metadata#getKeyspaces] - -* immutable (must invoke again to observe changes). -* getting notifications: - [CqlSession.builder().addSchemaChangeListener][SessionBuilder#addSchemaChangeListener]. -* enabling/disabling: `advanced.metadata.schema.enabled` in the configuration, or - [session.setSchemaMetadataEnabled()][Session#setSchemaMetadataEnabled]. -* filtering: `advanced.metadata.schema.refreshed-keyspaces` in the configuration. -* schema agreement: wait for the schema to replicate to all nodes (may add latency to DDL - statements). - ------ - -[Metadata#getKeyspaces] returns a client-side representation of the database schema: - -```java -Map keyspaces = session.getMetadata().getKeyspaces(); -KeyspaceMetadata system = keyspaces.get(CqlIdentifier.fromCql("system")); -System.out.println("The system keyspace contains the following tables:"); -for (TableMetadata table : system.getTables().values()) { - System.out.printf( - " %s (%d columns)%n", table.getName().asCql(true), table.getColumns().size()); -} -``` - -Schema metadata is fully immutable (both the map and all the objects it contains). It represents a -snapshot of the database at the time of the last metadata refresh, and is consistent with the -[token map](../token/) of its parent `Metadata` object. Keep in mind that `Metadata` is itself -immutable; if you need to get the latest schema, be sure to call -`session.getMetadata().getKeyspaces()` again (and not just `getKeyspaces()` on a stale `Metadata` -reference). - - -### DSE - -All schema metadata interfaces accessible through `Metadata.getKeyspaces()` have a DSE-specific -subtype in the package [com.datastax.dse.driver.api.core.metadata.schema]. 
The objects returned by -the DSE driver implement those types, so you can safely cast: - -```java -for (KeyspaceMetadata keyspace : session.getMetadata().getKeyspaces().values()) { - DseKeyspaceMetadata dseKeyspace = (DseKeyspaceMetadata) keyspace; -} -``` - -If you're calling a method that returns an optional and want to keep the result wrapped, use this -pattern: - -```java -Optional f = - session - .getMetadata() - .getKeyspace("ks") - .flatMap(ks -> ks.getFunction("f")) - .map(DseFunctionMetadata.class::cast); -``` - -For future extensibility, there is a `DseXxxMetadata` subtype for every OSS type. But currently (DSE -6.7), the only types that really add extra information are: - -* [DseFunctionMetadata]: add support for the `DETERMINISTIC` and `MONOTONIC` keywords; -* [DseAggregateMetadata]: add support for the `MONOTONIC` keyword. - -All other types (keyspaces, tables, etc.) are identical to their OSS counterparts. - -### Notifications - -If you need to follow schema changes, you don't need to poll the metadata manually; instead, -you can register one or more listeners to get notified when changes occur: - -```java -SchemaChangeListener listener = - new SchemaChangeListenerBase() { - @Override - public void onTableCreated(TableMetadata table) { - System.out.println("New table: " + table.getName().asCql(true)); - } - }; -CqlSession session = CqlSession.builder() - .addSchemaChangeListener(listener) - .build(); - -session.execute("CREATE TABLE test.foo (k int PRIMARY KEY)"); -``` - -See [SchemaChangeListener] for the list of available methods. [SchemaChangeListenerBase] is a -convenience implementation with empty methods, for when you only need to override a few of them. 
- -It is also possible to register one or more listeners via the configuration: - -```hocon -datastax-java-driver { - advanced { - schema-change-listener.classes = [com.example.app.MySchemaChangeListener1,com.example.app.MySchemaChangeListener2] - } -} -``` - -Listeners registered via configuration will be instantiated with reflection; they must have a public -constructor taking a `DriverContext` argument. - -The two registration methods (programmatic and via the configuration) can be used simultaneously. - -### Configuration - -#### Enabling/disabling - -You can disable schema metadata globally from the configuration: - -``` -datastax-java-driver.advanced.metadata.schema.enabled = false -``` - -If it is disabled at startup, [Metadata#getKeyspaces] will stay empty. If you disable it at runtime, -it will keep the value of the last refresh. - -You can achieve the same thing programmatically with [Session#setSchemaMetadataEnabled]: if you call -it with `true` or `false`, it overrides the configuration; if you pass `null`, it reverts to the -value defined in the configuration. One case where that could come in handy is if you are sending a -large number of DDL statements from your code: - -```java -// Disable temporarily, we'll do a single refresh once we're done -session.setSchemaMetadataEnabled(false); - -for (int i = 0; i < 100; i++) { - session.execute(String.format("CREATE TABLE test.foo%d (k int PRIMARY KEY)", i)); -} - -session.setSchemaMetadataEnabled(null); -``` - -Whenever schema metadata was disabled and becomes enabled again (either through the configuration or -the API), a refresh is triggered immediately. - - -#### Filtering - -You can also limit the metadata to a subset of keyspaces: - -``` -datastax-java-driver.advanced.metadata.schema.refreshed-keyspaces = [ "users", "products" ] -``` - -Each element in the list can be one of the following: - -1. An exact name inclusion, for example `"Ks1"`. 
If the name is case-sensitive, it must appear in - its exact case. -2. An exact name exclusion, for example `"!Ks1"`. -3. A regex inclusion, enclosed in slashes, for example `"/^Ks.*/"`. The part between the slashes - must follow the syntax rules of [java.util.regex.Pattern]. The regex must match the entire - keyspace name (no partial matching). -4. A regex exclusion, for example `"!/^Ks.*/"`. - -If the list is empty, or the option is unset, all keyspaces will match. Otherwise: - -* If a keyspace matches an exact name inclusion, it is always included, regardless of what any other - rule says. -* Otherwise, if it matches an exact name exclusion, it is always excluded, regardless of what any - regex rule says. -* Otherwise, if there are regex rules: - - * if they're only inclusions, the keyspace must match at least one of them. - * if they're only exclusions, the keyspace must match none of them. - * if they're both, the keyspace must match at least one inclusion and none of the - exclusions. - -For example, given the keyspaces `system`, `ks1`, `ks2`, `data1` and `data2`, here's the outcome of -a few filters: - -|Filter|Outcome|Translation| -|---|---|---| -| `[]` | `system`, `ks1`, `ks2`, `data1`, `data2` | Include all. | -| `["ks1", "ks2"]` | `ks1`, `ks2` | Include ks1 and ks2 (recommended, see explanation below). | -| `["!system"]` | `ks1`, `ks2`, `data1`, `data2` | Include all except system. | -| `["/^ks.*/"]` | `ks1`, `ks2` | Include all that start with ks. | -| `["!/^ks.*/"]` | `system`, `data1`, `data2` | Exclude all that start with ks (and include everything else). | -| `["system", "/^ks.*/"]` | `system`, `ks1`, `ks2` | Include system, and all that start with ks. | -| `["/^ks.*/", "!ks2"]` | `ks1` | Include all that start with ks, except ks2. | -| `["!/^ks.*/", "ks1"]` | `system`, `ks1`, `data1`, `data2` | Exclude all that start with ks, except ks1 (and also include everything else). 
| -| `["/^s.*/", "/^ks.*/", "!/.*2$/"]` | `system`, `ks1` | Include all that start with s or ks, except if they end with 2. | - - -If an element is malformed, or if its regex has a syntax error, a warning is logged and that single -element is ignored. - -The default configuration (see [reference.conf](../../configuration/reference/)) excludes all -Cassandra and DSE system keyspaces. - -Try to use only exact name inclusions if possible. This allows the driver to filter on the server -side with a `WHERE IN` clause. If you use any other rule, it has to fetch all system rows and filter -on the client side. - -Note that, if you change the list at runtime, `onKeyspaceAdded`/`onKeyspaceDropped` will be invoked -on your schema listeners for the newly included/excluded keyspaces. - - -#### Schema agreement - -Due to the distributed nature of Cassandra, schema changes made on one node might not be immediately -visible to others. If left unaddressed, this could create race conditions when successive queries -get routed to different coordinators: - -```ditaa - Application Driver Node 1 Node 2 -------+--------------------+------------------+------------------+--- - | | | | - | CREATE TABLE foo | | | - |------------------->| | | - | | send request | | - | |----------------->| | - | | | | - | | success | | - | |<-----------------| | - | complete query | | | - |<-------------------| | | - | | | | - | SELECT k FROM foo | | | - |------------------->| | | - | | send request | - | |------------------------------------>| schema changes not - | | | replicated yet - | | unconfigured table foo | - | |<------------------------------------| - | ERROR! | | | - |<-------------------| | | - | | | | -``` - -To avoid this issue, the driver waits until all nodes agree on a common schema version: - -```ditaa - Application Driver Node 1 -------+--------------------+------------------+----- - | | | - | CREATE TABLE...
| | - |------------------->| | - | | send request | - | |----------------->| - | | | - | | success | - | |<-----------------| - | | | - | /--------------------\ | - | :Wait until all nodes+------>| - | :agree (or timeout) : | - | \--------------------/ | - | | ^ | - | | | | - | | +---------| - | | | - | complete query | | - |<-------------------| | - | | | -``` - -Schema agreement is checked: - -* before a schema refresh; -* before completing a successful schema-altering query (like in our example above). - -It is done by querying system tables to find out the schema version of all nodes that are currently -UP. If all the versions match, the check succeeds, otherwise it is retried periodically, until a -given timeout. This process is tunable in the driver's configuration: - -``` -datastax-java-driver.advanced.control-connection.schema-agreement { - interval = 200 milliseconds - timeout = 10 seconds - warn-on-failure = true -} -``` - -After executing a statement, you can check whether schema agreement was successful or timed out with -[ExecutionInfo#isSchemaInAgreement]: - -```java -ResultSet rs = session.execute("CREATE TABLE..."); -if (rs.getExecutionInfo().isSchemaInAgreement()) { - ... -} -``` - -You can also perform an on-demand check at any time with [Session#checkSchemaAgreementAsync] \(or -its synchronous counterpart): - -```java -if (session.checkSchemaAgreement()) { - ... -} -``` - -A schema agreement failure is not fatal, but it might produce unexpected results (as explained at -the beginning of this section). - - -##### Schema agreement in mixed-version clusters - -If you're operating a cluster with different major/minor server releases (for example, Cassandra 2.1 -and 2.2), schema agreement will never succeed. This is because the way the schema version is -computed changes across releases, so the nodes will report different versions even though they -actually agree (see [JAVA-750] for the technical details). 
- -This issue would be hard to fix in a reliable way, and shouldn't be that much of a problem in -practice anyway: if you're in the middle of a rolling upgrade, you're probably not applying schema -changes at the same time. - - -### Relation to token metadata - -Some of the data in the [token map](../token/) relies on keyspace metadata (any method that takes a -`CqlIdentifier` argument). If schema metadata is disabled or filtered, token metadata will also be -unavailable for the excluded keyspaces. - -### Performing schema updates from the client - -If you issue schema-altering requests from the driver (e.g. `session.execute("CREATE TABLE ..")`), -take a look at the [Performance](../../performance/#schema-updates) page for a few tips. - -[Metadata#getKeyspaces]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html#getKeyspaces-- -[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.html -[SchemaChangeListenerBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.html -[Session#setSchemaMetadataEnabled]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#setSchemaMetadataEnabled-java.lang.Boolean- -[Session#checkSchemaAgreementAsync]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#checkSchemaAgreementAsync-- -[SessionBuilder#addSchemaChangeListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addSchemaChangeListener-com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener- -[ExecutionInfo#isSchemaInAgreement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#isSchemaInAgreement-- 
-[com.datastax.dse.driver.api.core.metadata.schema]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/schema/package-frame.html -[DseFunctionMetadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.html -[DseAggregateMetadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.html - -[JAVA-750]: https://datastax-oss.atlassian.net/browse/JAVA-750 -[java.util.regex.Pattern]: https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html diff --git a/manual/core/metadata/token/README.md b/manual/core/metadata/token/README.md deleted file mode 100644 index 4d7cd9252df..00000000000 --- a/manual/core/metadata/token/README.md +++ /dev/null @@ -1,192 +0,0 @@ - - -## Token metadata - -### Quick overview - -[session.getMetadata().getTokenMap()][Metadata#getTokenMap] - -* used for token-aware routing or analytics clients. -* immutable (must invoke again to observe changes). -* `advanced.metadata.token-map.enabled` in the configuration (defaults to true). - ------ - -[Metadata#getTokenMap] returns information about the tokens used for data replication. It is used -internally by the driver to send requests to the optimal coordinator when token-aware routing is -enabled. Another typical use case is data analytics clients, for example fetching a large range of -keys in parallel by sending sub-queries to each replica. 
- -Because token metadata can be disabled, the resulting [TokenMap] object is wrapped in an `Optional`; -to access it, you can use either a functional pattern, or more traditionally test first with -`isPresent` and then unwrap: - -```java -Metadata metadata = session.getMetadata(); - -metadata.getTokenMap().ifPresent(tokenMap -> { - // do something with the map -}); - -if (metadata.getTokenMap().isPresent()) { - TokenMap tokenMap = metadata.getTokenMap().get(); - // do something with the map -} -``` - - -### `TokenMap` methods - -For illustration purposes, let's consider a fictitious ring with 6 tokens, and a cluster of 3 nodes -that each own two tokens: - -```ditaa - node1 - /---\ - /=---+ 12+---=\ - : \---/ : - | | - /-+-\ /-+-\ -node3 | 10| | 2 | node2 - \-+-/ \-+-/ - : : - | | - /---\ /-+-\ -node2 | 8 | | 4 | node3 - \-+-/ \-+-/ - : : - | /---\ | - \=---+ 6 +---=/ - \---/ - node1 -``` - -The first thing you can do is retrieve all the ranges, in other words describe the ring: - -```java -Set ring = tokenMap.getTokenRanges(); -// Returns [Murmur3TokenRange(Murmur3Token(12), Murmur3Token(2)), -// Murmur3TokenRange(Murmur3Token(2), Murmur3Token(4)), -// Murmur3TokenRange(Murmur3Token(4), Murmur3Token(6)), -// Murmur3TokenRange(Murmur3Token(6), Murmur3Token(8)), -// Murmur3TokenRange(Murmur3Token(8), Murmur3Token(10)), -// Murmur3TokenRange(Murmur3Token(10), Murmur3Token(12))] -``` - -Note: `Murmur3Token` is an implementation detail. The actual class depends on the partitioner -you configured in Cassandra, but in general you don't need to worry about that. `TokenMap` provides -a few utility methods to parse tokens and create new instances: `parse`, `format`, `newToken` and -`newTokenRange`. 
- -You can also retrieve the ranges and tokens owned by a specific replica: - -```java -tokenMap.getTokenRanges(node1); -// [Murmur3TokenRange(Murmur3Token(10), Murmur3Token(12)), -// Murmur3TokenRange(Murmur3Token(4), Murmur3Token(6))] - -tokenMap.getTokens(node1); -// [Murmur3Token(12), Murmur3Token(6)] -``` - -As shown here, the node owns the ranges that *end* with its tokens; this is because ranges are -start-exclusive and end-inclusive: `]10, 12]` and `]4, 6]`. - -Next, you can retrieve keyspace-specific information. To illustrate this, let's use two keyspaces -with different replication settings: - -``` -// RF = 1: each range is only stored on the primary replica -CREATE KEYSPACE ks1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -// RF = 2: each range is stored on the primary replica, and replicated on the next node in the ring -CREATE KEYSPACE ks2 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}; -``` - -`getReplicas` finds the nodes that have the data in a given range: - -```java -TokenRange firstRange = tokenMap.getTokenRanges().iterator().next(); -// Murmur3TokenRange(Murmur3Token(12), Murmur3Token(2)) - -Set nodes1 = tokenMap.getReplicas(CqlIdentifier.fromCql("ks1"), firstRange); -// [node2] (only the primary replica) - -Set nodes2 = tokenMap.getReplicas(CqlIdentifier.fromCql("ks2"), firstRange); -// [node2, node3] (the primary replica, and the next node on the ring) -``` - -There is also a variant that takes a primary key, to find the replicas for a particular row.
In -the following example, let's assume that the key hashes to the token "1" with the current -partitioner: - -```java -String pk = "johndoe@example.com"; -// You need to manually encode the key as binary: -ByteBuffer encodedPk = TypeCodecs.TEXT.encode(pk, session.getContext().getProtocolVersion()); - -Set nodes1 = tokenMap.getReplicas(CqlIdentifier.fromInternal("ks1"), encodedPk); -// Assuming the key hashes to "1", it is in the ]12, 2] range -// => [node2] (only the primary replica) - -Set nodes2 = tokenMap.getReplicas(CqlIdentifier.fromCql("ks2"), encodedPk); -// [node2, node3] (the primary replica, and the next node on the ring) -``` - -Finally, you can go the other way, and find the token ranges that a node stores for a given -keyspace: - -```java -Set ranges1 = tokenMap.getTokenRanges(CqlIdentifier.fromCql("ks1"), node1); -// [Murmur3TokenRange(Murmur3Token(4), Murmur3Token(6)), -// Murmur3TokenRange(Murmur3Token(10), Murmur3Token(12))] -// (only its primary ranges) - -Set ranges2 = tokenMap.getTokenRanges(CqlIdentifier.fromCql("ks2"), node1); -// [Murmur3TokenRange(Murmur3Token(2), Murmur3Token(4)), -// Murmur3TokenRange(Murmur3Token(4), Murmur3Token(6)), -// Murmur3TokenRange(Murmur3Token(8), Murmur3Token(10)), -// Murmur3TokenRange(Murmur3Token(10), Murmur3Token(12))] -// (its primary ranges, and a replica of the primary ranges of node3, the previous node on the ring) -``` - -### Configuration - -#### Enabling/disabling - -You can disable token metadata globally from the configuration: - -``` -datastax-java-driver.advanced.metadata.token-map.enabled = false -``` - -If it is disabled at startup, [Metadata#getTokenMap] will stay empty, and token-aware routing won't -work (requests will be sent to a non-optimal coordinator). If you disable it at runtime, it will -keep the value of the last refresh, and token-aware routing might operate on stale data. 
- -#### Relation to schema metadata - -The keyspace-specific information in `TokenMap` (all methods with a `CqlIdentifier` argument) relies -on [schema metadata](../schema/). If schema metadata is disabled or filtered, token metadata will -also be unavailable for the excluded keyspaces. - - -[Metadata#getTokenMap]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html#getTokenMap-- -[TokenMap]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/TokenMap.html diff --git a/manual/core/metrics/README.md b/manual/core/metrics/README.md deleted file mode 100644 index ef5d9b453f0..00000000000 --- a/manual/core/metrics/README.md +++ /dev/null @@ -1,368 +0,0 @@ - - -## Metrics - -### Quick overview - -* `advanced.metrics` in the configuration. All metrics disabled by default. To enable, select the - metrics library to use, then define which individual metrics to activate. -* some metrics are per node, others global to the session, or both. -* unlike driver 3, JMX is not provided out of the box. You need to add the dependency manually. - ------ - -The driver is able to report measurements of its internal behavior to a variety of metrics -libraries, and ships with bindings for three popular ones: [Dropwizard Metrics] , [Micrometer -Metrics] and [MicroProfile Metrics]. - -### Selecting a Metrics Library - -#### Dropwizard Metrics - -Dropwizard is the driver's default metrics library; there is no additional configuration nor any -extra dependency to add if you wish to use Dropwizard. - -#### Micrometer - -To use Micrometer you must: - -1. Define `MicrometerMetricsFactory` as the metrics factory to use in the driver configuration: - -``` -datastax-java-driver.advanced.metrics { - factory.class = MicrometerMetricsFactory -} -``` - -2. Add a dependency to `java-driver-metrics-micrometer` in your application. 
This separate driver -module contains the actual bindings for Micrometer, and depends itself on the Micrometer library: - -```xml - - org.apache.cassandra - java-driver-metrics-micrometer - ${driver.version} - -``` - -3. You should also exclude Dropwizard and HdrHistogram, which are two transitive dependencies of the -driver, because they are not relevant when using Micrometer: - -```xml - - org.apache.cassandra - java-driver-core - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - -``` - -#### MicroProfile Metrics - -To use MicroProfile Metrics you must: - -1. Define `MicroProfileMetricsFactory` as the metrics factory to use in the driver configuration: - -``` -datastax-java-driver.advanced.metrics { - factory.class = MicroProfileMetricsFactory -} -``` - -2. Add a dependency to `java-driver-metrics-microprofile` in your application. This separate driver -module contains the actual bindings for MicroProfile, and depends itself on the MicroProfile Metrics -library: - -```xml - - org.apache.cassandra - java-driver-metrics-microprofile - ${driver.version} - -``` - -3. You should also exclude Dropwizard and HdrHistogram, which are two transitive dependencies of the -driver, because they are not relevant when using MicroProfile Metrics: - -```xml - - org.apache.cassandra - java-driver-core - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - -``` - -#### Other Metrics libraries - -Other metrics libraries can also be used. However, you will need to provide a custom -metrics factory. Simply implement the -`com.datastax.oss.driver.internal.core.metrics.MetricsFactory` interface for your library of choice, -then pass the fully-qualified name of that implementation class to the driver using the -`advanced.metrics.factory.class` option. See the [reference configuration]. - -You will certainly need to add the metrics library as a dependency to your application as well. 
-It is also recommended excluding Dropwizard and HdrHistogram, as shown above. - -### Enabling specific driver metrics - -Now that the metrics library is configured, you need to activate the driver metrics you are -interested in. - -There are two categories of driver metrics: - -* session-level: the measured data is global to a `Session` instance. For example, `connected-nodes` - measures the number of nodes to which we have connections. -* node-level: the data is specific to a node (and therefore there is one metric instance per node). - For example, `pool.open-connections` measures the number of connections open to this particular - node. - -To find out which metrics are available, see the [reference configuration]. It contains a -commented-out line for each metric, with detailed explanations on its intended usage. - -By default, all metrics are disabled. You can turn them on individually in the configuration, by -adding their name to these lists: - -``` -datastax-java-driver.advanced.metrics { - session.enabled = [ connected-nodes, cql-requests ] - node.enabled = [ pool.open-connections, pool.in-flight ] -} -``` - -If you specify a metric that doesn't exist, it will be ignored, and a warning will be logged. - -Finally, if you are using Dropwizard or Micrometer and enabled any metric of timer type, such as -`cql-requests`, it is also possible to provide additional configuration to fine-tune the underlying -histogram's characteristics and precision, such as its highest expected latency, its number of -significant digits to use, and its refresh interval. Again, see the [reference configuration] for -more details. - -### Selecting a metric identifier style - -Most metric libraries uniquely identify a metric by a name and, optionally, by a set of key-value -pairs, usually called tags. - -The `advanced.metrics.id-generator.class` option is used to customize how the driver generates -metric identifiers. 
The driver ships with two built-in implementations: - -- `DefaultMetricIdGenerator`: generates identifiers composed solely of (unique) metric names; it - does not generate tags. All metric names start with the name of the session (see `session-name` in - the configuration), and in the case of node-level metrics, this is followed by `.nodes.`, followed - by a textual representation of the node's address. All names end with the metric distinctive name. - See below for examples. This generator is mostly suitable for use with metrics libraries that do - not support tags, like Dropwizard. - -- `TaggingMetricIdGenerator`: generates identifiers composed of a name and one or two tags. - Session-level metric names start with the `session.` prefix followed by the metric distinctive - name; node-level metric names start with the `nodes.` prefix followed by the metric distinctive - name. Session-level tags will include a `session` tag whose value is the session name (see - `session-name` in the configuration); node-level tags will include the same `session` tag, and - also a `node` tag whose value is the node's address. See below for examples. This generator is - mostly suitable for use with metrics libraries that support tags, like Micrometer or MicroProfile - Metrics. 
- -For example, here is how each one of them generates identifiers for the session metric "bytes-sent", -assuming that the session is named "s0": - -- `DefaultMetricIdGenerator`: - - name: `s0.bytes-sent` - - tags: `{}` -- `TaggingMetricIdGenerator`: - - name: `session.bytes-sent` - - tags: `{ "session" : "s0" }` - -Here is how each one of them generates identifiers for the node metric "bytes-sent", assuming that -the session is named "s0", and the node's broadcast address is 10.1.2.3:9042: - -- `DefaultMetricIdGenerator`: - - name: `s0.nodes.10_1_2_3:9042.bytes-sent` - - tags: `{}` -- `TaggingMetricIdGenerator`: - - name: `nodes.bytes-sent` - - tags: `{ "session" : "s0", "node" : "/10.1.2.3:9042" }` - -As shown above, both built-in implementations generate names that are path-like structures separated -by dots. This is indeed the format most commonly expected by reporting tools. - -Finally, it is also possible to define a global prefix for all metric names; this can be done with -the `advanced.metrics.id-generator.prefix` option. - -The prefix should not start nor end with a dot or any other path separator; the following are two -valid examples: `cassandra` or `myapp.prod.cassandra`. - -For example, if this prefix is set to `cassandra`, here is how the session metric "bytes-sent" would -be named, assuming that the session is named "s0": - -- with `DefaultMetricIdGenerator`: `cassandra.s0.bytes-sent` -- with `TaggingMetricIdGenerator`: `cassandra.session.bytes-sent` - -Here is how the node metric "bytes-sent" would be named, assuming that the session is named "s0", -and the node's broadcast address is 10.1.2.3:9042: - -- with `DefaultMetricIdGenerator`: `cassandra.s0.nodes.10_1_2_3:9042.bytes-sent` -- with `TaggingMetricIdGenerator`: `cassandra.nodes.bytes-sent` - -### Using an external metric registry - -Regardless of which metrics library is used, you can provide an external metric registry object when -building a session.
This allows the driver to transparently export its operational metrics to -whatever reporting system you want to use. - -To pass a metric registry object to the session, use the `CqlSessionBuilder.withMetricRegistry()` -method: - -```java -CqlSessionBuilder builder = CqlSession.builder(); -builder.withMetricRegistry(myRegistryObject); -CqlSession session = builder.build(); -``` - -Beware that the driver does not inspect the provided object, it simply passes it to the metrics -factory in use; it is the user's responsibility to provide registry objects compatible with the -metrics library in use. For reference, here are the expected base types for the three built-in -metrics libraries: - -* Dropwizard: `com.codahale.metrics.MetricRegistry` -* Micrometer: `io.micrometer.core.instrument.MeterRegistry` -* MicroProfile: `org.eclipse.microprofile.metrics.MetricRegistry` - -**NOTE:** MicroProfile **requires** an external instance of its registry to be provided. For -Micrometer, if no registry object is provided, Micrometer's `globalRegistry` will be used. For -Dropwizard, if no registry object is provided, an instance of `MetricRegistry` will be created and -used (in which case, it can be retrieved programmatically if needed, see below). - -### Programmatic access to driver metrics - -Programmatic access to driver metrics is only available when using Dropwizard Metrics. Users of -other libraries are encouraged to provide an external registry when creating the driver session (see -above), then use it to gain programmatic access to the driver metrics. - -The Dropwizard `MetricRegistry` object is exposed in the driver API via -`session.getMetrics().getRegistry()`. You can retrieve it and, for example, configure a `Reporter` -to send the metrics to a monitoring tool. - -**NOTE:** Beware that `session.getMetrics()` is not available when using other metrics libraries, -and will throw a `NoClassDefFoundError` at runtime if accessed in such circumstances. 
- -### Exposing driver metrics with JMX - -Unlike previous driver versions, JMX support is not included out of the box. - -The way to add JMX support to your application depends largely on the metrics library being used. We -show below instructions for Dropwizard only. Micrometer also has support for JMX: please refer to -its [official documentation][Micrometer JMX]. - -#### Dropwizard Metrics - -Add the following dependency to your application (make sure the version matches the `metrics-core` -dependency of the driver): - -```xml - - io.dropwizard.metrics - metrics-jmx - 4.1.2 - -``` - -Then create a JMX reporter for the registry: - -```java -MetricRegistry registry = session.getMetrics() - .orElseThrow(() -> new IllegalStateException("Metrics are disabled")) - .getRegistry(); - -JmxReporter reporter = - JmxReporter.forRegistry(registry) - .inDomain("com.datastax.oss.driver") - .build(); -reporter.start(); -``` - -Note: by default, the JMX reporter exposes all metrics in a flat structure (for example, -`pool.open-connections` and `pool.in-flight` appear as root elements). If you prefer a hierarchical -structure (`open-connections` and `in-flight` nested into a `pool` sub-domain), use a custom object -factory: - -```java -import com.codahale.metrics.jmx.JmxReporter; -import com.codahale.metrics.jmx.ObjectNameFactory; -import com.google.common.base.Splitter; -import com.google.common.base.Strings; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; - -ObjectNameFactory objectNameFactory = (type, domain, name) -> { - StringBuilder objectName = new StringBuilder(domain).append(':'); - List nameParts = Splitter.on('.').splitToList(name); - int i = 0; - for (String namePart : nameParts) { - boolean isLast = (i == nameParts.size() - 1); - String key = - isLast ? 
"name" : Strings.padStart(Integer.toString(i), 2, '0'); - objectName.append(key).append('=').append(namePart); - if (!isLast) { - objectName.append(','); - } - i += 1; - } - try { - return new ObjectName(objectName.toString()); - } catch (MalformedObjectNameException e) { - throw new RuntimeException(e); - } -}; - -JmxReporter reporter = - JmxReporter.forRegistry(registry) - .inDomain("com.datastax.oss.driver") - .createsObjectNamesWith(objectNameFactory) - .build(); -reporter.start(); -``` - -### Exporting metrics with other protocols - -Dropwizard Metrics has built-in reporters for other output formats: JSON (via a servlet), stdout, -CSV files, SLF4J logs and Graphite. Refer to their [manual][Dropwizard manual] for more details. - -[Dropwizard Metrics]: https://metrics.dropwizard.io/4.1.2 -[Dropwizard Manual]: https://metrics.dropwizard.io/4.1.2/getting-started.html -[Micrometer Metrics]: https://micrometer.io/docs -[Micrometer JMX]: https://micrometer.io/docs/registry/jmx -[MicroProfile Metrics]: https://github.com/eclipse/microprofile-metrics -[reference configuration]: ../configuration/reference/ diff --git a/manual/core/native_protocol/README.md b/manual/core/native_protocol/README.md deleted file mode 100644 index 42146e63f42..00000000000 --- a/manual/core/native_protocol/README.md +++ /dev/null @@ -1,159 +0,0 @@ - - -## Native protocol - -### Quick overview - -Low-level binary format. Mostly irrelevant for everyday use, only governs whether certain features -are available. - -* setting the version: - * automatically negotiated during the connection (improved algorithm in driver 4, no longer an - issue in mixed clusters). - * or force with `advanced.protocol.version` in the configuration. -* reading the version: - [session.getContext().getProtocolVersion()][AttachmentPoint.getProtocolVersion]. - ------ - -The native protocol defines the format of the binary messages exchanged between the driver and -Cassandra over TCP. 
As a driver user, you don't need to know the fine details (although the -[protocol spec] is available if you're curious); the most visible aspect is that some features are -only available with specific protocol versions. - -### Compatibility matrix - -Java Driver 4 supports protocol versions 3 to 5. By default, the version is negotiated with the -first node the driver connects to: - -| Cassandra version | Negotiated protocol version with driver 4 ¹ | -|-------------------|-------------------------------------------------| -| 2.1.x | v3 | -| 2.2.x | v4 | -| 3.x | v4 | -| 4.x | v5 | - -*(1) for previous driver versions, see the [3.x documentation][driver3]* - -Since version 4.5.0, the driver can also use DSE protocols when all nodes are running a version of -DSE. The table below shows the protocol matrix for these cases: - -| DSE version | Negotiated protocol version with driver 4 | -|---------------------|-------------------------------------------------| -| 4.7/4.8 | v3 | -| 5.0 | v4 | -| 5.1 | DSE_V1 ² | -| 6.0/6.7/6.8 | DSE_V2 ² | - -*(2) DSE Protocols are chosen before other Cassandra native protocols.* - -### Controlling the protocol version - -To find out which version you're currently using, use the following: - -```java -ProtocolVersion currentVersion = session.getContext().getProtocolVersion(); -``` - -The protocol version cannot be changed at runtime. However, you can force a particular version in -the [configuration](../configuration/): - -``` -datastax-java-driver { - advanced.protocol { - version = V3 - } -} -``` - -Note that the protocol version you specify above is case sensitive so make sure to only use uppercase letters. -"V3" is correct, "v3" is not. 
- -If you force a version that is too high for the server, you'll get an error: - -``` -Exception in thread "main" com.datastax.oss.driver.api.core.AllNodesFailedException: - All 1 node tried for the query failed (showing first 1 nodes, use getAllErrors() for more: - /127.0.0.1:9042: com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException: - [/127.0.0.1:9042] Host does not support protocol version V5) -``` - -### Protocol version with mixed clusters - -It's possible to have heterogeneous Cassandra versions in your cluster, in particular during a -rolling upgrade. This used to be a problem with previous driver versions, which would negotiate a -version with the first contacted node that might not work with others. - -Starting with driver 4, protocol negotiation uses an improved strategy to prevent those issues: - -* the driver negotiates with the first node for the initial connection (for example, v4 for - Cassandra 3); -* right after connecting, it queries the `system.peers` table to find out the Cassandra version of - the other nodes (for example, node2 → Cassandra 3, node3 → Cassandra 2.1); -* it infers the highest supported protocol version for each node (node2 → v4, node3 → v3); -* it selects the minimum of those protocol versions (v3). If that is lower than the initially - negotiated version, the first connection is closed and reopened; -* the connection to the rest of the nodes proceeds with the possibly downgraded protocol version. - -Thanks to this approach, automatic negotiation works even with mixed clusters, you don't need to -force the protocol version manually anymore. - -### Debugging protocol negotiation - -You can observe the negotiation process in the [logs](../logging/). 
- -The versions tried while negotiating with the first node are logged at level `DEBUG` in the category -`com.datastax.oss.driver.internal.core.channel.ChannelFactory`: - -``` -DEBUG ChannelFactory - Failed to connect with protocol v4, retrying with v3 -``` - -If a mixed cluster renegotiation happens, it is logged at level `INFO` in the category -`com.datastax.oss.driver.internal.core.session.DefaultSession`: - -``` -INFO DefaultSession - Negotiated protocol version v4 for the initial contact point, but other nodes - only support v3, downgrading -``` - -If you want to see the details of mixed cluster negotiation, enable `DEBUG` level for the category -`com.datastax.oss.driver.internal.core.CassandraProtocolVersionRegistry`. - -### New features by protocol version - -#### v3 to v4 - -* [query warnings][ExecutionInfo.getWarnings] -* [unset values in bound statements](../statements/prepared/#unset-values) -* [custom payloads][Request.getCustomPayload] - -#### v4 to v5 - -* [per-query keyspace](../statements/per_query_keyspace) -* [improved prepared statement resilience](../statements/prepared/#prepared-statements-and-schema-changes) - in the face of schema changes - -[protocol spec]: https://github.com/datastax/native-protocol/tree/1.x/src/main/resources -[driver3]: https://docs.datastax.com/en/developer/java-driver/3.10/manual/native_protocol/ - -[ExecutionInfo.getWarnings]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getWarnings-- -[Request.getCustomPayload]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getCustomPayload-- -[AttachmentPoint.getProtocolVersion]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/detach/AttachmentPoint.html#getProtocolVersion-- diff --git a/manual/core/non_blocking/README.md b/manual/core/non_blocking/README.md deleted file mode 100644 index f320ffd13d2..00000000000 --- 
a/manual/core/non_blocking/README.md +++ /dev/null @@ -1,306 +0,0 @@ - - -## Non-blocking programming - -### Quick overview - -With the advent of reactive programming, the demand for fully non-blocking libraries has become -popular among application developers. The recent availability of frameworks enforcing lock-freedom, -such as [Vert.x] or [Reactor], along with tools for automatic detection of blocking calls like -[BlockHound], has exacerbated this trend even more so. - -[Vert.x]: https://vertx.io -[Reactor]: https://projectreactor.io -[BlockHound]: https://github.com/reactor/BlockHound - -**In summary, when used properly, the Java Driver offers non-blocking guarantees for most -of its operations, and during most of the session lifecycle.** - -These guarantees and their exceptions are detailed below. A final chapter explains how to use the -driver with BlockHound. - -The developer guide also has more information on driver internals and its -[concurrency model](../../developer/common/concurrency). - -### Definition of "non-blocking" - -Since the term "non-blocking" is subject to interpretation, in this page the term should be -understood as "[lock-free]": a program is non-blocking if at least one thread is guaranteed to make -progress; such programs are implemented without locks, mutexes nor semaphores, using only low-level -primitives such as atomic variables and CAS (compare-and-swap) instructions. - -A further distinction is generally established between "lock-free" and "wait-free" algorithms: the -former ones allow progress of the overall system, while the latter ones allow each thread to make -progress at any time. This distinction is however rather theoretical and is outside the scope of -this document. - -[lock-free]: https://www.baeldung.com/lock-free-programming - -### Driver lock-free guarantees - -#### Driver lock-free guarantees per execution models - -The driver offers many execution models. 
For the built-in ones, the lock-free guarantees are as -follows: - -* The synchronous API is blocking and does not offer any lock-free guarantee. -* The [asynchronous](../async) API is implemented in lock-free algorithms. -* The [reactive](../reactive) API is implemented in lock-free algorithms (it's actually wait-free). - -For example, calling any synchronous method declared in [`SyncCqlSession`], such as [`execute`], -will block until the result is available. These methods should never be used in non-blocking -applications. - -[`SyncCqlSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html` -[`execute`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html#execute-com.datastax.oss.driver.api.core.cql.Statement- - -However, the asynchronous methods declared in [`AsyncCqlSession`], such as [`executeAsync`], are all -safe for use in non-blocking applications; the statement execution and asynchronous result delivery -is guaranteed to never block. - -[`AsyncCqlSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.html -[`executeAsync`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.html#executeAsync-com.datastax.oss.driver.api.core.cql.Statement- - -The same applies to the methods declared in [`ReactiveSession`] such as [`executeReactive`]: the -returned publisher will never block when subscribed to, until the final results are delivered to -the subscriber. 
- -[`ReactiveSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.html -[`executeReactive`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.html#executeReactive-com.datastax.oss.driver.api.core.cql.Statement- - -There is one exception though: continuous paging queries (a feature specific to DSE) have a special -execution model which uses internal locks for coordination. Although such locks are only held for -extremely brief periods of time, and never under high contention, this execution model doesn't -qualify as lock-free. - -As a consequence, all methods declared in [`ContinuousSession`] and [`ContinuousReactiveSession`] -cannot be considered as implemented 100% lock-free, even those built on top of the asynchronous or -reactive APIs like [`executeContinuouslyAsync`] and [`executeContinuouslyReactive`]. In practice -though, continuous paging is extremely efficient and can safely be used in most non-blocking -contexts, unless they require strict lock-freedom. 
- -[`ContinuousSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.html -[`ContinuousReactiveSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.html -[`executeContinuouslyAsync`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.html#executeContinuouslyAsync-com.datastax.oss.driver.api.core.cql.Statement- -[`executeContinuouslyReactive`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.html#executeContinuouslyReactive-com.datastax.oss.driver.api.core.cql.Statement- - -#### Driver lock-free guarantees per session lifecycle phases - -The guarantees vary according to three possible session states: initializing, running, and closing. - -Session initialization is a costly operation that performs many I/O operations, hitting both the -local filesystem (configuration files) and the network (connection initialization). This procedure -is triggered by a call to [`SessionBuilder.buildAsync()`] and happens partially on the calling -thread, and partially asynchronously on an internal driver thread. - -* The creation of the [driver context] happens synchronously on the calling thread. The context - creation usually requires file I/O, mainly to read configuration files. A call to - `SessionBuilder.buildAsync()`, in spite of its name, is thus a blocking call and must be - dispatched to a thread that is allowed to block. -* The rest of the initialization process will happen asynchronously, on an internal driver admin - thread. This process is mostly non-blocking, with a few exceptions listed below. Therefore, - the driver admin thread performing the initialization tasks must be allowed to block, at least - temporarily. 
- -[driver context]: ../../developer/common/context - -For the reasons above, the initialization phase obviously doesn't qualify as lock-free. For -non-blocking applications, it is generally advised to trigger session initialization during -application startup, before strong non-blocking guarantees are enforced on application threads. - -Similarly, a call to [`SessionBuilder.build()`] should be considered blocking as it will block the -calling thread and wait until the method returns. For this reason, calls to `SessionBuilder.build()` -should be avoided in non-blocking applications. - -[`SessionBuilder.buildAsync()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#buildAsync-- -[`SessionBuilder.build()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#build-- - -Once the session is initialized, however, the driver is guaranteed to be non-blocking during the -session's lifecycle, and under normal operation, unless otherwise noted elsewhere in this document. - -Finally, closing the session is generally non-blocking, but the driver offers no strong guarantees -during that phase. Therefore, calls to any method declared in [`AsyncAutoCloseable`], including the -asynchronous ones like [`closeAsync()`], should also be preferably deferred until the application is -shut down and lock-freedom enforcement is disabled. - -[`AsyncAutoCloseable`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncAutoCloseable.html -[`closeAsync()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncAutoCloseable.html#closeAsync-- - -#### Driver lock-free guarantees for specific components - -Certain driver components are not implemented in lock-free algorithms. - -For example, [`SafeInitNodeStateListener`] is implemented with internal locks for coordination. 
It -should not be used if strict lock-freedom is enforced. - -[`SafeInitNodeStateListener`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.html - -The `RateLimitingRequestThrottler` is currently blocking. The `ConcurrencyLimitingRequestThrottler` -is lock-free. - -See the section about [throttling](../throttling) for details about these components. Depending on -how many requests are being executed in parallel, the thread contention on these locks can be high: -in short, if your application enforces strict lock-freedom, then you should not use the -`RateLimitingRequestThrottler`. - -[request throttlers]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.html - -Other components may be lock-free, *except* for their first invocation. This is the case of the -following items: - -* All built-in implementations of [`TimestampGenerator`], upon instantiation; -* The utility method [`Uuids.timeBased()`]. - -[`TimestampGenerator`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/time/TimestampGenerator.html -[`Uuids.timeBased()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/uuid/Uuids.html#timeBased-- - -Both components need to access native libraries when they get initialized and this may involve -hitting the local filesystem, thus causing the initialization to become a blocking call. - -Timestamp generators are automatically created when the session is initialized, and are thus -generally safe to use afterwards. - -`Uuids.timeBased()`, however, is a convenience method that the driver doesn't use internally. For -this reason, it is advised that this method be called once during application startup, so that it is -safe to use it afterwards in a non-blocking context. 
- -Alternatively, it's possible to disable the usage of client-side timestamp generation, and/or the -usage of native libraries. See the manual sections on [query timestamps](../query_timestamps) and -[integration](../integration) for more information. - -One component, the codec registry, can block when its [`register`] method is called; it is -therefore advised that codecs should be registered during application startup exclusively. See the -[custom codecs](../custom_codecs) section for more details about registering codecs. - -[`register`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.html#register-com.datastax.oss.driver.api.core.type.codec.TypeCodec- - -Finally, a few internal components also use locks, but only during session initialization; once the -session is ready, they are either discarded, or don't use locks anymore for the rest of the -session's lifecycle. - -These components are safe to use once the session is ready, although they could be reported by -lock-freedom monitoring tools. They are listed below in case their exclusion is necessary: - -* `com.datastax.oss.driver.internal.core.context.DefaultNettyOptions` -* `com.datastax.oss.driver.internal.core.util.concurrent.LazyReference` -* `com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter` - -#### Driver lock-free guarantees on topology and status events - -Topology and status events can cause the driver to use locks temporarily. - -When a node gets added to the cluster, or when a node state changes (DOWN to UP or vice versa), the -driver needs to notify a few components: the load balancing policies need to coordinate in order to -assign a new distance to the node (LOCAL, REMOTE or IGNORED); and the node connection pool will have -to be resized either to accommodate new connections, or to close existing ones. - -These operations use internal locks for coordination. 
Again, they are only held for extremely brief -periods of time, and never under high contention. Note that this behavior cannot be disabled or -changed; if you need to enforce strict lock-freedom, and topology or status changes are being -reported as infringements, consider adding exceptions for the following method calls: - - * `com.datastax.oss.driver.internal.core.pool.ChannelSet#add(DriverChannel)` - * `com.datastax.oss.driver.internal.core.pool.ChannelSet#remote(DriverChannel)` - * `com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper$SinglePolicyDistanceReporter#setDistance(Node,NodeDistance)` - -#### Driver lock-free guarantees on random uuid generation - -Until driver 4.9, the [`Uuids.random()`] method was a blocking call. Because of that, this method -could not be used in non-blocking contexts, making UUID generation a difficult issue to solve. - -Moreover, this method is used in a few places internally. This situation was unfortunate because -lock-freedom enforcement tools could report calls to that method, but it was impossible to suppress -these calls. Thanks to [JAVA-2449], released with driver 4.10.0, `Uuids.random()` became a -non-blocking call and random UUIDs can now be safely generated in non-blocking applications. - -[`Uuids.random()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-- -[JAVA-2449]: https://datastax-oss.atlassian.net/browse/JAVA-2449 - -#### Driver lock-free guarantees when reloading the configuration - -The driver has a pluggable configuration mechanism built around the [`DriverConfigLoader`] -interface. Implementors may choose to support [hot-reloading] of configuration files, and the -default built-in implementation has this feature enabled by default. - -Beware that a hot-reloading of the default configuration mechanism is performed on a driver internal -admin thread. 
If hot-reloading is enabled, then this might be reported by lock-freedom infringement -detectors. If that is the case, it is advised to disable hot-reloading by setting the -`datastax-java-driver.basic.config-reload-interval` option to 0. See the manual page on -[configuration](../configuration) for more information. - -[`DriverConfigLoader`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html -[hot-reloading]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#supportsReloading-- - -#### Driver lock-free guarantees when connecting to DSE - -When connecting to clusters running recent DSE versions, the driver automatically enables periodic -status reporting. When preparing the status report, the driver has to hit the local filesystem, and -because of that, the status reporting process does not qualify as lock-free. - -If lock-freedom is being enforced, then automatic status reporting must be disabled by setting the -`datastax-java-driver.advanced.monitor-reporting.enabled` property to false in the driver -configuration. - -### Driver mechanism for detection of blocking calls - -The driver has its own mechanism for detecting blocking calls happening on an internal driver -thread. This mechanism is capable of detecting and reporting blatant cases of misuse of the -asynchronous and reactive APIs, e.g. when the synchronous API is invoked inside a future or callback -produced by the asynchronous execution of a statement. See the core manual page on the -[asynchronous](../async) API or the developer manual page on -[driver concurrency](../../developer/common/concurrency) for details. - -The driver is not capable, however, of detecting low-level lock-freedom infringements, such as the -usage of locks. You must use an external tool to achieve that. See below how to use BlockHound for -that. 
- -### Using the driver with Reactor BlockHound - -[Reactor]'s tool for automatic detection of blocking calls, [BlockHound], is capable of detecting -and reporting any sort of blocking calls, including I/O, locks, `Thread.sleep`, etc. - -When used with the driver, BlockHound can report some calls that, for the reasons explained above, -could be safely considered as false positives. - -For this reason, the driver, since version 4.10, ships with a custom `DriverBlockHoundIntegration` -class which is automatically discovered by BlockHound through the Service Loader mechanism. It -contains BlockHound customizations that target most of the cases detailed above, and prevent them -from being reported as blocking calls. - -More specifically, the following items are currently declared to be allowed: - -* Loading of native libraries during startup (`TimestampGenerator`); -* Locks held during startup only (`DefaultNettyOptions`, `LazyReference`, `ReplayingEventFilter`); -* Locks held during startup and topology and status events processing (`ChannelSet`, - `DistanceReporter`); -* Locks held when executing continuous paging queries; -* Locks held during calls to `MutableCodecRegistry.register()` and `Uuids.timeBased()`. - -The following items are NOT declared to be allowed and are likely to be reported by BlockHound if -used: - -* Request throttlers; -* Automatic status reporting; -* `SafeInitNodeStateListener`. - -Note that other blocking startup steps, e.g. loading of configuration files, are also not declared -to be allowed, because these are genuine blocking I/O calls. For this reason, if BlockHound is being -used, the loading of the driver context, performed by the thread calling `SessionBuilder.build()` -or `SessionBuilder.buildAsync()`, must be allowed to perform blocking calls. 
diff --git a/manual/core/paging/README.md b/manual/core/paging/README.md deleted file mode 100644 index 2df92bd69d1..00000000000 --- a/manual/core/paging/README.md +++ /dev/null @@ -1,284 +0,0 @@ - - -## Paging - -### Quick overview - -How the server splits large result sets into multiple network responses. - -* `basic.request.page-size` in the configuration. -* transparent in the synchronous API (`session.execute`): the driver fetches new pages in the - background as you iterate. -* explicit in the asynchronous API (`session.executeAsync`): - [AsyncResultSet.hasMorePages()][AsyncPagingIterable.hasMorePages] and - [AsyncResultSet.fetchNextPage()][AsyncPagingIterable.fetchNextPage]. -* paging state: record the current position and reuse it later (forward only). -* offset queries: emulated client-side with [OffsetPager] \(**this comes with important performance - trade-offs, make sure you read and understand the full documentation below**). - ------ - -When a query returns many rows, it would be inefficient to return them as a single response message. -Instead, the driver breaks the results into *pages* which get returned as they are needed. - - -### Setting the page size - -The page size specifies how many rows the server will return in each network frame. You can set it -in the configuration: - -``` -datastax-java-driver.basic.request.page-size = 5000 -``` - -It can be changed at runtime (the new value will be used for requests issued after the change). If -you have categories of queries that require different page sizes, use -[configuration profiles](../configuration#profiles). - -Note that the page size is merely a hint; the server will not always return the exact number of -rows, it might decide to return slightly more or less. - - -### Synchronous paging - -The fetch size limits the number of results that are returned in one page; if you iterate past that, -the driver uses background queries to fetch subsequent pages. 
Here's an example with a fetch size of -20: - -```java -ResultSet rs = session.execute("SELECT * FROM my_table WHERE k = 1"); -for (Row row : rs) { - // process the row -} -``` - -```ditaa - client Session Cassandra - --+--------------+---------------------------------+----- - |execute(query)| | - |------------->| | - | | query rows 1 to 20 | - | |-------------------------------->| - | | | - | |create | - | |------>ResultSet | - | | | -+-----+--------+-----------------+-+ | -|For i in 1..20| | | | -+--------------+ | | | -| | get next row | | | -| |------------------------->| | | -| | row i | | | -| |<-------------------------| | | -| | | | | -+-----+--------------------------+-+ | - | | | - | | | - | get next row | | - |------------------------->| | - | | query rows 21 to 40 | - | |-------------------->| - | row 21 | | - |<------------------------ | | -``` - - -### Asynchronous paging - -In previous versions of the driver, the synchronous and asynchronous APIs returned the same -`ResultSet` type. This made asynchronous paging very tricky, because it was very easy to -accidentally trigger background synchronous queries (which would defeat the whole purpose of async, -and potentially introduce deadlocks). - -To avoid this problem, the driver's asynchronous API now returns a dedicated [AsyncResultSet]; -iteration only yields the current page, and the next page must be fetched explicitly. To iterate a -result set in a fully asynchronous manner, you need to compose page futures using the methods of -[CompletionStage]. 
Here's an example that prints each row on the command line: - -```java -CompletionStage resultSetFuture = - session.executeAsync("SELECT * FROM myTable WHERE id = 1"); -// The returned stage will complete once all the rows have been printed: -CompletionStage printRowsFuture = resultSetFuture.thenCompose(this::printRows); - -private CompletionStage printRows(AsyncResultSet resultSet) { - for (Row row : resultSet.currentPage()) { - System.out.println(row.getFormattedContents()); - } - if (resultSet.hasMorePages()) { - return resultSet.fetchNextPage().thenCompose(this::printRows); - } else { - return CompletableFuture.completedFuture(null); - } -} -``` - -If you need to propagate state throughout the iteration, add parameters to the callback. Here's an -example that counts the number of rows (obviously this is contrived, you would use `SELECT COUNT(*)` -instead of doing this client-side, but it illustrates the basic principle): - -```java -CompletionStage resultSetFuture = - session.executeAsync("SELECT * FROM myTable WHERE id = 1"); -CompletionStage countFuture = resultSetFuture.thenCompose(rs -> countRows(rs, 0)); - -private CompletionStage countRows(AsyncResultSet resultSet, int previousPagesCount) { - int count = previousPagesCount; - for (Row row : resultSet.currentPage()) { - count += 1; - } - if (resultSet.hasMorePages()) { - int finalCount = count; // need a final variable to use in the lambda below - return resultSet.fetchNextPage().thenCompose(rs -> countRows(rs, finalCount)); - } else { - return CompletableFuture.completedFuture(count); - } -} -``` - -See [Asynchronous programming](../async/) for more tips about the async API. - -### Saving and reusing the paging state - -Sometimes it is convenient to interrupt paging and resume it later. For example, this could be -used for a stateless web service that displays a list of results with a link to the next page. 
When -the user clicks that link, we want to run the exact same query, except that the iteration should -start where we stopped the last time. - -The driver exposes a *paging state* for that: - -```java -ResultSet rs = session.execute("your query"); -ByteBuffer pagingState = rs.getExecutionInfo().getPagingState(); - -// Finish processing the current page -while (rs.getAvailableWithoutFetching() > 0) { - Row row = rs.one(); - // process the row -} - -// Later: -SimpleStatement statement = - SimpleStatement.builder("your query").setPagingState(pagingState).build(); -session.execute(statement); -``` - -Note the loop to finish the current page after we extract the state. The new statement will start at -the beginning of the next page, so we want to make sure we don't leave a gap of unprocessed rows. - -The paging state can only be reused with the exact same statement (same query string, same -parameters). It is an opaque value that is only meant to be collected, stored and re-used. If you -try to modify its contents or reuse it with a different statement, the results are unpredictable. - -If you want additional safety, the driver also provides a "safe" wrapper around the raw value: -[PagingState]. - -```java -PagingState pagingState = rs.getExecutionInfo().getSafePagingState(); -``` - -It works in the exact same manner, except that it will throw an `IllegalStateException` if you try -to reinject it in the wrong statement. This allows you to detect the error early, without a -roundtrip to the server. - -Note that, if you use a simple statement and one of the bound values requires a [custom -codec](../custom_codecs), you have to provide a reference to the session when reinjecting the paging -state: - -```java -CustomType value = ... 
-SimpleStatement statement = SimpleStatement.newInstance("query", value); -// session required here, otherwise you will get a CodecNotFoundException: -statement = statement.setPagingState(pagingState, session); -``` - -This is a small corner case because checking the state requires encoding the values, and a simple -statement doesn't have a reference to the codec registry. If you don't use custom codecs, or if the -statement is a bound statement, you can use the regular `setPagingState(pagingState)`. - -### Offset queries - -Saving the paging state works well when you only let the user move from one page to the next. But in -most Web UIs and REST services, you need paginated results with random access, for example: "given a -page size of 20 elements, fetch page 5". - -Cassandra does not support this natively (see -[CASSANDRA-6511](https://issues.apache.org/jira/browse/CASSANDRA-6511)), because such queries are -inherently linear: the database would have to restart from the beginning every time, and skip -unwanted rows until it reaches the desired offset. - -However, random pagination is a real need for many applications, and linear performance can be a -reasonable trade-off if the cardinality stays low. The driver provides a utility to emulate offset -queries on the client side: [OffsetPager]. - -#### Performance considerations - -For each page that you want to retrieve: - -* you need to re-execute the query, in order to start with a fresh result set; -* you then pass the result to `OffsetPager`, which starts iterating from the beginning, and skips - rows until it reaches the desired offset. 
- -```java -String query = "SELECT ..."; -OffsetPager pager = new OffsetPager(20); - -// Get page 2: start from a fresh result set, throw away rows 1-20, then return rows 21-40 -ResultSet rs = session.execute(query); -OffsetPager.Page page2 = pager.getPage(rs, 2); - -// Get page 5: start from a fresh result set, throw away rows 1-80, then return rows 81-100 -rs = session.execute(query); -OffsetPager.Page page5 = pager.getPage(rs, 5); -``` - -Note that `getPage` can also process the entity iterables returned by the [mapper](../../mapper/). - -#### Establishing application-level guardrails - -Linear performance should be fine for the values typically encountered in real-world applications: -for example, if the page size is 25 and users never go past page 10, the worst case is only 250 -rows, which is a very small result set. However, we strongly recommend that you implement hard -limits in your application code: if the page number is exposed to the user (for example if it is -passed as a URL parameter), make sure it is properly validated and enforce a maximum, so that an -attacker can't inject a large value that could potentially fetch millions of rows. - -#### Relation with protocol-level paging - -Offset paging has no direct relation to `basic.request.page-size`. Protocol-level paging happens -under the hood, and is completely transparent for offset paging: `OffsetPager` will work the same no -matter how many network roundtrips were needed to fetch the result. You don't need to set the -protocol page size and the logical page size to the same value. - ------ - -The [driver examples] include two complete web service implementations demonstrating forward-only -and offset paging. 
- -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[AsyncPagingIterable.hasMorePages]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncPagingIterable.html#hasMorePages-- -[AsyncPagingIterable.fetchNextPage]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncPagingIterable.html#fetchNextPage-- -[OffsetPager]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/paging/OffsetPager.html -[PagingState]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/PagingState.html - -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html - -[driver examples]: https://github.com/datastax/java-driver/tree/4.x/examples/src/main/java/com/datastax/oss/driver/examples/paging diff --git a/manual/core/performance/README.md b/manual/core/performance/README.md deleted file mode 100644 index 3afb321968e..00000000000 --- a/manual/core/performance/README.md +++ /dev/null @@ -1,371 +0,0 @@ - - -## Performance - -This page is intended as a checklist for everything related to driver performance. Most of the -information is already available in other sections of the manual, but it's linked here for -easy reference if you're benchmarking your application or diagnosing performance issues. - - -### Statements - -[Statements](../statements/) are some of the driver types you'll use the most. Every request needs -one -- even `session.execute(String)` creates a `SimpleStatement` under the hood. - -#### Immutability and builders - -Statements are by default implemented with immutable classes: every call to a setter method creates -an intermediary copy. 
If you have multiple attributes to set, use a builder instead: - -```java -SimpleStatement statement = SimpleStatement.builder("SELECT * FROM foo") - .setPageSize(20) - .setConsistencyLevel(DefaultConsistencyLevel.QUORUM) - .setIdempotence(true) - .build(); -``` - -Also, note that you don't need a driver `Session` to create simple statements: they can be -initialized statically and stored as constants. - -#### Prepared statements - -[Prepared statements](../statements/prepared) allow Cassandra to cache parsed query strings -server-side, but that's not their only benefit for performance: - -* the driver also caches the response metadata, which can then be skipped in subsequent responses. - This saves bandwidth, as well as the CPU and memory resources required to parse it. -* in some cases, prepared statements set routing information automatically, which allows the driver - to target the most appropriate replicas. - -You should use prepared statements for all recurring requests in your application. Simple statements -should only be used for one-off queries, for example some initialization that will be done only once -at startup. - -The driver caches prepared statements -- see [CqlSession.prepare(SimpleStatement)] for the fine -print. However, if the query is static, it's still a good practice to cache your `PreparedStatement` -instances to avoid calling `prepare()` every time. 
One common pattern is to use some sort of DAO -component: - -```java -public static class UserDao { - - private final CqlSession session; - private final PreparedStatement preparedFindById; - - public UserDao(CqlSession session) { - this.session = session; - this.preparedFindById = session.prepare("SELECT * FROM user WHERE id = ?"); - } - - public User findById(int id) { - Row row = session.execute(preparedFindById.bind(id)).one(); - return new User(row.getInt("id"), row.getString("first_name"), row.getString("last_name")); - } -} -``` - - -### Request execution - -#### Connection pooling - -By default, the driver opens 1 connection per node, and allows 1024 concurrent requests on each -connection. In our experience this is enough for most scenarios. - -If your application generates a very high throughput (hundreds of thousands of requests per second), -you might want to experiment with different settings. See the [tuning](../pooling/#tuning) section -in the connection pooling page. - -#### Compression - -Consider [compression](../compression/) if your queries return large payloads; it might help to -reduce network traffic. - -#### Timestamp generation - -Each query is assigned a [timestamp](../query_timestamps/) to order them relative to each other. - -By default, this is done driver-side with -[AtomicTimestampGenerator](../query_timestamps/#atomic-timestamp-generator). This is a very simple -operation so unlikely to be a bottleneck, but note that there are other options, such as a -[thread-local](../query_timestamps/#thread-local-timestamp-generator) variant that creates slightly -less contention, writing your own implementation or letting the server assign timestamps. - -#### Tracing - -[Tracing](../tracing/) should be used for only a small percentage of your queries. It consumes -additional resources on the server, and fetching each trace requires background requests. - -Do not enable tracing for every request; it's a sure way to bring your performance down. 
- -#### Request trackers - -[Request trackers](../request_tracker/) are on the hot path (that is, invoked on I/O threads, each -time a request is executed), and users can plug custom implementations. - -If you experience throughput issues, check if any trackers are configured, and what they are doing. -They should avoid blocking calls, as well as any CPU-intensive computations. - -#### Metrics - -Similarly, some of the driver's [metrics](../metrics/) are updated for every request (if the metric -is enabled). - -By default, the driver ships with all metrics disabled. Enable them conservatively, and if you're -investigating a performance issue, try disabling them temporarily to check that they are not the -cause. - -#### Throttling - -[Throttling](../throttling/) can help establish more predictable server performance, by controlling -how much load each driver instance is allowed to put on the cluster. The throttling algorithm itself -incurs a bit of overhead in the driver, but that shouldn't be a problem since the goal is to stay -under reasonable rates in the first place. - -If you're debugging an unfamiliar application and experience a throughput plateau, make sure that -it's not caused by a throttler. - - -### Caching reusable objects - -Many driver objects are immutable. If you reuse the same values often, consider caching them in -private fields or constants to alleviate GC pressure. - -#### Identifiers - -The driver uses [CqlIdentifier] to deal with [case sensitivity](../../case_sensitivity). 
When you -call methods that take raw strings, the driver generally wraps them under the hood: - -```java -session.getMetadata().getKeyspace("inventory"); // shortcut for getKeyspace(CqlIdentifier.fromCql("inventory")) - -// Caching the identifier: -public static final CqlIdentifier INVENTORY_ID = CqlIdentifier.fromCql("inventory"); - -session.getMetadata().getKeyspace(INVENTORY_ID); -``` - -This also applies to built queries, although it's less important because generally the whole query -can be cached (see below). - -Note however that row getters and bound statement setters do **not** wrap their argument: because -those methods are used very often, they handle raw strings with an optimized algorithm that does not -require creating an identifier (the rules are detailed [here][AccessibleByName]). - -```java -// No need to extract a CqlIdentifier, raw strings are handled efficiently: -Row row = session.execute("SELECT * FROM user WHERE id = 1").one(); -row.getInt("age"); - -PreparedStatement pst = session.prepare("UPDATE user SET age=:age WHERE id=:id"); -pst.bind().setInt("age", 25); -``` - -#### Type tokens - -[GenericType] is used to express complex generic types -- such as -[nested collections](../#collection-types) -- in getters and setters. These objects are immutable -and stateless, so they are good candidates for constants: - -```java -public static final GenericType<Set<List<String>>> SET_OF_LIST_OF_STRING = new GenericType<Set<List<String>>>() {}; - -Set<List<String>> teams = row.get("teams", SET_OF_LIST_OF_STRING); -``` - -`GenericType` itself already exposes a few of those constants. You can create your own utility class -to store yours. - -#### Built queries - -Similarly, [built queries](../../query_builder/) are immutable and don't need a reference to a live -driver instance. 
If you create them statically, they can be stored as constants: - -```java -public static final BuildableQuery SELECT_SERVER_VERSION = - selectFrom("system", "local").column("release_version"); -``` - -Note that you don't necessarily need to extract `CqlIdentifier` constants since the construction -already happens at initialization time. - -#### Derived configuration profiles - -The configuration API allows you to build [derived profiles](../configuration/#derived-profiles) at -runtime. - -```java -DriverExecutionProfile dynamicProfile = - defaultProfile.withString( - DefaultDriverOption.REQUEST_CONSISTENCY, DefaultConsistencyLevel.EACH_QUORUM.name()); -``` - -Their use is generally discouraged (you should define profiles statically in the configuration file -as much as possible), but if there's no other way and you reuse them over time, store them instead -of recreating them each time. - -### Metadata - -The driver maintains [metadata](../metadata/) about the state of the Cassandra cluster. This work is -done on dedicated "admin" threads (see the [thread pooling](#thread-pooling) section below), so it's -not in direct competition with regular requests. - - -#### Filtering - -You can disable entire parts of the metadata with those configuration options: - -``` -datastax-java-driver.advanced.metadata { - schema.enabled = true - token-map.enabled = true -} -``` - -This will save CPU and memory resources, but you lose some driver features: - -* if schema is disabled, `session.getMetadata().getKeyspaces()` will always be empty: your - application won't be able to inspect the database schema dynamically. -* if the token map is disabled, `session.getMetadata().getTokenMap()` will always be empty, and you - lose the ability to use [token-aware routing](../load_balancing/#token-aware). - -Note that disabling the schema implicitly disables the token map (because computing the token map -requires the keyspace replication settings). 
- -Perhaps more interestingly, metadata can be [filtered](../metadata/schema/#filtering) to a specific -subset of keyspaces. This is handy if you connect to a shared cluster that holds data for multiple -applications: - -``` -datastax-java-driver.advanced.metadata { - schema.refreshed-keyspaces = [ "users", "inventory" ] -} -``` - -To get a sense of the time spent on metadata refreshes, enable [debug logs](../logging/) and look -for entries like this: - -``` -[s0-io-0] DEBUG c.d.o.d.i.c.m.s.q.CassandraSchemaQueries - [s0] Schema queries took 88 ms -[s0-admin-0] DEBUG c.d.o.d.i.c.m.s.p.CassandraSchemaParser - [s0] Schema parsing took 71 ms -[s0-admin-0] DEBUG c.d.o.d.i.c.metadata.DefaultMetadata - [s0] Refreshing token map (only schema has changed) -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.DefaultTokenMap - [s0] Computing keyspace-level data for {system_auth={class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1}, system_schema={class=org.apache.cassandra.locator.LocalStrategy}, system_distributed={class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3}, system={class=org.apache.cassandra.locator.LocalStrategy}, system_traces={class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2}} -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.DefaultTokenMap - [s0] Computing new keyspace-level data for {class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1} -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.KeyspaceTokenMap - [s0] Computing keyspace-level data for {class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=1} took 12 ms -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.DefaultTokenMap - [s0] Computing new keyspace-level data for {class=org.apache.cassandra.locator.LocalStrategy} -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.KeyspaceTokenMap - [s0] Computing keyspace-level data for {class=org.apache.cassandra.locator.LocalStrategy} took 1 ms -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.DefaultTokenMap - [s0] Computing new 
keyspace-level data for {class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3} -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.KeyspaceTokenMap - [s0] Computing keyspace-level data for {class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=3} took 54 us -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.DefaultTokenMap - [s0] Computing new keyspace-level data for {class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2} -[s0-admin-0] DEBUG c.d.o.d.i.c.m.token.KeyspaceTokenMap - [s0] Computing keyspace-level data for {class=org.apache.cassandra.locator.SimpleStrategy, replication_factor=2} took 98 us -[s0-admin-0] DEBUG c.d.o.d.i.c.metadata.DefaultMetadata - [s0] Rebuilding token map took 32 ms -[s0-admin-0] DEBUG c.d.o.d.i.c.metadata.MetadataManager - [s0] Applying schema refresh took 34 ms -``` - -#### Debouncing - -The driver receives push notifications of schema and topology changes from the Cassandra cluster. -These signals are *debounced*, meaning that rapid series of events will be amortized, for example: - -* if multiple schema objects are created or modified, only perform a single schema refresh at the - end. -* if a node's status oscillates rapidly between UP and DOWN, wait for gossip to stabilize and only - apply the last state. - -Debouncing is controlled by these configuration options (shown here with their defaults): - -``` -datastax-java-driver.advanced.metadata { - topology-event-debouncer { - # How long the driver waits to propagate an event. If another event is received within that - # time, the window is reset and a batch of accumulated events will be delivered. - window = 1 second - - # The maximum number of events that can accumulate. If this count is reached, the events are - # delivered immediately and the time window is reset. 
- max-events = 20 - } - schema.debouncer { - window = 1 second - max-events = 20 - } -} -``` - -You may adjust those settings depending on your application's needs: higher values mean less impact -on performance, but the driver will be slower to react to changes. - -#### Schema updates - -You should group your schema changes as much as possible. - -Every change made from a client will be pushed to all other clients, causing them to refresh their -metadata. If you have multiple client instances, it might be a good idea to -[deactivate the metadata](../metadata/schema/#enabling-disabling) on all clients while you apply the -updates, and reactivate it at the end (reactivating will trigger an immediate refresh, so you might -want to ramp up clients to avoid a "thundering herd" effect). - -Schema changes have to replicate to all nodes in the cluster. To minimize the chance of schema -disagreement errors: - -* apply your changes serially. The driver handles this automatically by checking for - [schema agreement](../metadata/schema/#schema-agreement) after each DDL query. Run them from the - same application thread, and, if you use the asynchronous API, chain the futures properly. -* send all the changes to the same coordinator. This is one of the rare cases where we recommend - using [Statement.setNode()]. - -### Thread pooling - -The driver architecture is designed around two code paths: - -* the **hot path** is everything directly related to the execution of requests: encoding/decoding - driver types to/from low-level binary payloads, and network I/O. This is where the driver spends - most of its cycles in a typical application: when we have to make design tradeoffs, performance is - always the priority. Hot code runs on 3 categories of threads: - * your application's thread for the construction of statements; - * the driver's "I/O" event loop group for encoding/decoding and network I/O. 
You can configure - it with the options in `datastax-java-driver.advanced.netty.io-group`. - * the driver's "timer" thread for request timeouts and speculative executions. See - `datastax-java-driver.advanced.netty.timer`. -* the **cold path** is for all administrative tasks: managing the - [control connection](../control_connection), parsing [metadata](../metadata/), reacting to cluster - events (node going up/down, getting added/removed, etc), and scheduling periodic events - (reconnections, reloading the configuration). Comparatively, these tasks happen less often, and - are less critical (for example, stale schema metadata is not a blocker for request execution). - They are scheduled on a separate "admin" event loop group, controlled by the options in - `datastax-java-driver.advanced.netty.admin-group`. - -By default, the number of I/O threads is set to `Runtime.getRuntime().availableProcessors() * 2`, -and the number of admin threads to 2. It's hard to give one-size-fits-all recommendations because -every case is different, but you might want to try lowering I/O threads, especially if your -application already creates a lot of threads on its side. - -Note that you can gain more fine-grained control over thread pools via the -[internal](../../api_conventions) API (look at the `NettyOptions` interface). In particular, it is -possible to reuse the same event loop group for I/O, admin tasks, and even your application code -(the driver's internal code is fully asynchronous so it will never block any thread). The timer is -the only one that will have to stay on a separate thread. 
- -[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/AccessibleByName.html -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html -[CqlSession.prepare(SimpleStatement)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html#prepare-com.datastax.oss.driver.api.core.cql.SimpleStatement- -[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html -[Statement.setNode()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setNode-com.datastax.oss.driver.api.core.metadata.Node- diff --git a/manual/core/pooling/README.md b/manual/core/pooling/README.md deleted file mode 100644 index 578de6b4abd..00000000000 --- a/manual/core/pooling/README.md +++ /dev/null @@ -1,193 +0,0 @@ - - -## Connection pooling - -### Quick overview - -One connection pool per node. **Many concurrent requests** per connection (don't tune like a JDBC -pool). - -* `advanced.connection` in the configuration: `max-requests-per-connection`, `pool.local.size`, - `pool.remote.size`. -* metrics (per node): `pool.open-connections`, `pool.in-flight`, `pool.available-streams`, - `pool.orphaned-streams`. -* heartbeat: driver-level keepalive, prevents idle connections from being dropped; - `advanced.heartbeat` in the configuration. - ------ - -### Basics - -The driver communicates with Cassandra over TCP, using the Cassandra binary protocol. This protocol -is asynchronous, which allows each TCP connection to handle multiple simultaneous requests: - -* when a query gets executed, a *stream id* gets assigned to it. 
It is a unique identifier on the - current connection; -* the driver writes a request containing the stream id and the query on the connection, and then - proceeds without waiting for the response (if you're using the asynchronous API, this is when the - driver will send you back a `java.util.concurrent.CompletionStage`). Once the request has been - written to the connection, we say that it is *in flight*; -* at some point, Cassandra will send back a response on the connection. This response also contains - the stream id, which allows the driver to trigger a callback that will complete the corresponding - query (this is the point where your `CompletionStage` will get completed). - -You don't need to manage connections yourself. You simply interact with a [CqlSession] object, which -takes care of it. - -**For a given session, there is one connection pool per connected node** (a node is connected when -it is up and not ignored by the [load balancing policy](../load_balancing/)). - -The number of connections per pool is configurable (this will be described in the next section). -There are up to 32768 stream ids per connection. - -```ditaa -+-------+1 n+----+1 n+----------+1 32K+-------+ -+Session+-------+Pool+-------+Connection+-------+Request+ -+-------+ +----+ +----------+ +-------+ -``` - -### Configuration - -Pool sizes are defined in the `connection` section of the [configuration](../configuration/). Here -are the relevant options with their default values: - -``` -datastax-java-driver.advanced.connection { - max-requests-per-connection = 1024 - pool { - local.size = 1 - remote.size = 1 - } -} -``` - -Do not change those values unless informed by concrete performance measurements; see the -[Tuning](#tuning) section at the end of this page. - -Unlike previous versions of the driver, pools do not resize dynamically. However you can adjust the -options at runtime, the driver will detect and apply the changes. 
- -#### Heartbeat - -If connections stay idle for too long, they might be dropped by intermediate network devices -(routers, firewalls...). Normally, TCP keepalive should take care of this; but tweaking low-level -keepalive settings might be impractical in some environments. - -The driver provides application-side keepalive in the form of a connection heartbeat: when a -connection does not receive incoming reads for a given amount of time, the driver will simulate -activity by writing a dummy request to it. If that request fails, the connection is trashed and -replaced. - -This feature is enabled by default. Here are the default values in the configuration: - -``` -datastax-java-driver.advanced.heartbeat { - interval = 30 seconds - - # How long the driver waits for the response to a heartbeat. If this timeout fires, the heartbeat - # is considered failed. - timeout = 500 milliseconds -} -``` - -Both options can be changed at runtime, the new value will be used for new connections created after -the change. - -### Monitoring - -The driver exposes node-level [metrics](../metrics/) to monitor your pools (note that all metrics -are disabled by default, you'll need to change your configuration to enable them): - -``` -datastax-java-driver { - advanced.metrics.node.enabled = [ - # The number of connections open to this node for regular requests (exposed as a - # Gauge). - # - # This includes the control connection (which uses at most one extra connection to a random - # node in the cluster). - pool.open-connections, - - # The number of stream ids available on the connections to this node (exposed as a - # Gauge). - # - # Stream ids are used to multiplex requests on each connection, so this is an indication of - # how many more requests the node could handle concurrently before becoming saturated (note - # that this is a driver-side only consideration, there might be other limitations on the - # server that prevent reaching that theoretical limit). 
- pool.available-streams, - - # The number of requests currently executing on the connections to this node (exposed as a - # Gauge). This includes orphaned streams. - pool.in-flight, - - # The number of "orphaned" stream ids on the connections to this node (exposed as a - # Gauge). - # - # See the description of the connection.max-orphan-requests option for more details. - pool.orphaned-streams, - ] -} -``` - -In particular, it's a good idea to keep an eye on those two metrics: - -* `pool.open-connections`: if this doesn't match your configured pool size, something is preventing - connections from opening (either configuration or network issues, or a server-side limitation -- - see [CASSANDRA-8086]); -* `pool.available-streams`: if this is often close to 0, it's a sign that the pool is getting - saturated. Consider adding more connections per node. - -### Tuning - -The driver defaults should be good for most scenarios. - -#### Number of requests per connection - -In our experience, raising `max-requests-per-connection` above 1024 does not bring any significant -improvement: the server is only going to service so many requests at a time anyway, so additional -requests are just going to pile up. - -Lowering the value is not a good idea either. If your goal is to limit the global throughput of the -driver, a [throttler](../throttling) is a better solution. - -#### Number of connections per node - -1 connection per node (`pool.local.size` or `pool.remote.size`) is generally sufficient. However, it -might become a bottleneck in very high performance scenarios: all I/O for a connection happens on -the same thread, so it's possible for that thread to max out its CPU core. In our benchmarks, this -happened with a single-node cluster and a high throughput (approximately 80K requests / second / -connection). 
- -It's unlikely that you'll run into this issue: in most real-world deployments, the driver connects -to more than one node, so the load will spread across more I/O threads. However if you suspect that -you experience the issue, here's what to look out for: - -* the driver throughput plateaus but the process does not appear to max out any system resource (in - particular, overall CPU usage is well below 100%); -* one of the driver's I/O threads maxes out its CPU core. You can see that with a profiler, or - OS-level tools like `pidstat -tu` on Linux. By default, I/O threads are named - `-io-`. - -Try adding more connections per node. Thanks to the driver's hot-reload mechanism, you can do that -at runtime and see the effects immediately. - -[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html -[CASSANDRA-8086]: https://issues.apache.org/jira/browse/CASSANDRA-8086 diff --git a/manual/core/query_timestamps/README.md b/manual/core/query_timestamps/README.md deleted file mode 100644 index 4498afe21c4..00000000000 --- a/manual/core/query_timestamps/README.md +++ /dev/null @@ -1,214 +0,0 @@ - - -## Query timestamps - -### Quick overview - -Defines the order in which mutations are applied on the server. Ways to set it (by order of -precedence, higher priority first): - -* `USING TIMESTAMP` in the query string. -* programmatically with [Statement.setQueryTimestamp()]. -* timestamp generator: `advanced.timestamp-generator` in the configuration. Defaults to session-wide - monotonic, also available: per-thread monotonic, server-side, or write your own. -* if the generator didn't set it, assigned server-side. - ------ - -In Cassandra, each mutation has a microsecond-precision timestamp, which is used to order operations -relative to each other. 
- -There are various ways to assign it: - -### CQL `USING TIMESTAMP` - -You can explicitly provide the timestamp in your CQL query: - -```java -session.execute("INSERT INTO my_table(c1, c2) values (1, 1) " + - "USING TIMESTAMP 1432815430948040"); -``` - -### Timestamp generator - -The driver has a timestamp generator that gets invoked for every outgoing request; it either assigns -a client-side timestamp to the request, or indicates that the server should assign it. - -The timestamp generator is defined in the [configuration](../configuration/). - -#### AtomicTimestampGenerator - -``` -datastax-java-driver.advanced.timestamp-generator { - class = AtomicTimestampGenerator -} -``` - -This is the default implementation. It always generates a client timestamp, and guarantees -monotonicity (i.e. ever-increasing timestamps) across all application threads. - -Note that, in order to achieve monotonicity, the generator might return timestamps that drift out in -the future. This happens if timestamps are generated at a rate of more than one per microsecond, or -more likely in the event of a system clock skew. When this happens, the generator logs a warning -message in the category `com.datastax.oss.driver.internal.core.time.MonotonicTimestampGenerator`: - -``` -Clock skew detected: current tick (...) was ... microseconds behind the last generated timestamp (...), -returned timestamps will be artificially incremented to guarantee monotonicity. -``` - -You can control that message with these options: - -``` -datastax-java-driver.advanced.timestamp-generator { - drift-warning { - # How far in the future timestamps are allowed to drift before the warning is logged. - # If it is undefined or set to 0, warnings are disabled. - threshold = 1 second - # How often the warning will be logged if timestamps keep drifting above the threshold. - interval = 10 seconds - } -} -``` - -This generator strives to achieve microsecond resolution on a best-effort basis. 
But in practice, -the real accuracy of generated timestamps is largely dependent on the granularity of the operating -system's clock. For most systems, this minimum granularity is millisecond, and the sub-millisecond -part is simply a counter that gets incremented until the next clock tick, as provided by -`System.currentTimeMillis()`. - -On some systems, however, it is possible to have a better granularity by using a [JNR] call to -[gettimeofday]. This native call will be used when available, unless use of the Java clock is forced -with this configuration option: - -``` -datastax-java-driver.advanced.timestamp-generator { - force-java-clock = true -} -``` - -To check what the driver is currently using, turn on `INFO` logs for the category -`com.datastax.oss.driver.internal.core.time`, and look for one of the following messages at -initialization: - -* `Using Java system clock because this was explicitly required in the configuration` -* `Could not access native clock (see debug logs for details), falling back to Java system clock` -* `Using native clock for microsecond precision` - -#### ThreadLocalTimestampGenerator - -``` -datastax-java-driver.advanced.timestamp-generator { - class = ThreadLocalTimestampGenerator -} -``` - -This is similar to the atomic generator, except that it only guarantees monotonicity within each -thread. In other words, if a given application thread invokes `session.execute()` multiple times, -the timestamps will be strictly increasing; but across two or more application threads, there might -be duplicates. - -This is a bit more efficient, but should only be used when threads are not in direct competition for -timestamp ties (i.e., they are executing independent statements). - -It uses the same configuration options `drift-warning` and `force-java-clock`; see the previous -section for details. 
- -#### ServerSideTimestampGenerator - -``` -datastax-java-driver.advanced.timestamp-generator { - class = ServerSideTimestampGenerator -} -``` - -This implementation always lets the server assign a timestamp. - -#### Custom - -You can create your own generator by implementing [TimestampGenerator], and referencing your -implementation class from the configuration. - -#### Using multiple generators - -The timestamp generator can be overridden in [execution profiles](../configuration/#profiles): - -``` -datastax-java-driver { - advanced.timestamp-generator.class = AtomicTimestampGenerator - profiles { - profile1 { - advanced.timestamp-generator.class = ServerSideTimestampGenerator - } - profile2 {} - } -} -``` - -The `profile1` profile uses its own generator. The `profile2` profile inherits the default -profile's. Note that this goes beyond configuration inheritance: the driver only creates a single -`AtomicTimestampGenerator` instance and reuses it (this also occurs if two sibling profiles have the -same configuration). - -Each request uses its declared profile's generator. If it doesn't declare any profile, or if the -profile doesn't have a dedicated policy, then the default profile's generator is used. - -### Per-statement timestamp - -Finally, you can assign a timestamp to a statement directly from application code: - -```java -Statement statement = - SimpleStatement.builder("UPDATE users SET email = 'x@y.com' where id = 1") - .setQueryTimestamp(1432815430948040L) - .build(); -session.execute(statement); -``` - -### Timestamps and lightweight transactions - -Client-side timestamps are prohibited for [lightweight transactions] \(used for conditional updates -such as `INSERT... IF NOT EXISTS`, `UPDATE... IF...`, etc.). 
- -If you add a `USING TIMESTAMP` clause to such a query, the server will return an error: - -``` -cqlsh> UPDATE foo USING TIMESTAMP 1234 SET v=1 WHERE k=0 IF v=2; -InvalidRequest: Error from server: code=2200 [Invalid query] message="Cannot provide custom timestamp for conditional updates" -``` - -If you execute a conditional update through the driver with a client-side timestamp generator, the -client-side timestamp will be silently ignored and the server will provide its own. - -### Summary - -Here is the order of precedence of all the methods described so far: - -1. if there is a `USING TIMESTAMP` clause in the CQL string, use that over anything else; -2. otherwise, if a default timestamp was set directly on the statement, use it; -3. otherwise, if the timestamp generator assigned a timestamp, use it; -4. otherwise, let the server assign the timestamp. - -[TimestampGenerator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/time/TimestampGenerator.html - -[gettimeofday]: http://man7.org/linux/man-pages/man2/settimeofday.2.html -[JNR]: https://github.com/jnr/jnr-posix -[Lightweight transactions]: https://docs.datastax.com/en/dse/6.0/cql/cql/cql_using/useInsertLWT.html -[Statement.setQueryTimestamp()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setQueryTimestamp-long- diff --git a/manual/core/reactive/README.md b/manual/core/reactive/README.md deleted file mode 100644 index 37a2e3411b8..00000000000 --- a/manual/core/reactive/README.md +++ /dev/null @@ -1,410 +0,0 @@ - - -## Reactive Style Programming - -The driver provides built-in support for reactive queries. The [CqlSession] interface extends -[ReactiveSession], which adds specialized methods to execute requests expressed in [reactive -streams]. - -Notes: - -* Reactive capabilities require the [Reactive Streams API] to be present on the classpath. 
The - driver has a dependency on that library, but if your application does not use reactive queries at - all, it is possible to exclude it to minimize the number of runtime dependencies. If the library - cannot be found at runtime, reactive queries won't be available, and a warning will be logged, but - the driver will otherwise operate normally (this is also valid for OSGi deployments). -* For historical reasons, reactive-related driver types reside in a package prefixed with `dse`; - however, reactive queries also work with regular Cassandra. -* The reactive execution model is implemented in a non-blocking fashion: see the manual page on - [non-blocking programming](../non_blocking) for details. - -### Overview - -`ReactiveSession` exposes two public methods: - -```java -ReactiveResultSet executeReactive(String query); -ReactiveResultSet executeReactive(Statement statement); -``` - -Both methods return a [ReactiveResultSet], which is the reactive streams version of a regular -[ResultSet]. In other words, a `ReactiveResultSet` is a [Publisher] for query results. - -When subscribing to and consuming from a `ReactiveResultSet`, there are two important caveats to -bear in mind: - -1. By default, all `ReactiveResultSet` implementations returned by the driver are cold, unicast, - single-subscription-only publishers. In other words, they do not support multiple subscribers; - consider caching the results produced by such publishers if you need to consume them by more than - one downstream subscriber. We provide a few examples of caching further in this document. -2. Also, note that reactive result sets may emit items to their subscribers on an internal driver IO - thread. Subscriber implementors are encouraged to abide by [Reactive Streams Specification rule - 2.2] and avoid performing heavy computations or blocking calls inside `onNext` calls, as doing so - could slow down the driver and impact performance. 
Instead, they should asynchronously dispatch - received signals to their processing logic. - -### Basic usage - -The examples in this page make usage of [Reactor], a popular reactive library, but they should be -easily adaptable to any other library implementing the concepts of reactive streams. - -#### Reading in reactive style - -The following example reads from a table and prints all the returned rows to the console. In case of -error, a `DriverException` is thrown and its stack trace is printed to standard error: - -```java -try (CqlSession session = ...) { - Flux.from(session.executeReactive("SELECT ...")) - .doOnNext(System.out::println) - .blockLast(); -} catch (DriverException e) { - e.printStackTrace(); -} -``` - -#### Writing in reactive style - -The following example inserts rows into a table after printing the queries to the console, stopping -at the first error, if any. Again, in case of error, a `DriverException` is thrown: - -```java -try (CqlSession session = ...) { - Flux.just("INSERT ...", "INSERT ...", "INSERT ...", ...) - .doOnNext(System.out::println) - .flatMap(session::executeReactive) - .blockLast(); -} catch (DriverException e) { - e.printStackTrace(); -} -``` - -Note that when a statement is executed reactively, the actual request is only triggered when the -`ReactiveResultSet` is subscribed to; in other words, when the `executeReactive` method returns, -_nothing has been executed yet_. This is why the write example above uses a `flatMap` operator, -which takes care of subscribing to each `ReactiveResultSet` returned by successive calls to -`session.executeReactive`. 
A common pitfall is to use an operator that silently ignores the returned -`ReactiveResultSet`; for example, the code below seems correct, but will not execute any query: - -```java -// DON'T DO THIS -Flux.just("INSERT INTO ...") - // The returned ReactiveResultSet is not subscribed to - .doOnNext(session::executeReactive) - .blockLast(); -``` - -Since a write query does not return any rows, it may appear difficult to count the number of rows -written to the database. Hopefully most reactive libraries have operators that are useful in these -scenarios. The following example demonstrates how to achieve this goal with Reactor: - -```java -Flux> stmts = ...; -long count = - stmts - .flatMap( - stmt -> - Flux.from(session.executeReactive(stmt)) - // dummy cast, since result sets are always empty for write queries - .cast(Integer.class) - // flow will always be empty, so '1' will be emitted for each query - .defaultIfEmpty(1)) - .count() - .block(); -System.out.printf("Executed %d write statements%n", count); -``` - -### Accessing query metadata - -`ReactiveResultSet` exposes useful information about request execution and query metadata: - -```java -Publisher getColumnDefinitions(); -Publisher getExecutionInfos(); -Publisher wasApplied(); -``` - -Refer to the javadocs of [getColumnDefinitions], [getExecutionInfos] and [wasApplied] for more -information on these methods. - -To inspect the contents of the above publishers, simply subscribe to them. Note that these -publishers cannot complete before the query itself completes; if the query fails, then these -publishers will fail with the same error. 
- -The following example executes a query, then prints all the available metadata to the console: - -```java -ReactiveResultSet rs = session.executeReactive("SELECT ..."); -// execute the query first -Flux.from(rs).blockLast(); -// then retrieve query metadata -System.out.println("Column definitions: "); -Mono.from(rs.getColumnDefinitions()).doOnNext(System.out::println).block(); -System.out.println("Execution infos: "); -Flux.from(rs.getExecutionInfos()).doOnNext(System.out::println).blockLast(); -System.out.println("Was applied: "); -Mono.from(rs.wasApplied()).doOnNext(System.out::println).block(); -``` - -Note that it is also possible to inspect query metadata at row level. Each row returned by a -reactive query execution implements [`ReactiveRow`][ReactiveRow], the reactive equivalent of a -[`Row`][Row]. - -`ReactiveRow` exposes the same kind of query metadata and execution info found in -`ReactiveResultSet`, but for each individual row: - -```java -ColumnDefinitions getColumnDefinitions(); -ExecutionInfo getExecutionInfo(); -boolean wasApplied(); -``` - -Refer to the javadocs of [`getColumnDefinitions`][ReactiveRow.getColumnDefinitions], -[`getExecutionInfo`][ReactiveRow.getExecutionInfo] and [`wasApplied`][ReactiveRow.wasApplied] for -more information on these methods. 
- -The following example executes a query and, for each row returned, prints the coordinator that -served that row, then retrieves all the coordinators that were contacted to fulfill the query and -prints them to the console: - -```java -Iterable coordinators = Flux.from(session.executeReactive("SELECT ...")) - .doOnNext( - row -> - System.out.printf( - "Row %s was obtained from coordinator %s%n", - row, - row.getExecutionInfo().getCoordinator())) - .map(ReactiveRow::getExecutionInfo) - // dedup by coordinator (note: this is dangerous on a large result set) - .groupBy(ExecutionInfo::getCoordinator) - .map(GroupedFlux::key) - .toIterable(); -System.out.println("Contacted coordinators: " + coordinators); -``` - -### Advanced topics - -#### Applying backpressure - -One of the key features of reactive programming is backpressure. - -Unfortunately, the Cassandra native protocol does not offer proper support for exchanging -backpressure information between client and server over the network. Cassandra is able, since -version 3.10, to [throttle clients](https://issues.apache.org/jira/browse/CASSANDRA-9318) but at the -time of writing, there is no proper [client-facing backpressure -mechanism](https://issues.apache.org/jira/browse/CASSANDRA-11380) available. - -When reading from Cassandra, this shouldn't however be a problem for most applications. Indeed, in a -read scenario, Cassandra acts as a producer, and the driver is a consumer; in such a setup, if a -downstream subscriber is not able to cope with the throughput, the driver would progressively adjust -the rate at which it requests more pages from the server, thus effectively regulating the server -throughput to match the subscriber's. The only caveat is if the subscriber is really too slow, which -could eventually trigger a query timeout, be it on the client side (`DriverTimeoutException`), or on -the server side (`ReadTimeoutException`). 
- -When writing to Cassandra, the lack of backpressure communication between client and server is more -problematic. Indeed in a write scenario, the driver acts as a producer, and Cassandra is a consumer; -in such a setup, if an upstream producer generates too much data, the driver would blindly send the -write statements to the server as quickly as possible, eventually causing the cluster to become -overloaded or even crash. This usually manifests itself with errors like `WriteTimeoutException`, or -`OverloadedException`. - -It is strongly advised for users to limit the concurrency at which write statements are executed in -write-intensive scenarios. A simple way to achieve this is to use the `flatMap` operator, which, in -most reactive libraries, has an overloaded form that takes a parameter that controls the desired -amount of concurrency. The following example executes a flow of statements with a maximum -concurrency of 10, leveraging the `concurrency` parameter of Reactor's `flatMap` operator: - -```java -Flux> stmts = ...; -stmts.flatMap(session::executeReactive, 10).blockLast(); -``` - -In the example above, the `flatMap` operator will subscribe to at most 10 `ReactiveResultSet` -instances simultaneously, effectively limiting the number of concurrent in-flight requests to 10. -This is usually enough to prevent data from being written too fast. More sophisticated operators are -capable of rate-limiting or throttling the execution of a flow; for example, Reactor offers a -`delayElements` operator that rate-limits the throughput of its upstream publisher. Consult the -documentation of your reactive library for more information. - -As a last resort, it is also possible to limit concurrency at driver level, for example using the -driver's built-in [request throttling] mechanism, although this is usually not required in reactive -applications. See "[Managing concurrency in asynchronous query execution]" in the Developer Guide -for a few examples. 
- -#### Caching query results - -As stated above, a `ReactiveResultSet` can only be subscribed once. This is an intentional design -decision, because otherwise users could inadvertently trigger a spurious execution of the same query -again when subscribing for the second time to the same `ReactiveResultSet`. - -Let's suppose that we want to compute both the average and the sum of all values from a table -column. The most naive approach would be to create two flows and subscribe to both: - - ```java -// DON'T DO THIS -ReactiveResultSet rs = session.executeReactive("SELECT n FROM ..."); -double avg = Flux.from(rs) - .map(row -> row.getLong(0)) - .reduce(0d, (a, b) -> (a + b / 2.0)) - .block(); -// will fail with IllegalStateException -long sum = Flux.from(rs) - .map(row -> row.getLong(0)) - .reduce(0L, (a, b) -> a + b) - .block(); - ``` - -Unfortunately, the second `Flux` above will terminate immediately with an `onError` signal -encapsulating an `IllegalStateException`, since `rs` was already subscribed to. - -To circumvent this limitation, while still avoiding querying the table twice, the easiest technique -consists in using the `cache` operator that most reactive libraries offer: - -```java -Flux rs = Flux.from(session.executeReactive("SELECT n FROM ...")) - .map(row -> row.getLong(0)) - .cache(); -double avg = rs - .reduce(0d, (a, b) -> (a + b / 2.0)) - .block(); -long sum = rs - .reduce(0L, (a, b) -> a + b) - .block(); -``` - -The above example works just fine. - -The `cache` operator will subscribe at most once to the `ReactiveResultSet`, cache the results, and -serve the cached results to downstream subscribers. This is obviously only possible if your result -set is small and can fit entirely in memory. - -If caching is not an option, most reactive libraries also offer operators that multicast their -upstream subscription to many subscribers on the fly. 
- -The above example could be rewritten with a different approach as follows: - -```java -Flux rs = Flux.from(session.executeReactive("SELECT n FROM ...")) - .map(row -> row.getLong(0)) - .publish() // multicast upstream to all downstream subscribers - .autoConnect(2); // wait until two subscribers subscribe -long sum = rs - .reduce(0L, (a, b) -> a + b) - .block(); -double avg = rs - .reduce(0d, (a, b) -> (a + b / 2.0)) - .block(); -``` - -In the above example, the `publish` operator multicasts every `onNext` signal to all of its -subscribers; and the `autoConnect(2)` operator instructs `publish` to wait until it gets 2 -subscriptions before subscribing to its upstream source (and triggering the actual query execution). - -This approach should be the preferred one for large result sets since it does not involve caching -results in memory. - -#### Resuming from and retrying after failed queries - -When executing a flow of statements, any failed query execution would trigger an `onError` signal -and terminate the subscription immediately, potentially preventing subsequent queries from being -executed at all. - -If this behavior is not desired, it is possible to mimic the behavior of a fail-safe system. This -usually involves the usage of operators such as `onErrorReturn` or `onErrorResume`. Consult your -reactive library documentation to find out which operators allow you to intercept failures. 
- -The following example executes a flow of statements; for each failed execution, the stack trace is -printed to standard error and, thanks to the `onErrorResume` operator, the error is completely -ignored and the flow execution resumes normally: - -```java -Flux> stmts = ...; -stmts.flatMap( - statement -> - Flux.from(session.executeReactive(statement)) - .doOnError(Throwable::printStackTrace) - .onErrorResume(error -> Mono.empty())) - .blockLast(); -``` - -The following example expands on the previous one: for each failed execution, at most 3 retries are -attempted if the error was an `UnavailableException`, then, if the query wasn't successful after -retrying, a message is logged. Finally, all the errors are collected and the total number of failed -queries is printed to the console: - -```java -Flux> statements = ...; -long failed = statements.flatMap( - stmt -> - Flux.defer(() -> session.executeReactive(stmt)) - // retry at most 3 times on Unavailable - .retry(3, UnavailableException.class::isInstance) - // handle errors - .doOnError( - error -> { - System.err.println("Statement failed: " + stmt); - error.printStackTrace(); - }) - // Collect errors and discard all returned rows - .ignoreElements() - .cast(Long.class) - .onErrorReturn(1L)) - .sum() - .block(); -System.out.println("Total failed queries: " + failed); -``` - -The example above uses `Flux.defer()` to wrap the call to `session.executeReactive()`. This is -required because, as mentioned above, the driver always creates single-subscription-only publishers. -Such publishers are not compatible with operators like `retry` because these operators sometimes -subscribe more than once to the upstream publisher, thus causing the driver to throw an exception. 
-Hopefully it's easy to solve this issue, and that's exactly what the `defer` operator is designed -for: each subscription to the `defer` operator triggers a distinct call to -`session.executeReactive()`, thus causing the session to re-execute the query and return a brand-new -publisher at every retry. - -Note that the driver already has a [built-in retry mechanism] that can transparently retry failed -queries; the above example should be seen as a demonstration of application-level retries, when a -more fine-grained control of what should be retried, and how, is required. - -[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html -[ReactiveSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html -[ReactiveRow]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html -[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html -[getColumnDefinitions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html#getColumnDefinitions-- -[getExecutionInfos]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html#getExecutionInfos-- -[wasApplied]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html#wasApplied-- -[ReactiveRow.getColumnDefinitions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html#getColumnDefinitions-- -[ReactiveRow.getExecutionInfo]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html#getExecutionInfo-- -[ReactiveRow.wasApplied]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html#wasApplied-- - -[built-in retry mechanism]: ../retries/ -[request throttling]: ../throttling/ - -[Managing concurrency in asynchronous query execution]: https://docs.datastax.com/en/devapp/doc/devapp/driverManagingConcurrency.html -[Publisher]: https://www.reactive-streams.org/reactive-streams-1.0.2-javadoc/org/reactivestreams/Publisher.html -[reactive streams]: https://en.wikipedia.org/wiki/Reactive_Streams -[Reactive Streams API]: https://github.com/reactive-streams/reactive-streams-jvm -[Reactive Streams Specification rule 2.2]: https://github.com/reactive-streams/reactive-streams-jvm#2.2 -[Reactor]: https://projectreactor.io/ diff --git a/manual/core/reconnection/README.md b/manual/core/reconnection/README.md deleted file mode 100644 index 3eb6dad9c05..00000000000 --- a/manual/core/reconnection/README.md +++ /dev/null @@ -1,109 +0,0 @@ - - -## Reconnection - -### Quick overview - -When a connection is lost, try to reestablish it at configured intervals. - -* `advanced.reconnection-policy` in the configuration; defaults to exponential backoff, also - available: constant delay, write your own. -* applies to connection pools and the control connection. -* `advanced.reconnect-on-init` (false by default) controls whether the session tries to reconnect - when it is first created - ------ - -### At runtime - -If a running session loses a connection to a node, it tries to re-establish it according to a -configurable policy. This is used in two places: - -* [connection pools](../pooling/): for each node, a session has a fixed-size pool of connections to - execute user requests. 
If one or more connections drop, a reconnection gets started for the pool; - each attempt tries to reopen the missing number of connections. This goes on until the pool is - back to its expected size; -* [control connection](../control_connection/): a session uses a single connection to an arbitrary - node for administrative requests. If that connection goes down, a reconnection gets started; each - attempt iterates through all active nodes until one of them accepts a connection. This goes on - until we have a control node again. - -The reconnection policy controls the interval between each attempt. It is defined in the -[configuration](../configuration/): - -``` -datastax-java-driver { - advanced.reconnection-policy { - class = ExponentialReconnectionPolicy - base-delay = 1 second - max-delay = 60 seconds - } -} -``` - -[ExponentialReconnectionPolicy] is the default; it starts with a base delay, and then doubles it -after each attempt. [ConstantReconnectionPolicy] uses the same delay every time, regardless of the -previous number of attempts. - -You can also write your own policy; it must implement [ReconnectionPolicy] and declare a public -constructor with a [DriverContext] argument. - -For best results, use reasonable values: very low values (for example a constant delay of 10 -milliseconds) will quickly saturate your system. - -The policy works by creating a *schedule* each time a reconnection starts. These schedules are -independent across reconnection attempts, meaning that each pool will start with a fresh delay even -if other pools are already reconnecting. For example, assuming that the pool size is 3, the policy -is the exponential one with the default values, and the control connection is initially on node1: - -* [t = 0] 2 connections to node2 go down. A reconnection starts for node2's pool, with the next - attempt in 1 second; -* [t = 1] node2's pool tries to open the 2 missing connections. One succeeds but the other fails. 
- Another attempt is scheduled in 2 seconds; -* [t = 1.2] 1 connection to node3 goes down. A reconnection starts for node3's pool, with the next - attempt in 1 second; -* [t = 1.5] the control connection to node1 goes down. A reconnection starts for the control - connection, with the next attempt in 1 second; -* [t = 2.2], node3's pool tries to open its missing connection, which succeeds. The pool is back to - its expected size, node3's reconnection stops; -* [t = 2.5] the control connection tries to find a new node. It invokes the - [load balancing policy](../load_balancing/) to get a query plan, which happens to start with - node4. The connection succeeds, node4 is now the control node and the reconnection stops; -* [t = 3] node2's pool tries to open the last missing connection, which succeeds. The pool is back - to its expected size, node2's reconnection stops. - -### At init time - -If a session fails to connect when it is first created, the default behavior is to abort and throw -an error immediately. - -If you prefer to retry, you can set the configuration option `advanced.reconnect-on-init` to true. -Instead of failing, the driver will keep attempting to initialize the session at regular intervals, -according to the reconnection policy, until at least one contact point replies. This can be useful -when dealing with containers and microservices. - -Note that the session is not accessible until it is fully ready: the `CqlSessionBuilder.build()` -call — or the future returned by `buildAsync()` — will not complete until the connection -was established. 
- -[ConstantReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.html -[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html -[ExponentialReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.html -[ReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.html diff --git a/manual/core/request_id/README.md b/manual/core/request_id/README.md deleted file mode 100644 index a766a4419af..00000000000 --- a/manual/core/request_id/README.md +++ /dev/null @@ -1,48 +0,0 @@ - - -## Request Id - -### Quick overview - -Users can inject an identifier for each individual CQL request, and such ID can be written in to the [custom payload](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v5.spec) to -correlate a request across the driver and the Apache Cassandra server. - -A request ID generator needs to generate both: -- Session request ID: an identifier for an entire session.execute() call -- Node request ID: an identifier for the execution of a CQL statement against a particular node. There can be one or more node requests for a single session request, due to retries or speculative executions. - -Usage: -* Inject ID generator: set the desired `RequestIdGenerator` in `advanced.request-id.generator.class`. -* Add ID to custom payload: the default behavior of a `RequestIdGenerator` is to add the request ID into the custom payload with the key `request-id`. Override `RequestIdGenerator.getDecoratedStatement` to customize the behavior. 
- -### Request Id Generator Configuration - -Request ID generator can be declared in the [configuration](../configuration/) as follows: - -``` -datastax-java-driver.advanced.request-id.generator { - class = com.example.app.MyGenerator -} -``` - -To register your own request ID generator, specify the name of the class -that implements `RequestIdGenerator`. - -The generated ID will be added to the log message of `CqlRequestHandler`, and propagated to other classes, e.g. the request trackers. \ No newline at end of file diff --git a/manual/core/request_tracker/README.md b/manual/core/request_tracker/README.md deleted file mode 100644 index c135abfe53f..00000000000 --- a/manual/core/request_tracker/README.md +++ /dev/null @@ -1,146 +0,0 @@ - - -## Request tracker - -### Quick overview - -Callback that gets invoked for every request: success or error, globally and for every tried node. - -* `advanced.request-tracker` in the configuration; defaults to none, also available: request logger, - or write your own. -* or programmatically: - [CqlSession.builder().addRequestTracker()][SessionBuilder.addRequestTracker]. - ------ - -The request tracker is a session-wide component that gets notified of the latency and outcome of -every application request. The driver comes with an optional implementation that logs requests. - -### Configuration - -Request trackers can be declared in the [configuration](../configuration/) as follows: - -``` -datastax-java-driver.advanced.request-tracker { - classes = [com.example.app.MyTracker1,com.example.app.MyTracker2] -} -``` - -By default, no tracker is registered. To register your own trackers, specify the name of a class -that implements [RequestTracker]. One such class is the built-in request logger (see the next -section), but you can also create your own implementation. - -Also, trackers registered via configuration will be instantiated with reflection; they must have a -public constructor taking a `DriverContext` argument. 
- -Sometimes you have a tracker instance already in your code, and need to pass it programmatically -instead of referencing a class. The session builder has a method for that: - -```java -RequestTracker myTracker1 = ...; -RequestTracker myTracker2 = ...; -CqlSession session = CqlSession.builder() - .addRequestTracker(myTracker1) - .addRequestTracker(myTracker2) - .build(); -``` - -The two registration methods (programmatic and via the configuration) can be used simultaneously. - -### Request logger - -The request logger is a built-in implementation that logs every request. It has many options to mark -requests as "slow" above a given threshold, limit the line size for large queries, etc: - -``` -datastax-java-driver.advanced.request-tracker { - classes = [RequestLogger] - - logs { - # Whether to log successful requests. - success.enabled = true - - slow { - # The threshold to classify a successful request as "slow". If this is unset, all - # successful requests will be considered as normal. - threshold = 1 second - - # Whether to log slow requests. - enabled = true - } - - # Whether to log failed requests. - error.enabled = true - - # The maximum length of the query string in the log message. If it is longer than that, it - # will be truncated. - max-query-length = 500 - - # Whether to log bound values in addition to the query string. - show-values = true - - # The maximum length for bound values in the log message. If the formatted representation of - # a value is longer than that, it will be truncated. - max-value-length = 50 - - # The maximum number of bound values to log. If a request has more values, the list of - # values will be truncated. - max-values = 50 - - # Whether to log stack traces for failed queries. If this is disabled, the log will just - # include the exception's string representation (generally the class name and message). 
- show-stack-traces = true -} -``` - -All requests are logged under the category -`com.datastax.oss.driver.internal.core.tracker.RequestLogger`. - -The prefix of the log will always contain at least: - -``` -s0|274426173 -``` - -Where `s0` is the session name (see the `basic.session-name` configuration option), and `274426173` -is a unique hash code calculated per request, that can be used for correlation with the driver's -debug and trace logs. - - -Successful and slow requests use the `INFO` level: - -``` -INFO c.d.o.d.i.core.tracker.RequestLogger - [s0|274426173][/127.0.0.1:9042] Success (13 ms) [1 values] -SELECT * FROM users WHERE user_id=? [v0=42] - -INFO c.d.o.d.i.core.tracker.RequestLogger - [s0|1883237069][/127.0.0.1:9042] Slow (1.245 s) [1 values] SELECT -* FROM users WHERE user_id=? [v0=42] -``` - -Failed requests use the `ERROR` level: - -``` -ERROR c.d.o.d.i.core.tracker.RequestLogger - [s0|1883237069][/127.0.0.1:9042] Error (179 ms) [1 values] SELECT -all FROM users WHERE user_id=? [v0=42] -com.datastax.oss.driver.api.core.servererrors.InvalidQueryException: Undefined column name all -``` - -[RequestTracker]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/tracker/RequestTracker.html -[SessionBuilder.addRequestTracker]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addRequestTracker-com.datastax.oss.driver.api.core.tracker.RequestTracker- diff --git a/manual/core/retries/README.md b/manual/core/retries/README.md deleted file mode 100644 index e92f8e214aa..00000000000 --- a/manual/core/retries/README.md +++ /dev/null @@ -1,270 +0,0 @@ - - -## Retries - -### Quick overview - -What to do when a request failed on a node: retry (same or other node), rethrow, or ignore. - -* `advanced.retry-policy` in the configuration. Default policy retries at most once, in cases that - have a high chance of success; you can also write your own. -* can have per-profile policies. 
-* only kicks in if the query is [idempotent](../idempotence). - ------ - -When a query fails, it sometimes makes sense to retry it: the error might be temporary, or the query -might work on a different node. The driver uses a *retry policy* to determine when and how to retry. - -### Built-in retry policies - -The driver ships with two retry policies: `DefaultRetryPolicy` –– the default –– and -`ConsistencyDowngradingRetryPolicy`. - -The default retry policy should be preferred in most cases as it only retries when *it is perfectly -safe to do so*, and when *the chances of success are high enough* to warrant a retry. - -`ConsistencyDowngradingRetryPolicy` is provided for cases where the application can tolerate a -temporary degradation of its consistency guarantees. Its general behavior is as follows: if, based -on the information the coordinator returns, retrying the operation with the initially requested -consistency has a chance to succeed, do it. Otherwise, if based on this information, we know that -the initially requested consistency level *cannot be achieved currently*, then: - -* For writes, ignore the exception *if we know the write has been persisted on at least one - replica*. -* For reads, try reading again at a weaker consistency level. - -Keep in mind that this may break invariants! For example, if your application relies on immediate -write visibility by writing and reading at QUORUM only, downgrading a write to ONE could cause that -write to go unnoticed by subsequent reads at QUORUM. Furthermore, this policy doesn't always respect -datacenter locality; for example, it may downgrade LOCAL_QUORUM to ONE, and thus could accidentally -send a write that was intended for the local datacenter to another datacenter. In summary: **only -use this retry policy if you understand the consequences.** - -Since `DefaultRetryPolicy` is already the driver's default retry policy, no special configuration -is required to activate it. 
To use `ConsistencyDowngradingRetryPolicy` instead, the following -option must be declared in the driver [configuration](../configuration/): - -``` -datastax-java-driver.advanced.retry-policy.class = ConsistencyDowngradingRetryPolicy -``` - -You can also use your own policy by specifying for the above option the fully-qualified name of a -class that implements [RetryPolicy]. - -### Behavior - -The behavior of both policies will be detailed in the sections below. - -The policy has several methods that cover different error cases. Each method returns a -[RetryVerdict]. A retry verdict essentially provides the driver with a [RetryDecision] to indicate -what to do next. There are four possible retry decisions: - -* retry on the same node; -* retry on the next node in the [query plan](../load_balancing/) for this statement; -* rethrow the exception to the user code (from the `session.execute` call, or as a failed future if - using the asynchronous API); -* ignore the exception. That is, mark the request as successful, and return an empty result set. - -#### `onUnavailableVerdict` - -A request reached the coordinator, but there weren't enough live replicas to achieve the requested -consistency level. The coordinator replied with an `UNAVAILABLE` error. - -If the policy rethrows the error, the user code will get an [UnavailableException]. You can inspect -the exception's fields to get the amount of replicas that were known to be *alive* when the error -was triggered, as well as the amount of replicas that where *required* by the requested consistency -level. - -The default policy triggers a maximum of one retry, to the next node in the query plan. The -rationale is that the first coordinator might have been network-isolated from all other nodes -(thinking they're down), but still able to communicate with the client; in that case, retrying on -the same node has almost no chance of success, but moving to the next node might solve the issue. 
- -`ConsistencyDowngradingRetryPolicy` also triggers a maximum of one retry, but instead of trying the -next node, it will downgrade the initial consistency level, if possible, and retry *the same node*. -Note that if it is not possible to downgrade, this policy will rethrow the exception. For example, -if the original consistency level was QUORUM, and 2 replicas were required to achieve a quorum, but -only one replica is alive, then the query will be retried with consistency ONE. If no replica was -alive however, there is no point in downgrading, and the policy will rethrow. - -#### `onReadTimeoutVerdict` - -A read request reached the coordinator, which initially believed that there were enough live -replicas to process it. But one or several replicas were too slow to answer within the predefined -timeout (`read_request_timeout_in_ms` in `cassandra.yaml`); therefore the coordinator replied to the -client with a `READ_TIMEOUT` error. - -This could be due to a temporary overloading of these replicas, or even that they just failed or -were turned off. During reads, Cassandra doesn't request data from every replica to minimize -internal network traffic; instead, some replicas are only asked for a checksum of the data. A read -timeout may occur even if enough replicas responded to fulfill the consistency level, but only -checksum responses were received (the method's `dataPresent` parameter allow you to check if you're -in that situation). - -If the policy rethrows the error, the user code will get a [ReadTimeoutException] \(do not confuse -this error with [DriverTimeoutException], which happens when the coordinator didn't reply at all to -the client). - -The default policy triggers a maximum of one retry (to the same node), and only if enough replicas -had responded to the read request but data was not retrieved amongst those. 
That usually means that -enough replicas are alive to satisfy the consistency, but the coordinator picked a dead one for data -retrieval, not having detected that replica as dead yet. The reasoning is that by the time we get -the timeout, the dead replica will likely have been detected as dead and the retry has a high chance -of success. - -`ConsistencyDowngradingRetryPolicy` behaves like the default policy when enough replicas responded. -If not enough replicas responded however, it will attempt to downgrade the initial consistency -level, and retry *the same node*. If it is not possible to downgrade, this policy will rethrow the -exception. - -#### `onWriteTimeoutVerdict` - -This is similar to `onReadTimeout`, but for write operations. The reason reads and writes are -handled separately is because a read is obviously a non mutating operation, whereas a write is -likely to be. If a write times out at the coordinator level, there is no way to know whether the -mutation was applied or not on the non-answering replica. - -If the policy rethrows the error, the user code will get a [WriteTimeoutException]. - -This method is only invoked for [idempotent](../idempotence/) statements. Otherwise, the driver -bypasses the retry policy and always rethrows the error. - -The default policy triggers a maximum of one retry (to the same node), and only for a `BATCH_LOG` -write. The reasoning is that the coordinator tries to write the distributed batch log against a -small subset of nodes in the local datacenter; a timeout usually means that none of these nodes were -alive but the coordinator hadn't detected them as dead yet. By the time we get the timeout, the dead -nodes will likely have been detected as dead, and the retry has a high chance of success. 
- -`ConsistencyDowngradingRetryPolicy` also triggers a maximum of one retry, but behaves differently: - -* For `SIMPLE` and `BATCH` write types: if at least one replica acknowledged the write, the policy - will assume that the write will be eventually replicated, and decide to ignore the error; in other - words, it will consider that the write already succeeded, albeit with weaker consistency - guarantees: retrying is therefore useless. If no replica acknowledged the write, the policy will - rethrow the error. -* For `UNLOGGED_BATCH` write type: since only part of the batch could have been persisted, the - policy will attempt to downgrade the consistency level and retry *on the same node*. If - downgrading is not possible, the policy will rethrow. -* For `BATCH_LOG` write type: the policy will retry the same node, for the reasons explained above. -* For other write types: the policy will always rethrow. - -#### `onRequestAbortedVerdict` - -The request was aborted before we could get a response from the coordinator. This can happen in two -cases: - -* if the connection was closed due to an external event. This will manifest as a - [ClosedConnectionException] \(network failure) or [HeartbeatException] \(missed - [heartbeat](../pooling/#heartbeat)); -* if there was an unexpected error while decoding the response (this can only be a driver bug). - -This method is only invoked for [idempotent](../idempotence/) statements. Otherwise, the driver -bypasses the retry policy and always rethrows the error. - -Both the default policy and `ConsistencyDowngradingRetryPolicy` retry on the next node if the -connection was closed, and rethrow (assuming a driver bug) in all other cases. - -#### `onErrorResponseVerdict` - -The coordinator replied with an error other than `READ_TIMEOUT`, `WRITE_TIMEOUT` or `UNAVAILABLE`. -Namely, this covers [OverloadedException], [ServerError], [TruncateException], -[ReadFailureException] and [WriteFailureException]. 
- -This method is only invoked for [idempotent](../idempotence/) statements. Otherwise, the driver -bypasses the retry policy and always rethrows the error. - -Both the default policy and `ConsistencyDowngradingRetryPolicy` rethrow read and write failures, -and retry other errors on the next node. - -### Hard-coded rules - -There are a few cases where retrying is always the right thing to do. These are not covered by -`RetryPolicy`, but instead hard-coded in the driver: - -* **any error before a network write was attempted**: to send a query, the driver selects a node, - borrows a connection from the host's [connection pool](../pooling/), and then writes the message - to the connection. Errors can occur before the write was even attempted, for example if the - connection pool is saturated, or if the node went down right after we borrowed. In those cases, it - is always safe to retry since the request wasn't sent, so the driver will transparently move to - the next node in the query plan. -* **re-preparing a statement**: when the driver executes a prepared statement, it may find out that - the coordinator doesn't know about it, and need to re-prepare it on the fly (this is described in - detail [here](../statements/prepared/)). The query is then retried on the same node. -* **trying to communicate with a node that is bootstrapping**: this is a rare edge case, as in - practice the driver should never try to communicate with a bootstrapping node (the only way is if - it was specified as a contact point). It is again safe to assume that the query was not executed - at all, so the driver moves to the next node. - -Similarly, some errors have no chance of being solved by a retry. They will always be rethrown -directly to the user. These include: - -* [QueryValidationException] and any of its subclasses; -* [FunctionFailureException]; -* [ProtocolError]. 
- -### Using multiple policies - -The retry policy can be overridden in [execution profiles](../configuration/#profiles): - -``` -datastax-java-driver { - advanced.retry-policy { - class = DefaultRetryPolicy - } - profiles { - custom-retries { - advanced.retry-policy { - class = CustomRetryPolicy - } - } - slow { - request.timeout = 30 seconds - } - } -} -``` - -The `custom-retries` profile uses a dedicated policy. The `slow` profile inherits the default -profile's. Note that this goes beyond configuration inheritance: the driver only creates a single -`DefaultRetryPolicy` instance and reuses it (this also occurs if two sibling profiles have the same -configuration). - -Each request uses its declared profile's policy. If it doesn't declare any profile, or if the -profile doesn't have a dedicated policy, then the default profile's policy is used. - -[AllNodesFailedException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AllNodesFailedException.html -[ClosedConnectionException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.html -[DriverTimeoutException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/DriverTimeoutException.html -[FunctionFailureException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.html -[HeartbeatException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/HeartbeatException.html -[ProtocolError]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ProtocolError.html -[OverloadedException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/OverloadedException.html -[QueryValidationException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.html 
-[ReadFailureException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.html -[ReadTimeoutException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.html -[RetryDecision]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/retry/RetryDecision.html -[RetryPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/retry/RetryPolicy.html -[RetryVerdict]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/retry/RetryVerdict.html -[ServerError]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ServerError.html -[TruncateException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/TruncateException.html -[UnavailableException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/UnavailableException.html -[WriteFailureException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.html -[WriteTimeoutException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.html diff --git a/manual/core/shaded_jar/README.md b/manual/core/shaded_jar/README.md deleted file mode 100644 index 8e183c0efb5..00000000000 --- a/manual/core/shaded_jar/README.md +++ /dev/null @@ -1,86 +0,0 @@ - - -## Using the shaded JAR - -The default `java-driver-core` JAR depends on a number of [third party -libraries](../integration/#driver-dependencies). This can create conflicts if your application -already uses other versions of those same dependencies. - -To avoid this, we provide an alternative core artifact that shades [Netty](../integration/#netty), -[Jackson](../integration/#jackson) and [ESRI](../integration/#esri). 
To use it, replace the -dependency to `java-driver-core` by: - -```xml - - org.apache.cassandra - java-driver-core-shaded - ${driver.version} - -``` - -If you also use the query-builder, mapper or some other library that depends on java-driver-core, -you need to remove its dependency to the non-shaded JAR: - -```xml - - org.apache.cassandra - java-driver-core-shaded - ${driver.version} - - - - org.apache.cassandra - java-driver-query-builder - ${driver.version} - - - org.apache.cassandra - java-driver-core - - - -``` - -Notes: - -* the shading process works by moving the libraries under a different package name, and bundling - them directly into the driver JAR. This should be transparent for client applications: the - impacted dependencies are purely internal, their types are not surfaced in the driver's public - API. -* the driver is compatible with all Netty versions in the range `[4.1.7, 4.2.0)` (equal to or higher - than 4.1.7, and lesser than 4.2.0). If you just need a specific version in that range, you can - avoid the need for the shaded JAR by declaring an explicit dependency in your POM: - - ```xml - - org.apache.cassandra - java-driver-core - ${driver.version} - - - - io.netty - netty-handler - 4.1.39.Final - - ``` - - This only works with Netty: for Jackson and ESRI, only the exact version declared in the driver POM - is supported. diff --git a/manual/core/speculative_execution/README.md b/manual/core/speculative_execution/README.md deleted file mode 100644 index 5666d6a1363..00000000000 --- a/manual/core/speculative_execution/README.md +++ /dev/null @@ -1,272 +0,0 @@ - - -## Speculative query execution - -### Quick overview - -Pre-emptively query another node if the current one takes too long to respond. - -* `advanced.speculative-execution-policy` in the configuration. -* disabled by default. Also available: constant delay, or write your own policy. -* can have per-profile policies. -* only kicks in if the query is idempotent. 
-* creates more traffic: tune your pool and provision your cluster accordingly. - ------ - -Sometimes a Cassandra node might be experiencing difficulties (ex: long GC pause) and take longer -than usual to reply. Queries sent to that node will experience bad latency. - -One thing we can do to improve that is pre-emptively start a second execution of the query against -another node, before the first node has replied or errored out. If that second node replies faster, -we can send the response back to the client. We also cancel the first execution (note that -"cancelling" in this context simply means discarding the response when it arrives later, Cassandra -does not support cancellation of in flight requests): - -```ditaa -client driver exec1 exec2 ---+----------------+--------------+------+--- - | execute(query) | - |--------------->| - | | query node1 - | |------------->| - | | | - | | | - | | query node2 - | |-------------------->| - | | | | - | | | | - | | node2 replies | - | |<--------------------| - | complete | | - |<---------------| | - | | cancel | - | |------------->| -``` - -Or the first node could reply just after the second execution was started. In this case, we cancel -the second execution. In other words, whichever node replies faster "wins" and completes the client -query: - -```ditaa -client driver exec1 exec2 ---+----------------+--------------+------+--- - | execute(query) | - |--------------->| - | | query node1 - | |------------->| - | | | - | | | - | | query node2 - | |-------------------->| - | | | | - | | | | - | | node1 replies| | - | |<-------------| | - | complete | | - |<---------------| | - | | cancel | - | |-------------------->| -``` - -Speculative executions are **disabled** by default. The following sections cover the practical -details and how to enable them. 
- -### Query idempotence - -If a query is [not idempotent](../idempotence/), the driver will never schedule speculative -executions for it, because there is no way to guarantee that only one node will apply the mutation. - -### Configuration - -Speculative executions are controlled by a policy defined in the [configuration](../configuration/). -The default implementation never schedules an execution: - -``` -datastax-java-driver.advanced.speculative-execution-policy { - class = NoSpeculativeExecutionPolicy -} -``` - -The "constant" policy schedules executions at a fixed delay: - -``` -datastax-java-driver.advanced.speculative-execution-policy { - class = ConstantSpeculativeExecutionPolicy - - # The maximum number of executions (including the initial, non-speculative execution). - # This must be at least one. - max-executions = 3 - - # The delay between each execution. 0 is allowed, and will result in all executions being sent - # simultaneously when the request starts. - # Note that sub-millisecond precision is not supported, any excess precision information will - # be dropped; in particular, delays of less than 1 millisecond are equivalent to 0. - # This must be positive or 0. - delay = 100 milliseconds -} -``` - -Given the above configuration, an idempotent query would be handled this way: - -* start the initial execution at t0; -* if no response has been received at t0 + 100 milliseconds, start a speculative execution on - another node; -* if no response has been received at t0 + 200 milliseconds, start another speculative execution on - a third node; -* past that point, don't query other nodes, just wait for the first response to arrive. - -Finally, you can create your own policy by implementing [SpeculativeExecutionPolicy], and -referencing your implementation class from the configuration. - -### How speculative executions affect retries - -Turning on speculative executions doesn't change the driver's [retry](../retries/) behavior. 
Each -parallel execution will trigger retries independently: - -```ditaa -client driver exec1 exec2 ---+----------------+--------------+------+--- - | execute(query) | - |--------------->| - | | query node1 - | |------------->| - | | | - | | unavailable | - | |<-------------| - | | - | |retry at lower CL - | |------------->| - | | | - | | query node2 - | |-------------------->| - | | | | - | | server error | - | |<--------------------| - | | | - | | retry on node3 - | |-------------------->| - | | | | - | | node1 replies| | - | |<-------------| | - | complete | | - |<---------------| | - | | cancel | - | |-------------------->| -``` - -The only impact is that all executions of the same query always share the same query plan, so each -node will be used by at most one execution. - -### Tuning and practical details - -The goal of speculative executions is to improve overall latency (the time between `execute(query)` -and `complete` in the diagrams above) at high percentiles. On the flip side, too many speculative -executions increase the pressure on the cluster. - -If you use speculative executions to avoid unhealthy nodes, a good-behaving node should rarely hit -the threshold. We recommend running a benchmark on a healthy platform (all nodes up and healthy) and -monitoring the request percentiles with the `cql-requests` [metric](../metrics/). Then use the -latency at a high percentile (for example p99.9) as the threshold. - -Alternatively, maybe low latency is your absolute priority, and you are willing to take the -increased throughput as a tradeoff. In that case, set the threshold to 0 and provision your cluster -accordingly. - -You can monitor the number of speculative executions triggered by each node with the -`speculative-executions` [metric](../metrics/). 
- -#### Stream id exhaustion - -One side-effect of speculative executions is that many requests get cancelled, which can lead to a -phenomenon called *stream id exhaustion*: each TCP connection can handle multiple simultaneous -requests, identified by a unique number called *stream id* (see also the [pooling](../pooling/) -section). When a request gets cancelled, we can't reuse its stream id immediately because we might -still receive a response from the server later. If this happens often, the number of available -stream ids diminishes over time, and when it goes below a given threshold we close the connection -and create a new one. If requests are often cancelled, you will see connections being recycled at a -high rate. - -The best way to monitor this is to compare the `pool.orphaned-streams` [metric](../metrics/) to the -total number of available stream ids (which can be computed from the configuration: -`pool.local.size * max-requests-per-connection`). The `pool.available-streams` and `pool.in-flight` -metrics will also give you an idea of how many stream ids are left for active queries. - -#### Request ordering - -Note: ordering issues are only a problem with [server-side timestamps](../query_timestamps/), which -are not the default anymore in driver 4+. So unless you've explicitly enabled -`ServerSideTimestampGenerator`, you can skip this section. - -Suppose you run the following query with speculative executions and server-side timestamps enabled: - - insert into my_table (k, v) values (1, 1); - -The first execution is a bit too slow, so a second execution gets triggered. Finally, the first -execution completes, so the client code gets back an acknowledgement, and the second execution is -cancelled. However, cancelling only means that the driver stops waiting for the server's response, -the request could still be "on the wire"; let's assume that this is the case. 
- -Now you run the following query, which completes successfully: - - delete from my_table where k = 1; - -But now the second execution of the first query finally reaches its target node, which applies the -mutation. The row that you've just deleted is back! - -The workaround is to either specify a timestamp in your CQL queries: - - insert into my_table (k, v) values (1, 1) USING TIMESTAMP 1432764000; - -Or use a client-side [timestamp generator](../query_timestamps/). - -### Using multiple policies - -The speculative execution policy can be overridden in [execution -profiles](../configuration/#profiles): - -``` -datastax-java-driver { - advanced.speculative-execution-policy { - class = ConstantSpeculativeExecutionPolicy - max-executions = 3 - delay = 100 milliseconds - } - profiles { - oltp { - basic.request.timeout = 100 milliseconds - } - olap { - basic.request.timeout = 30 seconds - advanced.speculative-execution-policy.class = NoSpeculativeExecutionPolicy - } - } -} -``` - -The `olap` profile uses its own policy. The `oltp` profile inherits the default profile's. Note that -this goes beyond configuration inheritance: the driver only creates a single -`ConstantSpeculativeExecutionPolicy` instance and reuses it (this also occurs if two sibling -profiles have the same configuration). - -Each request uses its declared profile's policy. If it doesn't declare any profile, or if the -profile doesn't have a dedicated policy, then the default profile's policy is used. - -[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.html diff --git a/manual/core/ssl/README.md b/manual/core/ssl/README.md deleted file mode 100644 index 913c7bc6c9a..00000000000 --- a/manual/core/ssl/README.md +++ /dev/null @@ -1,236 +0,0 @@ - - -## SSL - -### Quick overview - -Secure the traffic between the driver and Cassandra. 
- -* `advanced.ssl-engine-factory` in the configuration; defaults to none, also available: - config-based, or write your own. -* or programmatically: - [CqlSession.builder().withSslEngineFactory()][SessionBuilder.withSslEngineFactory] or - [CqlSession.builder().withSslContext()][SessionBuilder.withSslContext]. - ------ - -There are two aspects to SSL: - -* **client-to-node encryption**, where the traffic is encrypted, and the client verifies the - identity of the Cassandra nodes it connects to; -* optionally, **client certificate authentication**, where Cassandra nodes also verify the identity - of the client. - -This section describes the driver-side configuration; it assumes that you've already configured SSL -in Cassandra: - -* [the Cassandra documentation][dsClientToNode] covers a basic approach with self-signed - certificates, which is fine for development and tests. -* [this blog post][pickle] details a more advanced solution based on a Certificate Authority (CA). - -### Preparing the certificates - -#### Client truststore - -This is required for client-to-node encryption. - -If you're using self-signed certificates, you need to export the public part of each node's -certificate from that node's keystore: - -``` -keytool -export -alias cassandra -file cassandranode0.cer -keystore .keystore -``` - -Then add all public certificates to the client truststore: - -``` -keytool -import -v -trustcacerts -alias -file cassandranode0.cer -keystore client.truststore -keytool -import -v -trustcacerts -alias -file cassandranode1.cer -keystore client.truststore -... 
-``` - -If you're using a Certificate Authority, the client truststore only needs to contain the CA's -certificate: - -``` -keytool -import -v -trustcacerts -alias CARoot -file ca.cer -keystore client.truststore -``` - -#### Client keystore - -If you also intend to use client certificate authentication, generate the public and private key -pair for the client: - -``` -keytool -genkey -keyalg RSA -alias client -keystore client.keystore -``` - -If you're using self-signed certificates, extract the public part of the client certificate, and -import it in the truststore of each Cassandra node: - -``` -keytool -export -alias client -file client.cer -keystore client.keystore -keytool -import -v -trustcacerts -alias client -file client.cer -keystore server.truststore -``` - -If you're using a CA, sign the client certificate with it (see the blog post linked at the top of -this page). Then the nodes' truststores only need to contain the CA's certificate (which should -already be the case if you've followed the steps for inter-node encryption). - -`DefaultSslEngineFactory` supports client keystore reloading; see property -`advanced.ssl-engine-factory.keystore-reload-interval`. - -### Driver configuration - -By default, the driver's SSL support is based on the JDK's built-in implementation: JSSE (Java -Secure Socket Extension). - -To enable it, you need to define an engine factory in the [configuration](../configuration/). - -#### JSSE, property-based - -``` -datastax-java-driver { - advanced.ssl-engine-factory { - class = DefaultSslEngineFactory - - # This property is optional. If it is not present, the driver won't explicitly enable cipher - # suites on the engine, which according to the JDK documentations results in "a minimum quality - # of service". 
- // cipher-suites = [ "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA" ] - - # Whether or not to require validation that the hostname of the server certificate's common - # name matches the hostname of the server being connected to. If not set, defaults to true. - // hostname-validation = true - - # The locations and passwords used to access truststore and keystore contents. - # These properties are optional. If either truststore-path or keystore-path are specified, - # the driver builds an SSLContext from these files. If neither option is specified, the - # default SSLContext is used, which is based on system property configuration. - // truststore-path = /path/to/client.truststore - // truststore-password = password123 - // keystore-path = /path/to/client.keystore - // keystore-password = password123 - - # The duration between attempts to reload the keystore from the contents of the file specified - # by `keystore-path`. This is mainly relevant in environments where certificates have short - # lifetimes and applications are restarted infrequently, since an expired client certificate - # will prevent new connections from being established until the application is restarted. - // keystore-reload-interval = 30 minutes - } -} -``` - -Alternatively to storing keystore and truststore information in your configuration, you can instead -use [JSSE system properties]: - -``` --Djavax.net.ssl.trustStore=/path/to/client.truststore --Djavax.net.ssl.trustStorePassword=password123 -# If you're using client authentication: --Djavax.net.ssl.keyStore=/path/to/client.keystore --Djavax.net.ssl.keyStorePassword=password123 -``` - -#### JSSE, custom factory - -If you need more control than what system properties allow, you need to write your own engine -factory. If you just need specific configuration on the `SSLEngine`, you can extend the default -factory and override `newSslEngine`. 
For example, here is how you would configure custom -`AlgorithmConstraints`: - -```java -public class CustomSslEngineFactory extends DefaultSslEngineFactory { - - public CustomSslEngineFactory(DriverContext context) { - super(context); - } - - @Override - public SSLEngine newSslEngine(SocketAddress remoteEndpoint) { - SSLEngine engine = super.newSslEngine(remoteEndpoint); - SSLParameters parameters = engine.getSSLParameters(); - parameters.setAlgorithmConstraints(...); - engine.setSSLParameters(parameters); - return engine; - } -} -``` - -Then declare your custom implementation in the configuration: - -``` -datastax-java-driver { - advanced.ssl-engine-factory { - class = com.mycompany.CustomSslEngineFactory - } -} -``` - -#### JSSE, programmatic - -You can also provide a factory instance programmatically. This will take precedence over the -configuration: - -```java -SslEngineFactory yourFactory = ... -CqlSession session = CqlSession.builder() - .withSslEngineFactory(yourFactory) - .build(); -``` - -If you are reusing code that configures SSL programmatically, you can use -[ProgrammaticSslEngineFactory] as an easy way to wrap that into a factory instance: - -```java -SSLContext sslContext = ... -String[] cipherSuites = ... -boolean requireHostNameValidation = ... -CqlSession session = - CqlSession.builder() - .withSslEngineFactory( - new ProgrammaticSslEngineFactory( - sslContext, cipherSuites, requireHostNameValidation)) - .build(); -``` - -Finally, there is a convenient shortcut on the session builder if you just need to pass an -`SSLContext`: - -```java -SSLContext sslContext = ... -CqlSession session = CqlSession.builder() - .withSslContext(sslContext) - .build(); -``` - -#### Netty-tcnative - -Netty supports native integration with OpenSSL / boringssl. The driver does not provide this out of -the box, but with a bit of custom development it is fairly easy to add. 
See -[SslHandlerFactory](../../developer/netty_pipeline/#ssl-handler-factory) in the developer docs. - - -[dsClientToNode]: https://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/secureSSLClientToNode.html -[pickle]: http://thelastpickle.com/blog/2015/09/30/hardening-cassandra-step-by-step-part-1-server-to-server.html -[JSSE system properties]: http://docs.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#Customization -[SessionBuilder.withSslEngineFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withSslEngineFactory-com.datastax.oss.driver.api.core.ssl.SslEngineFactory- -[SessionBuilder.withSslContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withSslContext-javax.net.ssl.SSLContext- -[ProgrammaticSslEngineFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.html diff --git a/manual/core/statements/.nav b/manual/core/statements/.nav deleted file mode 100644 index 0c001e3860e..00000000000 --- a/manual/core/statements/.nav +++ /dev/null @@ -1,3 +0,0 @@ -simple -prepared -batch \ No newline at end of file diff --git a/manual/core/statements/README.md b/manual/core/statements/README.md deleted file mode 100644 index 394e81ae00e..00000000000 --- a/manual/core/statements/README.md +++ /dev/null @@ -1,84 +0,0 @@ - - -## Statements - -### Quick overview - -What you pass to `session.execute()`. - -* three types: simple (textual query), bound (prepared) and batch. -* built-in implementations are **immutable**. Setters always return a new object, don't ignore the - result. - ------ - -To execute a CQL query, you create a [Statement] instance and pass it to -[Session#execute][execute] or [Session#executeAsync][executeAsync]. 
The driver provides various
-implementations:
-
-* [SimpleStatement](simple/): a simple implementation built directly from a character string.
-  Typically used for queries that are executed only once or a few times.
-* [BoundStatement (from PreparedStatement)](prepared/): obtained by binding values to a prepared
-  query. Typically used for queries that are executed often, with different values.
-* [BatchStatement](batch/): a statement that groups multiple statements to be executed as a batch.
-
-All statement types share a [common set of execution attributes][StatementBuilder], that can be set
-through either setters or a builder:
-
-* [execution profile](../configuration/) name, or the profile itself if it's been built dynamically.
-* [idempotent flag](../idempotence/).
-* [tracing flag](../tracing/).
-* [query timestamp](../query_timestamps/).
-* [page size and paging state](../paging/).
-* [per-query keyspace](per_query_keyspace/) (Cassandra 4 or above).
-* [token-aware routing](../load_balancing/#token-aware) information (keyspace and key/token).
-* normal and serial consistency level.
-* query timeout.
-* custom payload to send arbitrary key/value pairs with the request (you should only need this if
-  you have a custom query handler on the server).
-
-When setting these attributes, keep in mind that statements are **immutable**, and every method
-returns a different instance:
-
-```java
-SimpleStatement statement =
-    SimpleStatement.newInstance("SELECT release_version FROM system.local");
-
-// Won't work: statement isn't modified in place
-statement.setExecutionProfileName("oltp");
-statement.setIdempotent(true);
-
-// Instead, reassign the statement every time:
-statement = statement.setExecutionProfileName("oltp").setIdempotent(true);
-```
-
-All of these mutating methods are annotated with `@CheckReturnValue`. 
Some code analysis tools -- -such as [ErrorProne](https://errorprone.info/) -- can check correct usage at build time, and report -mistakes as compiler errors. - -Note that some attributes can either be set programmatically, or inherit a default value defined in -the [configuration](../configuration/). Namely, these are: idempotent flag, query timeout, -consistency levels and page size. We recommended the configuration approach whenever possible (you -can create execution profiles to capture common combinations of those options). - -[Statement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html -[StatementBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/StatementBuilder.html -[execute]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#execute-com.datastax.oss.driver.api.core.cql.Statement- -[executeAsync]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#executeAsync-com.datastax.oss.driver.api.core.cql.Statement- diff --git a/manual/core/statements/batch/README.md b/manual/core/statements/batch/README.md deleted file mode 100644 index f080fe16ab0..00000000000 --- a/manual/core/statements/batch/README.md +++ /dev/null @@ -1,87 +0,0 @@ - - -## Batch statements - -### Quick overview - -Group a set of statements into an atomic operation. - -* create with [BatchStatement.newInstance()] or [BatchStatement.builder()]. -* built-in implementation is **immutable**. Setters always return a new object, don't ignore the - result. 
- ------ - -Use [BatchStatement] to execute a set of queries as an atomic operation (refer to -[Batching inserts, updates and deletes][batch_dse] to understand how to use batching effectively): - -```java -PreparedStatement preparedInsertExpense = - session.prepare( - "INSERT INTO cyclist_expenses (cyclist_name, expense_id, amount, description, paid) " - + "VALUES (:name, :id, :amount, :description, :paid)"); -SimpleStatement simpleInsertBalance = - SimpleStatement.newInstance( - "INSERT INTO cyclist_expenses (cyclist_name, balance) VALUES (?, 0) IF NOT EXISTS", - "Vera ADRIAN"); - -BatchStatement batch = - BatchStatement.newInstance( - DefaultBatchType.LOGGED, - simpleInsertBalance, - preparedInsertExpense.bind("Vera ADRIAN", 1, 7.95f, "Breakfast", false)); - -session.execute(batch); -``` - -To create a new batch statement, use one of the static factory methods (as demonstrated above), or a -builder: - -```java -BatchStatement batch = - BatchStatement.builder(DefaultBatchType.LOGGED) - .addStatement(simpleInsertBalance) - .addStatement(preparedInsertExpense.bind("Vera ADRIAN", 1, 7.95f, "Breakfast", false)) - .build(); -``` - -Keep in mind that batch statements are **immutable**, and every method returns a different instance: - -```java -// Won't work: the object is not modified in place: -batch.setExecutionProfileName("oltp"); - -// Instead, reassign the statement every time: -batch = batch.setExecutionProfileName("oltp"); -``` - -As shown in the examples above, batches can contain any combination of simple statements and bound -statements. A given batch can contain at most 65536 statements. Past this limit, addition methods -throw an `IllegalStateException`. - -In addition, simple statements with named parameters are currently not supported in batches (this is -due to a [protocol limitation][CASSANDRA-10246] that will be fixed in a future version). If you try -to execute such a batch, an `IllegalArgumentException` is thrown. 
- -[BatchStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BatchStatement.html -[BatchStatement.newInstance()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BatchStatement.html#newInstance-com.datastax.oss.driver.api.core.cql.BatchType- -[BatchStatement.builder()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BatchStatement.html#builder-com.datastax.oss.driver.api.core.cql.BatchType- -[batch_dse]: http://docs.datastax.com/en/dse/6.7/cql/cql/cql_using/useBatch.html -[CASSANDRA-10246]: https://issues.apache.org/jira/browse/CASSANDRA-10246 diff --git a/manual/core/statements/per_query_keyspace/README.md b/manual/core/statements/per_query_keyspace/README.md deleted file mode 100644 index 9a7ffa338c9..00000000000 --- a/manual/core/statements/per_query_keyspace/README.md +++ /dev/null @@ -1,148 +0,0 @@ - - -## Per-query keyspace - -### Quick overview - -Specify the keyspace separately instead of hardcoding it in the query string. - -* Cassandra 4+ / DSE 6+. -* only works with simple statements. - ------ - -Sometimes it is convenient to send the keyspace separately from the query string, and without -switching the whole session to that keyspace either. For example, you might have a multi-tenant -setup where identical requests are executed against different keyspaces. - -**This feature is only available with Cassandra 4.0 or above** ([CASSANDRA-10145]). Make sure you -are using [native protocol](../../native_protocol/) v5 or above to connect. - -If you try against an older version, you will get an error: - -``` -Exception in thread "main" java.lang.IllegalArgumentException: Can't use per-request keyspace with protocol V4 -``` - -*Note: at the time of writing, Cassandra 4 is not released yet. 
If you want to test those examples -against the development version, keep in mind that native protocol v5 is still in beta, so you'll -need to force it in the configuration: `datastax-java-driver.protocol.version = V5`*. - -### Basic usage - -To use a per-query keyspace, set it on your statement instance: - -```java -CqlSession session = CqlSession.builder().build(); -CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); -SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM foo WHERE k = 1").setKeyspace(keyspace); -session.execute(statement); -``` - -You can do this on [simple](../simple/), [prepared](../prepared) or [batch](../batch/) statements. - -If the session is connected to another keyspace, the per-query keyspace takes precedence: - -```java -CqlIdentifier keyspace1 = CqlIdentifier.fromCql("test1"); -CqlIdentifier keyspace2 = CqlIdentifier.fromCql("test2"); - -CqlSession session = CqlSession.builder().withKeyspace(keyspace1).build(); - -// Will query test2.foo: -SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM foo WHERE k = 1").setKeyspace(keyspace2); -session.execute(statement); -``` - -On the other hand, if a keyspace is hard-coded in the query, it takes precedence over the per-query -keyspace: - -```java -// Will query test1.foo: -SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM test1.foo WHERE k = 1").setKeyspace(keyspace2); -``` - -### Bound statements - -Bound statements can't have a per-query keyspace; they only inherit the one that was set on the -prepared statement: - -```java -CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); -PreparedStatement pst = - session.prepare( - SimpleStatement.newInstance("SELECT * FROM foo WHERE k = ?").setKeyspace(keyspace)); - -// Will query test.foo: -BoundStatement bs = pst.bind(1); -``` - -The rationale is that prepared statements hold metadata about the target table; if Cassandra allowed -execution against different keyspaces, it would be 
under the assumption that all tables have the -same exact schema, which could create issues if this turned out not to be true at runtime. - -Therefore you'll have to prepare against every target keyspace. A good strategy is to do this lazily -with a cache. Here is a simple example using Guava: - -```java -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; - -LoadingCache cache = - CacheBuilder.newBuilder() - .build( - new CacheLoader() { - @Override - public PreparedStatement load(CqlIdentifier keyspace) throws Exception { - return session.prepare( - SimpleStatement.newInstance("SELECT * FROM foo WHERE k = ?") - .setKeyspace(keyspace)); - } - }); -CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); -BoundStatement bs = cache.get(keyspace).bind(1); -``` - -### Relation to the routing keyspace - -Statements have another keyspace-related method: `Statement.setRoutingKeyspace()`. However, the -routing keyspace is only used for [token-aware routing], as a hint to help the driver send requests -to the best replica. It does not affect the query string itself. - -If you are using a per-query keyspace, the routing keyspace becomes obsolete: the driver will use -the per-query keyspace as the routing keyspace. - -```java -SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM foo WHERE k = 1") - .setKeyspace(keyspace) - .setRoutingKeyspace(keyspace); // NOT NEEDED: will be ignored -``` - -At some point in the future, when Cassandra 4 becomes prevalent and using a per-query keyspace is -the norm, we'll probably deprecate `setRoutingKeyspace()`. 
- -[token-aware routing]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKey-- - -[CASSANDRA-10145]: https://issues.apache.org/jira/browse/CASSANDRA-10145 diff --git a/manual/core/statements/prepared/README.md b/manual/core/statements/prepared/README.md deleted file mode 100644 index 5a87b238cbc..00000000000 --- a/manual/core/statements/prepared/README.md +++ /dev/null @@ -1,358 +0,0 @@ - - -## Prepared statements - -### Quick overview - -Prepare a query string once, reuse with different values. More efficient than simple statements for -queries that are used often. - -* create the prepared statement with `session.prepare()`, call [bind()][PreparedStatement.bind] or - [boundStatementBuilder()][PreparedStatement.boundStatementBuilder] on it to create executable - statements. -* the session has a built-in cache, it's OK to prepare the same string twice. -* values: `?` or `:name`, fill with `setXxx(int, ...)` or `setXxx(String, ...)` respectively. -* some values can be left unset with Cassandra 2.2+ / DSE 5+. -* built-in implementation is **immutable**. Setters always return a new object, don't ignore the - result. 
- ------ - -Use prepared statements for queries that are executed multiple times in your application: - -```java -PreparedStatement prepared = session.prepare( - "insert into product (sku, description) values (?, ?)"); - -BoundStatement bound = prepared.bind("234827", "Mouse"); -session.execute(bound); -``` - -When you prepare the statement, Cassandra parses the query string, caches the result and returns a -unique identifier (the `PreparedStatement` object keeps an internal reference to that identifier): - -```ditaa -client driver Cassandra ---+------------------------+----------------+------ - | | | - | session.prepare(query) | | - |----------------------->| | - | | PREPARE(query) | - | |--------------->| - | | | - | | | - | | | - compute id - | | | - parse query string - | | | - cache (id, parsed) - | | | - | | PREPARED(id) | - | |<---------------| - | PreparedStatement(id) | | - |<-----------------------| | -``` - -When you bind and execute a prepared statement, the driver only sends the identifier, which allows -Cassandra to skip the parsing phase: - -```ditaa -client driver Cassandra ---+---------------------------------+---------------------+------ - | | | - | session.execute(BoundStatement) | | - |-------------------------------->| | - | | EXECUTE(id, values) | - | |-------------------->| - | | | - | | | - | | | - get cache(id) - | | | - execute query - | | | - | | ROWS | - | |<--------------------| - | | | - |<--------------------------------| | -``` - -### Advantages of prepared statements - -Beyond saving a bit of parsing overhead on the server, prepared statements have other advantages; -the `PREPARED` response also contains useful metadata about the CQL query: - -* information about the result set that will be produced when the statement gets executed. The - driver caches this, so that the server doesn't need to include it with every response. This saves - a bit of bandwidth, and the resources it would take to decode it every time. 
-* the CQL types of the bound variables. This allows bound statements' `set` methods to perform - better checks, and fail fast (without a server round-trip) if the types are wrong. -* which bound variables are part of the partition key. This allows bound statements to automatically - compute their [routing key](../../load_balancing/#token-aware). -* more optimizations might get added in the future. For example, [CASSANDRA-10813] suggests adding - an "[idempotent](../../idempotence)" flag to the response. - -If you have a unique query that is executed only once, a [simple statement](../simple/) will be more -efficient. But note that this should be pretty rare: most client applications typically repeat the -same queries over and over, and a parameterized version can be extracted and prepared. - -### Preparing - -`Session.prepare()` accepts either a plain query string, or a `SimpleStatement` object. If you use a -`SimpleStatement`, its execution parameters will propagate to bound statements: - -```java -SimpleStatement simpleStatement = - SimpleStatement.builder("SELECT * FROM product WHERE sku = ?") - .setConsistencyLevel(DefaultConsistencyLevel.QUORUM) - .build(); -PreparedStatement preparedStatement = session.prepare(simpleStatement); -BoundStatement boundStatement = preparedStatement.bind(); -assert boundStatement.getConsistencyLevel() == DefaultConsistencyLevel.QUORUM; -``` - -For more details, including the complete list of attributes that are copied, refer to -[API docs][Session.prepare]. 
-
-The driver caches prepared statements: if you call `prepare()` multiple times with the same query
-string (or a `SimpleStatement` with the same execution parameters), you will get the same
-`PreparedStatement` instance:
-
-```java
-PreparedStatement ps1 = session.prepare("SELECT * FROM product WHERE sku = ?");
-// The second call hits the cache, nothing is sent to the server:
-PreparedStatement ps2 = session.prepare("SELECT * FROM product WHERE sku = ?");
-assert ps1 == ps2;
-```
-
-We still recommend avoiding repeated calls to `prepare()`; if that's not possible (e.g. if query
-strings are generated dynamically), there will just be a small performance overhead to check the
-cache on every call.
-
-Note that caching is based on:
-
-* the query string exactly as you provided it: the driver does not perform any kind of trimming or
-  sanitizing.
-* all other execution parameters: for example, preparing two statements with identical query strings
-  but different consistency levels will yield two distinct prepared statements (that each produce
-  bound statements with their respective consistency level).
-
-The size of the cache is exposed as a session-level [metric](../../metrics/)
-`cql-prepared-cache-size`. The cache uses [weak values][guava eviction] eviction, so this
-represents the number of `PreparedStatement` instances that your application has created, and is
-still holding a reference to.
-
-### Parameters and binding
-
-The prepared query string will usually contain placeholders, which can be either anonymous or named:
-
-```java
-ps1 = session.prepare("insert into product (sku, description) values (?, ?)");
-ps2 = session.prepare("insert into product (sku, description) values (:s, :d)");
-```
-
-To turn the statement into its executable form, you need to *bind* it in order to create a
-[BoundStatement]. 
As shown previously, there is a shorthand to provide the parameters in the same -call: - -```java -BoundStatement bound = ps1.bind("324378", "LCD screen"); -``` - -You can also bind first, then use setters, which is slightly more explicit. Bound statements are -**immutable**, so each method returns a new instance; make sure you don't accidentally discard the -result: - -```java -// Positional setters: -BoundStatement bound = ps1.bind() - .setString(0, "324378") - .setString(1, "LCD screen"); - -// Named setters: -BoundStatement bound = ps2.bind() - .setString("s", "324378") - .setString("d", "LCD screen"); -``` - -Finally, you can use a builder to avoid creating intermediary instances, especially if you have a -lot of methods to call: - -```java -BoundStatement bound = - ps1 - .boundStatementBuilder() - .setString(0, "324378") - .setString(1, "LCD screen") - .setExecutionProfileName("oltp") - .setQueryTimestamp(123456789L) - .build(); -``` - -You can use named setters even if the query uses anonymous parameters; Cassandra names the -parameters after the column they apply to: - -```java -BoundStatement bound = ps1.bind() - .setString("sku", "324378") - .setString("description", "LCD screen"); -``` - -This can be ambiguous if the query uses the same column multiple times, like in `select * from sales -where sku = ? and date > ? and date < ?`. In these situations, use positional setters or named -parameters. - -#### Unset values - -With [native protocol](../../native_protocol/) V3, all variables must be bound. With native protocol -V4 (Cassandra 2.2 / DSE 5) or above, variables can be left unset, in which case they will be ignored -(no tombstones will be generated). 
If you're reusing a bound statement, you can use the `unset` -method to unset variables that were previously set: - -```java -BoundStatement bound = ps1.bind() - .setString("sku", "324378") - .setString("description", "LCD screen"); - -// Named: -bound = bound.unset("description"); - -// Positional: -bound = bound.unset(1); -``` - -A bound statement also has getters to retrieve the values. Note that this has a small performance -overhead, since values are stored in their serialized form. - -Since bound statements are immutable, they are safe to reuse across threads and asynchronous -executions. - - -### How the driver prepares - -Cassandra does not replicate prepared statements across the cluster. It is the driver's -responsibility to ensure that each node's cache is up to date. It uses a number of strategies to -achieve this: - -1. When a statement is initially prepared, it is first sent to a single node in the cluster (this - avoids hitting all nodes in case the query string is wrong). Once that node replies - successfully, the driver re-prepares on all remaining nodes: - - ```ditaa - client driver node1 node2 node3 - --+------------------------+----------------+--------------+------+--- - | | | | | - | session.prepare(query) | | | | - |----------------------->| | | | - | | PREPARE(query) | | | - | |--------------->| | | - | | | | | - | | PREPARED(id) | | | - | |<---------------| | | - | | | | | - | | | | | - | | PREPARE(query) | | - | |------------------------------>| | - | | | | | - | | PREPARE(query) | | - | |------------------------------------->| - | | | | | - |<-----------------------| | | | - ``` - - The prepared statement identifier is deterministic (it's a hash of the query string), so it is - the same for all nodes. - -2. if a node crashes, it might lose all of its prepared statements (this depends on the version: - since Cassandra 3.10, prepared statements are stored in a table, and the node is able to - reprepare on its own when it restarts). 
So the driver keeps a client-side cache; anytime a node - is marked back up, the driver re-prepares all statements on it; - -3. finally, if the driver tries to execute a statement and finds out that the coordinator doesn't - know about it, it will re-prepare the statement on the fly (this is transparent for the client, - but will cost two extra roundtrips): - - ```ditaa - client driver node1 - --+-------------------------------+------------------------------+-- - | | | - |session.execute(boundStatement)| | - +------------------------------>| | - | | EXECUTE(id, values) | - | |----------------------------->| - | | | - | | UNPREPARED | - | |<-----------------------------| - | | | - | | | - | | PREPARE(query) | - | |----------------------------->| - | | | - | | PREPARED(id) | - | |<-----------------------------| - | | | - | | | - | | EXECUTE(id, values) | - | |----------------------------->| - | | | - | | ROWS | - | |<-----------------------------| - | | | - |<------------------------------| | - ``` - -You can customize these strategies through the [configuration](../../configuration/): - -* `datastax-java-driver.advanced.prepared-statements.prepare-on-all-nodes` controls whether - statements are initially re-prepared on other hosts (step 1 above); -* `datastax-java-driver.advanced.prepared-statements.reprepare-on-up` controls how statements are - re-prepared on a node that comes back up (step 2 above). - -Read the [reference configuration](../../configuration/reference/) for a detailed description of each -of those options. - -### Prepared statements and schema changes - -**With Cassandra 3 and below, avoid preparing `SELECT *` queries**; the driver does not handle -schema changes that would affect the results of a prepared statement. Therefore `SELECT *` queries -can create issues, for example: - -* table `foo` contains columns `b` and `c`. -* the driver prepares `SELECT * FROM foo`. 
It gets a reply indicating that executing this statement - will return columns `b` and `c`, and caches that metadata locally (for performance reasons: this - avoids sending it with each response later). -* someone alters table `foo` to add a new column `a`. -* the next time the driver executes the prepared statement, it gets a response that now contains - columns `a`, `b` and `c`. However, it's still using its stale copy of the metadata, so it decodes - `a` thinking it's `b`. In the best case scenario, `a` and `b` have different types and decoding - fails; in the worst case, they have compatible types and the client gets corrupt data. - -To avoid this, do not create prepared statements for `SELECT *` queries if you plan on making schema -changes involving adding or dropping columns. Instead, always list all columns of interest in your -statement, i.e.: `SELECT b, c FROM foo`. - -With Cassandra 4 and [native protocol](../../native_protocol/) v5, this issue is fixed -([CASSANDRA-10786]): the server detects that the driver is operating on stale metadata and sends the -new version with the response; the driver updates its local cache transparently, and the client can -observe the new columns in the result set. 
- -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[Session.prepare]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html#prepare-com.datastax.oss.driver.api.core.cql.SimpleStatement- -[CASSANDRA-10786]: https://issues.apache.org/jira/browse/CASSANDRA-10786 -[CASSANDRA-10813]: https://issues.apache.org/jira/browse/CASSANDRA-10813 -[guava eviction]: https://github.com/google/guava/wiki/CachesExplained#reference-based-eviction -[PreparedStatement.bind]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/PreparedStatement.html#bind-java.lang.Object...- -[PreparedStatement.boundStatementBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/PreparedStatement.html#boundStatementBuilder-java.lang.Object...- diff --git a/manual/core/statements/simple/README.md b/manual/core/statements/simple/README.md deleted file mode 100644 index 13ddbb7a389..00000000000 --- a/manual/core/statements/simple/README.md +++ /dev/null @@ -1,206 +0,0 @@ - - -## Simple statements - -### Quick overview - -For one-off executions of a raw query string. - -* create with [SimpleStatement.newInstance()] or [SimpleStatement.builder()]. -* values: `?` or `:name`, fill with `setPositionalValues()` or `setNamedValues()` respectively. - Driver has to guess target CQL types, this can lead to ambiguities. -* built-in implementation is **immutable**. Setters always return a new object, don't ignore the - result. 
- ------ - -Use [SimpleStatement] for queries that will be executed only once (or just a few times): - -```java -SimpleStatement statement = - SimpleStatement.newInstance( - "SELECT value FROM application_params WHERE name = 'greeting_message'"); -session.execute(statement); -``` - -Each time you execute a simple statement, Cassandra parses the query string again; nothing is cached -(neither on the client nor on the server): - -```ditaa -client driver Cassandra ---+----------------------------------+---------------------+------ - | | | - | session.execute(SimpleStatement) | | - |--------------------------------->| | - | | QUERY(query_string) | - | |-------------------->| - | | | - | | | - | | | - parse query string - | | | - execute query - | | | - | | ROWS | - | |<--------------------| - | | | - |<---------------------------------| | -``` - -If you execute the same query often (or a similar query with different column values), consider a -[prepared statement](../prepared/) instead. - -### Creating an instance - -The driver provides various ways to create simple statements instances. First, `SimpleStatement` has -a few static factory methods: - -```java -SimpleStatement statement = - SimpleStatement.newInstance( - "SELECT value FROM application_params WHERE name = 'greeting_message'"); -``` - -You can then use setter methods to configure additional options. Note that, like all statement -implementations, simple statements are **immutable**, so these methods return a new instance each -time. 
Make sure you don't ignore the result: - -```java -// WRONG: ignores the result -statement.setIdempotent(true); - -// Instead, reassign the statement every time: -statement = statement.setIdempotent(true); -``` - -If you have many options to set, you can use a builder to avoid creating intermediary instances: - -```java -SimpleStatement statement = - SimpleStatement.builder("SELECT value FROM application_params WHERE name = 'greeting_message'") - .setIdempotence(true) - .build(); -``` - -Finally, `Session` provides a shorthand method when you only have a simple query string: - -```java -session.execute("SELECT value FROM application_params WHERE name = 'greeting_message'"); -``` - -### Using values - -Instead of hard-coding everything in the query string, you can use bind markers and provide values -separately: - -* by position: - - ```java - SimpleStatement.builder("SELECT value FROM application_params WHERE name = ?") - .addPositionalValues("greeting_message") - .build(); - ``` -* by name: - - ```java - SimpleStatement.builder("SELECT value FROM application_params WHERE name = :n") - .addNamedValue("n", "greeting_message") - .build(); - ``` - -This syntax has a few advantages: - -* if the values come from some other part of your code, it looks cleaner than doing the - concatenation yourself; -* you don't need to translate the values to their string representation. The driver will send them - alongside the query, in their serialized binary form. - -The number of values must match the number of placeholders in the query string, and their types must -match the database schema. 
Note that the driver does not parse simple statements, so it cannot -perform those checks on the client side; if you make a mistake, the query will be sent anyway, and -the server will reply with an error, that gets translated into a driver exception: - -```java -session.execute( - SimpleStatement.builder("SELECT value FROM application_params WHERE name = :n") - .addPositionalValues("greeting_message", "extra_value") - .build()); -// Exception in thread "main" com.datastax.oss.driver.api.core.servererrors.InvalidQueryException: -// Invalid amount of bind variables -``` - -### Type inference - -Another consequence of not parsing query strings is that the driver has to guess how to serialize -values, based on their Java type (see the [default type mappings](../../#cql-to-java-type-mapping)). -This can be tricky, in particular for numeric types: - -```java -// schema: create table bigints(b bigint primary key) -session.execute( - SimpleStatement.builder("INSERT INTO bigints (b) VALUES (?)") - .addPositionalValues(1) - .build()); -// Exception in thread "main" com.datastax.oss.driver.api.core.servererrors.InvalidQueryException: -// Expected 8 or 0 byte long (4) -``` - -The problem here is that the literal `1` has the Java type `int`. So the driver serializes it as a -CQL `int` (4 bytes), but the server expects a CQL `bigint` (8 bytes). 
The fix is to specify the
-correct Java type:
-
-```java
-session.execute(
- SimpleStatement.builder("INSERT INTO bigints (b) VALUES (?)")
- .addPositionalValues(1L) // long literal
- .build());
-```
-
-Similarly, strings are always serialized to `varchar`, so you could have a problem if you target an
-`ascii` column:
-
-```java
-// schema: create table ascii_quotes(id int primary key, t ascii)
-session.execute(
- SimpleStatement.builder("INSERT INTO ascii_quotes (id, t) VALUES (?, ?)")
- .addPositionalValues(1, "Touché sir, touché...")
- .build());
-// Exception in thread "main" com.datastax.oss.driver.api.core.servererrors.InvalidQueryException:
-// Invalid byte for ascii: -61
-```
-
-In that situation, there is no way to hint at the correct type. Fortunately, you can encode the
-value manually as a workaround:
-
-```java
-TypeCodec<String> codec = session.getContext().getCodecRegistry().codecFor(DataTypes.ASCII);
-ByteBuffer bytes =
- codec.encode("Touché sir, touché...", session.getContext().getProtocolVersion());
-
-session.execute(
- SimpleStatement.builder("INSERT INTO ascii_quotes (id, t) VALUES (?, ?)")
- .addPositionalValues(1, bytes)
- .build());
-```
-
-Or you could also use [prepared statements](../prepared/), which don't have this limitation since
-parameter types are known in advance. 
- -[SimpleStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SimpleStatement.html -[SimpleStatement.newInstance()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SimpleStatement.html#newInstance-java.lang.String- -[SimpleStatement.builder()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SimpleStatement.html#builder-java.lang.String- diff --git a/manual/core/temporal_types/README.md b/manual/core/temporal_types/README.md deleted file mode 100644 index 6542d5b8dac..00000000000 --- a/manual/core/temporal_types/README.md +++ /dev/null @@ -1,171 +0,0 @@ - - -## Temporal types - -### Quick overview - -This page provides more details about the various CQL time types, and the Java types they are mapped -to in the driver. - -| CQL | Java | | -|---|---|---| -|`date` | `java.time.LocalDate` || -|`time` | `java.time.LocalTime` || -|`timestamp` | `java.time.Instant` | No time zone. Use `Instant.atZone` or register [TypeCodecs.ZONED_TIMESTAMP_SYSTEM], [TypeCodecs.ZONED_TIMESTAMP_UTC] or [TypeCodecs.zonedTimestampAt()] | -|`duration` | [CqlDuration] | Custom driver type; can't be accurately represented by any of the `java.time` types. | - ------ - -### Date and time - -CQL types `date` and `time` map directly to `java.time.LocalDate` and `java.time.LocalTime`. - -These are simple, time-zone-free representations of date-only (`yyyy-mm-dd`) and time-only -(`HH:MM:SS[.fff]`) types. - -### Timestamp - -CQL type `timestamp` is the date-and-time representation, stored as a number of milliseconds since -the epoch (01/01/1970 UTC). - - -#### No time zone - -`timestamp` does **not** store a time zone. This is not always obvious because clients generally do -use one for display. 
For instance, the following CQLSH snippet is from a machine in Pacific time: - -``` -cqlsh> CREATE TABLE test(t timestamp PRIMARY KEY); -cqlsh> INSERT INTO test (t) VALUES (dateof(now())); -cqlsh> SELECT * FROM test; - - t ---------------------------------- - 2018-11-07 08:50:52.433000-0800 -``` - -It looks like the timestamp has a zone (`-0800`), but it is actually the client's. If you force -CQLSH to a different zone and observe the same data, it will be displayed differently: - -``` -$ TZ=UTC cqlsh -cqlsh> SELECT * FROM test; - - t ---------------------------------- - 2018-11-07 16:50:52.433000+0000 -``` - -Internally, Cassandra only stores the raw number of milliseconds. You can observe that with a cast: - -``` -cqlsh> SELECT cast(t as bigint) FROM test; - - cast(t as bigint) -------------------- - 1541609452433 -``` - -#### Java equivalent - -By default, the driver maps `timestamp` to `java.time.Instant`. This Java type is the closest to the -internal representation; in particular, it does not have a time zone. On the downside, this means -you can't directly extract calendar fields (year, month, etc.). You need to call `atZone` to perform -the conversion: - -```java -Row row = session.execute("SELECT t FROM test").one(); -Instant instant = row.getInstant("t"); -ZonedDateTime dateTime = instant.atZone(ZoneId.of("America/Los_Angeles")); -System.out.println(dateTime.getYear()); -``` - -Conversely, you can convert a `ZonedDateTime` back to an `Instant` with `toInstant`. 
- -If you want to automate those `atZone`/`toInstant` conversions, the driver comes with an optional -`ZonedDateTime` codec, that must be registered explicitly with the session: - -```java -CqlSession session = CqlSession.builder() - .addTypeCodecs(TypeCodecs.ZONED_TIMESTAMP_UTC) - .build(); - -Row row = session.execute("SELECT t FROM test").one(); -ZonedDateTime dateTime = row.get("t", GenericType.ZONED_DATE_TIME); -``` - -There are various constants and methods to obtain a codec instance for a particular zone: - -* [TypeCodecs.ZONED_TIMESTAMP_SYSTEM]\: system default; -* [TypeCodecs.ZONED_TIMESTAMP_UTC]\: UTC; -* [TypeCodecs.zonedTimestampAt()]\: user-provided. - -Which zone you choose is application-dependent. The driver doesn't map to `ZonedDateTime` by default -because it would have to make an arbitrary choice; we want you to think about time zones explicitly -before you decide to use that type. - -#### Millisecond-only precision - -As already stated, `timestamp` is stored as a number of milliseconds. If you try to write an -`Instant` or `ZonedDateTime` with higher precision through the driver, the sub-millisecond part will -be truncated: - -```java -CqlSession session = - CqlSession.builder() - .addTypeCodecs(TypeCodecs.ZONED_TIMESTAMP_UTC) - .build(); - -ZonedDateTime valueOnClient = ZonedDateTime.parse("2018-11-07T16:50:52.433395762Z"); - // sub-millisecond digits ^^^^^^ -session.execute( - SimpleStatement.newInstance("INSERT INTO test (t) VALUES (?)", valueOnClient)); - -ZonedDateTime valueInDb = - session.execute("SELECT * FROM test").one().get(0, GenericType.ZONED_DATE_TIME); -System.out.println(valueInDb); -// Prints "2018-11-07T16:50:52.433Z" -``` - -### Duration - -CQL type `duration` represents a period in months, days and nanoseconds. The driver maps it to a -custom type: [CqlDuration]. 
- -We deliberately avoided `java.time.Period`, because it does not contain a nanoseconds part as -`CqlDuration` does; and we also avoided `java.time.Duration`, because it represents an absolute -time-based amount, regardless of the calendar, whereas `CqlDuration` manipulates conceptual days and -months instead. Thus a `CqlDuration` of "2 months" represents a different amount of time depending -on the date to which it is applied (because months have a different number of days, and because -daylight savings rules might also apply, etc). - -`CqlDuration` implements `java.time.temporal.TemporalAmount`, so it interoperates nicely with the -JDK's built-in temporal types: - -```java -ZonedDateTime dateTime = ZonedDateTime.parse("2018-10-04T00:00-07:00[America/Los_Angeles]"); -System.out.println(dateTime.minus(CqlDuration.from("1h15s15ns"))); -// prints "2018-10-03T22:59:44.999999985-07:00[America/Los_Angeles]" -``` - -[CqlDuration]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/CqlDuration.html -[TypeCodecs.ZONED_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#ZONED_TIMESTAMP_SYSTEM -[TypeCodecs.ZONED_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#ZONED_TIMESTAMP_UTC -[TypeCodecs.zonedTimestampAt()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#zonedTimestampAt-java.time.ZoneId- diff --git a/manual/core/throttling/README.md b/manual/core/throttling/README.md deleted file mode 100644 index 275c0cb5b40..00000000000 --- a/manual/core/throttling/README.md +++ /dev/null @@ -1,169 +0,0 @@ - - -## Request throttling - -### Quick overview - -Limit session throughput. 
- -* `advanced.throttler` in the configuration; defaults to pass-through (no throttling), also - available: concurrency-based (max simultaneous requests), rate-based (max requests per time unit), - or write your own. -* metrics: `throttling.delay`, `throttling.queue-size`, `throttling.errors`. - ------ - -Throttling allows you to limit how many requests a session can execute concurrently. This is -useful if you have multiple applications connecting to the same Cassandra cluster, and want to -enforce some kind of SLA to ensure fair resource allocation. - -The request throttler tracks the level of utilization of the session, and lets requests proceed as -long as it is under a predefined threshold. When that threshold is exceeded, requests are enqueued -and will be allowed to proceed when utilization goes back to normal. - -From a user's perspective, this process is mostly transparent: any time spent in the queue is -included in the `session.execute()` or `session.executeAsync()` call. Similarly, the request timeout -encompasses throttling: it starts ticking before the request is passed to the throttler; in other -words, a request may time out while it is still in the throttler's queue, before the driver has even -tried to send it to a node. - -The only visible effect is that a request may fail with a [RequestThrottlingException], if the -throttler has determined that it can neither allow the request to proceed now, nor enqueue it; -this indicates that your session is overloaded. How you react to that is specific to your -application; typically, you could display an error asking the end user to retry later. 
- -Note that the following requests are also affected by throttling: - -* preparing a statement (either directly, or indirectly when the driver reprepares on other nodes, - or when a node comes back up -- see - [how the driver prepares](../statements/prepared/#how-the-driver-prepares)); -* fetching the next page of a result set (which happens in the background when you iterate the - synchronous variant `ResultSet`). -* fetching a [query trace](../tracing/). - -### Configuration - -Request throttling is parameterized in the [configuration](../configuration/) under -`advanced.throttler`. There are various implementations, detailed in the following sections: - -#### Pass through - -``` -datastax-java-driver { - advanced.throttler { - class = PassThroughRequestThrottler - } -} -``` - -This is a no-op implementation: requests are simply allowed to proceed all the time, never enqueued. - -Note that you will still hit a limit if all your connections run out of stream ids. In that case, -requests will fail with an [AllNodesFailedException], with the `getErrors()` method returning a -[BusyConnectionException] for each node. See the [connection pooling](../pooling/) page. - -#### Concurrency-based - -``` -datastax-java-driver { - advanced.throttler { - class = ConcurrencyLimitingRequestThrottler - - # Note: the values below are for illustration purposes only, not prescriptive - max-concurrent-requests = 10000 - max-queue-size = 100000 - } -} -``` - -This implementation limits the number of requests that are allowed to execute simultaneously. -Additional requests get enqueued up to the configured limit. Every time an active request completes -(either by succeeding, failing or timing out), the oldest enqueued request is allowed to proceed. 
- -Make sure you pick a threshold that is consistent with your pooling settings; the driver should -never run out of stream ids before reaching the maximum concurrency, otherwise requests will fail -with [BusyConnectionException] instead of being throttled. The total number of stream ids is a -function of the number of connected nodes and the `connection.pool.*.size` and -`connection.max-requests-per-connection` configuration options. Keep in mind that aggressive -speculative executions and timeout options can inflate stream id consumption, so keep a safety -margin. One good way to get this right is to track the `pool.available-streams` [metric](../metrics) -on every node, and make sure it never reaches 0. See the [connection pooling](../pooling/) page. - -#### Rate-based - -``` -datastax-java-driver { - advanced.throttler { - class = RateLimitingRequestThrottler - - # Note: the values below are for illustration purposes only, not prescriptive - max-requests-per-second = 5000 - max-queue-size = 50000 - drain-interval = 1 millisecond - } -} -``` - -This implementation tracks the rate at which requests start, and enqueues when it exceeds the -configured threshold. - -With this approach, we can't dequeue when requests complete, because having less active requests -does not necessarily mean that the rate is back to normal. So instead the throttler re-checks the -rate periodically and dequeues when possible, this is controlled by the `drain-interval` option. -Picking the right interval is a matter of balance: too low might consume too many resources and only -dequeue a few requests at a time, but too high will delay your requests too much; start with a few -milliseconds and use the `cql-requests` [metric](../metrics/) to check the impact on your latencies. - -Like with the concurrency-based throttler, you should make sure that your target rate is in line -with the pooling options; see the recommendations in the previous section. 
- -### Monitoring - -Enable the following [metrics](../metrics/) to monitor how the throttler is performing: - -``` -datastax-java-driver { - advanced.metrics.session.enabled = [ - # How long requests are being throttled (exposed as a Timer). - # - # This is the time between the start of the session.execute() call, and the moment when the - # throttler allows the request to proceed. - throttling.delay, - - # The size of the throttling queue (exposed as a Gauge). - # - # This is the number of requests that the throttler is currently delaying in order to - # preserve its SLA. This metric only works with the built-in concurrency- and rate-based - # throttlers; in other cases, it will always be 0. - throttling.queue-size, - - # The number of times a request was rejected with a RequestThrottlingException (exposed as a - # Counter) - throttling.errors, - ] -} -``` - -If you enable `throttling.delay`, make sure to also check the associated extra options to correctly -size the underlying histograms (`metrics.session.throttling.delay.*`). - -[RequestThrottlingException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/RequestThrottlingException.html -[AllNodesFailedException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AllNodesFailedException.html -[BusyConnectionException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/BusyConnectionException.html diff --git a/manual/core/tracing/README.md b/manual/core/tracing/README.md deleted file mode 100644 index f9beca8e49b..00000000000 --- a/manual/core/tracing/README.md +++ /dev/null @@ -1,140 +0,0 @@ - - -## Query tracing - -### Quick overview - -Detailed information about the server-side internals for a given query. - -* disabled by default, must enable per statement with [Statement.setTracing()] or - [StatementBuilder.setTracing()]. 
-* retrieve with [ResultSet.getExecutionInfo().getTracingId()][ExecutionInfo.getTracingId()] and
- [getQueryTrace()][ExecutionInfo.getQueryTrace()].
-* `advanced.request.trace` in the configuration: fine-grained control over how the driver fetches
- the trace data.
-
------
-
-To help troubleshooting performance, Cassandra offers the ability to *trace* a query, in other words
-capture detailed information about the internal operations performed by all nodes in the cluster
-in order to build the response.
-
-The driver provides a way to enable tracing on a particular statement, and an API to examine the
-results.
-
-### Enabling tracing
-
-Set the tracing flag on the `Statement` instance. There are various ways depending on how you build
-it (see [statements](../statements/) for more details):
-
-```java
-// Setter-based:
-Statement statement =
- SimpleStatement.newInstance("SELECT * FROM users WHERE id = 1234").setTracing(true);
-
-// Builder-based:
-Statement statement =
- SimpleStatement.builder("SELECT * FROM users WHERE id = 1234").setTracing().build();
-```
-
-Tracing is supposed to be run on a small percentage of requests only. Do not enable it on every
-request, you would risk overwhelming your cluster.
-
-### Retrieving tracing data
-
-Once you've executed a statement with tracing enabled, tracing data is available through the
-[ExecutionInfo]:
-
-```java
-ResultSet rs = session.execute(statement);
-ExecutionInfo executionInfo = rs.getExecutionInfo();
-```
-
-#### Tracing id
-
-Cassandra assigns a unique identifier to each query trace. It is returned with the query results,
-and therefore available immediately:
-
-```java
-UUID tracingId = executionInfo.getTracingId();
-```
-
-This is the primary key in the `system_traces.sessions` and `system_traces.events` tables where
-Cassandra stores tracing data (you don't need to query those tables manually, see the next section). 
-
-If you call `getTracingId()` for a statement that didn't have tracing enabled, the resulting id will
-be `null`.
-
-#### Tracing information
-
-To get to the details of the trace, retrieve the [QueryTrace] instance:
-
-```java
-QueryTrace trace = executionInfo.getQueryTrace();
-
-// Or asynchronous equivalent:
-CompletionStage<QueryTrace> traceFuture = executionInfo.getQueryTraceAsync();
-```
-
-This triggers background queries to fetch the information from the `system_traces.sessions` and
-`system_traces.events` tables. Because Cassandra writes that information asynchronously, it might
-not be immediately available, therefore the driver will retry a few times if necessary. You can
-control this behavior through the configuration:
-
-```
-# These options can be changed at runtime, the new values will be used for requests issued after
-# the change. They can be overridden in a profile.
-datastax-java-driver.advanced.request.trace {
- # How many times the driver will attempt to fetch the query if it is not ready yet.
- attempts = 5
-
- # The interval between each attempt.
- interval = 3 milliseconds
-
- # The consistency level to use for trace queries.
- # Note that the default replication strategy for the system_traces keyspace is SimpleStrategy
- # with RF=2, therefore LOCAL_ONE might not work if the local DC has no replicas for a given
- # trace id.
- consistency = ONE
-}
-```
-
-Once you have the `QueryTrace` object, access its properties for relevant information, for example:
-
-```java
-System.out.printf(
- "'%s' to %s took %dμs%n",
- trace.getRequestType(), trace.getCoordinator(), trace.getDurationMicros());
-for (TraceEvent event : trace.getEvents()) {
- System.out.printf(
- " %d - %s - %s%n",
- event.getSourceElapsedMicros(), event.getSource(), event.getActivity());
-}
-```
-
-If you call `getQueryTrace()` for a statement that didn't have tracing enabled, an exception is
-thrown. 
- -[ExecutionInfo]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html -[QueryTrace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/QueryTrace.html -[Statement.setTracing()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setTracing-boolean- -[StatementBuilder.setTracing()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/StatementBuilder.html#setTracing-- -[ExecutionInfo.getTracingId()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getTracingId-- -[ExecutionInfo.getQueryTrace()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getQueryTrace-- diff --git a/manual/core/tuples/README.md b/manual/core/tuples/README.md deleted file mode 100644 index d0684b77569..00000000000 --- a/manual/core/tuples/README.md +++ /dev/null @@ -1,162 +0,0 @@ - - -## Tuples - -### Quick overview - -Ordered set of anonymous, typed fields, e.g. `tuple`, `(1, 'a', 1.0)`. - -* `row.getTupleValue()` / `boundStatement.setTupleValue()`. -* positional getters and setters: `tupleValue.getInt(0)`, `tupleValue.setString(1, "a")`... -* getting hold of the [TupleType]: statement or session metadata, `tupleValue.getType()`, or - `DataTypes.tupleOf()`. -* creating a value from a type: `tupleType.newValue()`. - ------ - -[CQL tuples][cql_doc] are ordered sets of anonymous, typed fields. 
They can be used as a column type
-in tables, or a field type in [user-defined types](../udts/):
-
-```
-CREATE TABLE ks.collect_things (
- pk int,
- ck1 text,
- ck2 text,
- v tuple<int, text, float>,
- PRIMARY KEY (pk, ck1, ck2)
-);
-```
-
-### Fetching tuples from results
-
-The driver maps tuple columns to the [TupleValue] class, which exposes getters and setters to access
-individual fields by index:
-
-```java
-Row row = session.execute("SELECT v FROM ks.collect_things WHERE pk = 1").one();
-
-TupleValue tupleValue = row.getTupleValue("v");
-int field0 = tupleValue.getInt(0);
-String field1 = tupleValue.getString(1);
-Float field2 = tupleValue.getFloat(2);
-```
-
-### Using tuples as parameters
-
-Statements may contain tuples as bound values:
-
-```java
-PreparedStatement ps =
- session.prepare(
- "INSERT INTO ks.collect_things (pk, ck1, ck2, v) VALUES (:pk, :ck1, :ck2, :v)");
-```
-
-To create a new tuple value, you must first have a reference to its [TupleType]. There are various
-ways to get it:
-
-* from the statement's metadata
-
- ```java
- TupleType tupleType = (TupleType) ps.getVariableDefinitions().get("v").getType();
- ```
-
-* from the driver's [schema metadata](../metadata/schema/):
-
- ```java
- TupleType tupleType =
- (TupleType)
- session
- .getMetadata()
- .getKeyspace("ks")
- .getTable("collect_things")
- .getColumn("v")
- .getType();
- ```
-
-* from another tuple value:
-
- ```java
- TupleType tupleType = tupleValue.getType();
- ```
-
-* or creating it from scratch:
-
- ```java
- TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT, DataTypes.FLOAT);
- ```
-
- Note that the resulting type is [detached](../detachable_types). 
-
-Once you have the type, call `newValue()` and set the fields:
-
-```java
-TupleValue tupleValue =
- tupleType.newValue().setInt(0, 1).setString(1, "hello").setFloat(2, 2.3f);
-
-// Or as a one-liner for convenience:
-TupleValue tupleValue = tupleType.newValue(1, "hello", 2.3f);
-```
-
-And bind your tuple value like any other type:
-
-```java
-BoundStatement bs =
- ps.boundStatementBuilder()
- .setInt("pk", 1)
- .setString("ck1", "1")
- .setString("ck2", "1")
- .setTupleValue("v", tupleValue)
- .build();
-session.execute(bs);
-```
-
-Tuples are also used for multi-column `IN` restrictions (usually for tables with composite
-clustering keys):
-
-```java
-PreparedStatement ps =
- session.prepare("SELECT * FROM ks.collect_things WHERE pk = 1 and (ck1, ck2) IN (:choice1, :choice2)");
-
-TupleType tupleType = DataTypes.tupleOf(DataTypes.TEXT, DataTypes.TEXT);
-BoundStatement bs = ps.boundStatementBuilder()
- .setTupleValue("choice1", tupleType.newValue("a", "b"))
- .setTupleValue("choice2", tupleType.newValue("c", "d"))
- .build();
-```
-
-If you bind the whole list of choices as a single variable, a list of tuple values is expected:
-
-```java
-PreparedStatement ps =
- // Note the absence of parentheses around ':choices'
- session.prepare("SELECT * FROM ks.collect_things WHERE pk = 1 and (ck1, ck2) IN :choices");
-
-TupleType tupleType = DataTypes.tupleOf(DataTypes.TEXT, DataTypes.TEXT);
-List<TupleValue> choices = new ArrayList<>();
-choices.add(tupleType.newValue("a", "b"));
-choices.add(tupleType.newValue("c", "d"));
-BoundStatement bs =
- ps.boundStatementBuilder().setList("choices", choices, TupleValue.class).build();
-```
-
-[cql_doc]: https://docs.datastax.com/en/cql/3.3/cql/cql_reference/tupleType.html
-
-[TupleType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/TupleType.html
-[TupleValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/TupleValue.html diff --git a/manual/core/udts/README.md 
b/manual/core/udts/README.md deleted file mode 100644 index a22057030ae..00000000000 --- a/manual/core/udts/README.md +++ /dev/null @@ -1,158 +0,0 @@ - - -## User-defined types - -### Quick overview - -Ordered set of named, typed fields, e.g. `{ street: '1 Main St', zip: 12345}`. - -* `row.getUdtValue()` / `boundStatement.setUdtValue()`. -* positional or named getters and setters: `udtValue.getString("street")`, - `udtValue.setInt(1, 12345)`... -* getting hold of the [UserDefinedType]: - * statement or session metadata, or `udtValue.getType()`. - * `UserDefinedTypeBuilder` (not recommended, dangerous if you build a type that doesn't match the - database schema). -* creating a value from a type: `userDefinedType.newValue()`. - ------ - - -[CQL user-defined types][cql_doc] are ordered sets of named, typed fields. They must be defined in a -keyspace: - -``` -CREATE TYPE ks.type1 ( - a int, - b text, - c float); -``` - -And can then be used as a column type in tables, or a field type in other user-defined types in that -keyspace: - -``` -CREATE TABLE ks.collect_things ( - pk int, - ck1 text, - ck2 text, - v frozen, - PRIMARY KEY (pk, ck1, ck2) -); - -CREATE TYPE ks.type2 (v frozen); -``` - -### Fetching UDTs from results - -The driver maps UDT columns to the [UdtValue] class, which exposes getters and setters to access -individual fields by index or name: - -```java -Row row = session.execute("SELECT v FROM ks.collect_things WHERE pk = 1").one(); - -UdtValue udtValue = row.getUdtValue("v"); -int a = udtValue.getInt(0); -String b = udtValue.getString("b"); -Float c = udtValue.getFloat(2); -``` - -### Using UDTs as parameters - -Statements may contain UDTs as bound values: - -```java -PreparedStatement ps = - session.prepare( - "INSERT INTO ks.collect_things (pk, ck1, ck2, v) VALUES (:pk, :ck1, :ck2, :v)"); -``` - -To create a new UDT value, you must first have a reference to its [UserDefinedType]. 
There are -various ways to get it: - -* from the statement's metadata - - ```java - UserDefinedType udt = (UserDefinedType) ps.getVariableDefinitions().get("v").getType(); - ``` - -* from the driver's [schema metadata](../metadata/schema/): - - ```java - UserDefinedType udt = - session.getMetadata() - .getKeyspace("ks") - .flatMap(ks -> ks.getUserDefinedType("type1")) - .orElseThrow(() -> new IllegalArgumentException("Missing UDT definition")); - ``` - -* from another UDT value: - - ```java - UserDefinedType udt = udtValue.getType(); - ``` - -Note that the driver's official API does not expose a way to build [UserDefinedType] instances -manually. This is because the type's internal definition must precisely match the database schema; -if it doesn't (for example if the fields are not in the same order), you run the risk of inserting -corrupt data, that you won't be able to read back. There is still a way to do it with the driver, -but it's part of the [internal API](../../api_conventions/): - -```java -// Advanced usage: make sure you understand the risks -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; - -UserDefinedType udt = - new UserDefinedTypeBuilder("ks", "type1") - .withField("a", DataTypes.INT) - .withField("b", DataTypes.TEXT) - .withField("c", DataTypes.FLOAT) - .build(); -``` - -Note that a manually created type is [detached](../detachable_types). 
- - -Once you have the type, call `newValue()` and set the fields: - -```java -UdtValue udtValue = udt.newValue().setInt(0, 1).setString(1, "hello").setFloat(2, 2.3f); - -// Or as a one-liner for convenience: -UdtValue udtValue = udt.newValue(1, "hello", 2.3f); -``` - -And bind your UDT value like any other type: - -```java -BoundStatement bs = - ps.boundStatementBuilder() - .setInt("pk", 1) - .setString("ck1", "1") - .setString("ck2", "1") - .setUdtValue("v", udtValue) - .build(); -session.execute(bs); -``` - -[cql_doc]: https://docs.datastax.com/en/cql/3.3/cql/cql_reference/cqlRefUDType.html - -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html -[UserDefinedType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/UserDefinedType.html diff --git a/manual/developer/.nav b/manual/developer/.nav deleted file mode 100644 index 0bb954b1293..00000000000 --- a/manual/developer/.nav +++ /dev/null @@ -1,5 +0,0 @@ -common -native_protocol -netty_pipeline -request_execution -admin diff --git a/manual/developer/README.md b/manual/developer/README.md deleted file mode 100644 index b6e0bda16ed..00000000000 --- a/manual/developer/README.md +++ /dev/null @@ -1,38 +0,0 @@ - - -## Developer docs - -This section explains how driver internals work. The intended audience is: - -* driver developers and contributors; -* framework authors, or architects who want to write advanced customizations and integrations. - -Most of this material will involve "internal" packages; see [API conventions](../api_conventions/) -for more explanations. - -We recommend reading about the [common infrastructure](common/) first. 
Then the documentation goes -from lowest to highest level: - -* [Native protocol layer](native_protocol/): binary encoding of the TCP payloads; -* [Netty pipeline](netty_pipeline/): networking and low-level stream management; -* [Request execution](request_execution/): higher-level handling of user requests and responses; -* [Administrative tasks](admin/): everything else (cluster state and metadata). - -If you're reading this on GitHub, the `.nav` file in each directory contains a suggested order. diff --git a/manual/developer/admin/README.md b/manual/developer/admin/README.md deleted file mode 100644 index 0ebd9e2d746..00000000000 --- a/manual/developer/admin/README.md +++ /dev/null @@ -1,342 +0,0 @@ - - -## Administrative tasks - -Aside from the main task of [executing user requests](../request_execution), the driver also needs -to track cluster state and metadata. This is done with a number of administrative components: - -```ditaa - +---------------+ - | DriverChannel | - +-------+-------+ - |1 - | topology -+-----------------+ query +---------+---------+ events -| TopologyMonitor +------+---->| ControlConnection +-----------------+ -+-----------------+ | +---------+---------+ | - ^ | | | - | | | topology+channel V - get | +---------+ refresh| events +----------+ -node info| | schema | +------------+ EventBus | - | | | | +-+--------+ -+--------+-----+--+ | | ^ ^ -| MetadataManager |<-------+-------------+ | node| | -+--------+-------++ | | state| | - | | | add/remove v events| | - |1 | | node +------------------+ | | - +-----+----+ | +------------+ NodeStateManager +------+ | - | Metadata | | +------------------+ | - +----------+ | | - +-------------------------------------------------------+ - metadata changed events -``` - -Note: the event bus is covered in the [common infrastructure](../common/event_bus) section. 
- -### Control connection - -The goal of the control connection is to maintain a dedicated `DriverChannel` instance, used to: - -* listen for server-side protocol events: - * topology events (`NEW_NODE`, `REMOVED_NODE`) and status events (`UP`, `DOWN`) are published on - the event bus, to be processed by other components; - * schema events are propagated directly to the metadata manager, to trigger a refresh; -* provide a way to query system tables. In practice, this is used by: - * the topology monitor, to read node information from `system.local` and `system.peers`; - * the metadata manager, to read schema metadata from `system_schema.*`. - -It has its own reconnection mechanism (if the channel goes down, a new one will be opened to another -node in the cluster) and some logic for initialization and shutdown. - -Note that the control connection is really just an implementation detail of the metadata manager and -topology monitor: if those components are overridden with custom versions that use other means to -get their data, the driver will detect it and not initialize the control connection (at the time of -writing, the session also references the control connection directly, but that's a bug: -[JAVA-2473](https://datastax-oss.atlassian.net/browse/JAVA-2473)). - -### Metadata manager - -This component is responsible for maintaining the contents of -[session.getMetadata()](../../core/metadata/). - -One big improvement in driver 4 is that the `Metadata` object is immutable and updated atomically; -this guarantees a consistent view of the cluster at a given point in time. For example, if a -keyspace name is referenced in the token map, there will always be a corresponding -`KeyspaceMetadata` in the schema metadata. - -`MetadataManager` keeps the current `Metadata` instance in a volatile field. Each transition is -managed by a `MetadataRefresh` object that computes the new metadata, along with an optional list of -events to publish on the bus (e.g. 
table created, keyspace removed, etc.) The new metadata is then -written back to the volatile field. `MetadataManager` follows the [confined inner -class](../common/concurrency/#cold-path) pattern to ensure that all refreshes are applied serially, -from a single admin thread. This guarantees that two refreshes can't start from the same initial -state and overwrite each other. - -There are various types of refreshes targeting nodes, the schema or the token map. - -Note that, unlike driver 3, we only do full schema refreshes. This simplifies the code considerably, -and thanks to debouncing this should not affect performance. The schema refresh process uses a few -auxiliary components that may have different implementations depending on the Cassandra version: - -* `SchemaQueries`: launches the schema queries asynchronously, and assemble the result in a - `SchemaRows`; -* `SchemaParser`: turns the `SchemaRows` into the `SchemaRefresh`. - -When the metadata manager needs node-related data, it queries the topology monitor. When it needs -schema-related data, it uses the control connection directly to issue its queries. - -### Topology monitor - -`TopologyMonitor` abstracts how we get information about nodes in the cluster: - -* refresh the list of nodes; -* refresh an individual node, or load the information of a newly added node; -* check schema agreement; -* emit `TopologyEvent` instances on the bus when we get external signals suggesting topology changes - (node added or removed), or status changes (node down or up). - -The built-in implementation uses the control connection to query `system.local` and `system.peers`, -and listen to gossip events. - -### Node state manager - -`NodeStateManager` tracks the state of the nodes in the cluster. - -We can't simply trust gossip events because they are not always reliable (the coordinator can become -isolated and think other nodes are down). 
Instead, the driver uses more elaborate rules that combine -external signals with observed internal state: - -* as long as we have an active connection to a node, it is considered up, whatever gossip events - say; -* if all connections to a node are lost, and its pool has started reconnecting, it gets marked down - (we check the reconnection because the pool could have shut down for legitimate reasons, like the - node distance changing to IGNORED); -* a node is marked back up when the driver has successfully reopened at least one connection; -* if the driver is not actively trying to connect to a node (for example if it is at distance - IGNORED), then gossip events are applied directly. - -See the javadocs of `NodeState` and `TopologyEvent`, as well as the `NodeStateManager` -implementation itself, for more details. - -#### Topology events vs. node state events - -These two event types are related, but they're used at different stages: - -* `TopologyEvent` is an external signal about the state of a node (by default, a `TOPOLOGY_CHANGE` - or `STATUS_CHANGE` gossip event received on the control connection). This is considered as a mere - suggestion, that the driver may or may not decide to follow; -* `NodeStateEvent` is an actual decision made by the driver to change a node to a given state. - -`NodeStateManager` essentially transforms topology events, as well as other internal signals, into -node state events. - -In general, other driver components only react to node state events, but there are a few exceptions: -for example, if a connection pool is reconnecting and the next attempt is scheduled in 5 minutes, -but a SUGGEST_UP topology event is emitted, the pool tries to reconnect immediately. - -The best way to find where each event is used is to do a usage search of the event type. - -### How admin components work together - -Most changes to the cluster state will involve the coordinated effort of multiple admin components. 
-Here are a few examples: - -#### A new node gets added - -```ditaa -+-----------------+ +--------+ +----------------+ +---------------+ +---------------+ -|ControlConnection| |EventBus| |NodeStateManager| |MetadataManager| |TopologyMonitor| -+--------+--------+ +---+----+ +--------+-------+ +-------+-------+ +-------+-------+ - | | | | | -+--------+-------+ | | | | -|Receive NEW_NODE| | | | | -|gossip event | | | | | -| {d}| | | | | -+--------+-------+ | | | | - | | | | | - |TopologyEvent( | | | | - | SUGGEST_ADDED)| | | | - +--------------->| | | | - | |onTopologyEvent| | | - | +-------------->| | | - | | +------+-------+ | | - | | |check node not| | | - | | |known already | | | - | | | {d}| | | - | | +------+-------+ | | - | | | | | - | | | addNode | | - | | +---------------->| | - | | | | getNewNodeInfo | - | | | +---------------->| - | | | | | - | query(SELECT FROM system.peers) | - |<-------------------------------------------------------------------+ - +------------------------------------------------------------------->| - | | | |<----------------+ - | | | +-------+--------+ | - | | | |create and apply| | - | | | |AddNodeRefresh | | - | | | | {d}| | - | | | +-------+--------+ | - | | | | | - | | NodeChangeEvent(ADDED) | | - | |<--------------------------------+ | - | | | | | -``` - -At this point, other driver components listening on the event bus will get notified of the addition. -For example, `DefaultSession` will initialize a connection pool to the new node. 
- -#### A new table gets created - -```ditaa - +-----------------+ +---------------+ +---------------+ +--------+ - |ControlConnection| |MetadataManager| |TopologyMonitor| |EventBus| - +--------+--------+ +-------+-------+ +-------+-------+ +---+----+ - | | | | -+----------+----------+ | | | -|Receive SCHEMA_CHANGE| | | | -|gossip event | | | | -| {d} | | | | -+----------+----------+ | | | - | | | | - | refreshSchema | | | - +------------------------------->| | | - | |checkSchemaAgreement | | - | +-------------------->| | - | | | | - | query(SELECT FROM system.local/peers) | | - |<-----------------------------------------------------+ | - +----------------------------------------------------->| | - | | | | - | |<--------------------+ | - |query(SELECT FROM system_schema)| | | - |<-------------------------------+ | | - +------------------------------->| | | - | +-------+--------+ | | - | |Parse results | | | - | |Create and apply| | | - | |SchemaRefresh | | | - | | {d}| | | - | +-------+--------+ | | - | | | | - | | TableChangeEvent(CREATED) | - | +---------------------------------->| - | | | | -``` - -#### The last connection to an active node drops - -```ditaa - +-----------+ +--------+ +----------------+ +----+ +---------------+ - |ChannelPool| |EventBus| |NodeStateManager| |Node| |MetadataManager| - +-----+-----+ +---+----+ +-------+--------+ +-+--+ +-------+-------+ - | | | | | - |ChannelEvent(CLOSED) | | | | - +----------------------->| | | | - | |onChannelEvent | | | - +------+-----+ +--------------->| | | - | start | | |decrement | | - |reconnecting| | |openConnections | | - | {d}| | +--------------->| | - +------+-----+ | | | | - |ChannelEvent( | | | | - | RECONNECTION_STARTED) | | | | - +----------------------->| | | | - | |onChannelEvent | | | - | +--------------->| | | - | | |increment | | - | | |reconnections | | - | | +--------------->| | - | | | | | - | | +--------+--------+ | | - | | |detect node has | | | - | | |0 connections and| | | - | | |is 
reconnecting | | | - | | | {d} | | | - | | +--------+--------+ | | - | | |set state DOWN | | - | | +--------------->| | - | |NodeStateEvent( | | | - | | DOWN) | | | - +------+-----+ |<---------------+ | | - |reconnection| | | | | - | succeeds | | | | | - | {d}| | | | | - +------+-----+ | | | | - |ChannelEvent(OPENED) | | | | - +----------------------->| | | | - | |onChannelEvent | | | - | +--------------->| | | - | | |increment | | - | | |openConnections | | - | | +--------------->| | - | | | | | - | | +--------+--------+ | | - | | |detect node has | | | - | | |1 connection | | | - | | | {d} | | | - | | +--------+--------+ | | - | | | refreshNode | | - | | +---------------------------->| - | | | | | - | | |set state UP | | - | | +--------------->| | - | |NodeStateEvent( | | | - | | UP) | | | - | |<---------------+ | | - |ChannelEvent( | | | | - | RECONNECTION_STOPPED) | | | | - +----------------------->| | | | - | |onChannelEvent | | | - | +--------------->| | | - | | |decrement | | - | | |reconnections | | - | | +--------------->| | - | | | | | -``` - -### Extension points - -#### TopologyMonitor - -This is a standalone component because some users have asked for a way to use their own discovery -service instead of relying on system tables and gossip (see -[JAVA-1082](https://datastax-oss.atlassian.net/browse/JAVA-1082)). - -A custom implementation can be plugged by [extending the -context](../common/context/#overriding-a-context-component) and overriding `buildTopologyMonitor`. -It should: - -* implement the methods of `TopologyMonitor` by querying the discovery service; -* use some notification mechanism (or poll the service periodically) to detect when nodes go up or - down, or get added or removed, and emit the corresponding `TopologyEvent` instances on the bus. - -Read the javadocs for more details; in particular, `NodeInfo` explains how the driver uses the -information returned by the topology monitor. 
- -#### MetadataManager - -It's less likely that this will be overridden directly. But the schema querying and parsing logic is -abstracted behind two factories that handle the differences between Cassandra versions: -`SchemaQueriesFactory` and `SchemaParserFactory`. These are pluggable by [extending the -context](../common/context/#overriding-a-context-component) and overriding the corresponding -`buildXxx` methods. diff --git a/manual/developer/common/.nav b/manual/developer/common/.nav deleted file mode 100644 index a841aca40ca..00000000000 --- a/manual/developer/common/.nav +++ /dev/null @@ -1,3 +0,0 @@ -context -concurrency -event_bus diff --git a/manual/developer/common/README.md b/manual/developer/common/README.md deleted file mode 100644 index 13ad8639e62..00000000000 --- a/manual/developer/common/README.md +++ /dev/null @@ -1,28 +0,0 @@ - - -## Common infrastructure - -This covers utilities or concept that are shared throughout the codebase: - -* the [context](context/) is what glues everything together, and your primary entry point to extend - the driver. -* we explain the two major approaches to deal with [concurrency](concurrency/) in the driver. -* the [event bus](event_bus/) is used to decouple some of the internal components through - asynchronous messaging. diff --git a/manual/developer/common/concurrency/README.md b/manual/developer/common/concurrency/README.md deleted file mode 100644 index fb493930d6e..00000000000 --- a/manual/developer/common/concurrency/README.md +++ /dev/null @@ -1,145 +0,0 @@ - - -## Concurrency - -The driver is a highly concurrent environment. We try to use thread confinement to simplify the -code, when that does not impact performance. - -### Hot path - -The hot path is everything that happens for a `session.execute` call. In a typical client -application, this is where the driver will likely spend the majority of its time, so it must be -fast. - -Write path: - -1. 
convert the statement into a protocol-level `Message` (`CqlRequestHandler` constructor); -2. find a node and a connection, and write the message to it (`CqlRequestHandler.sendRequest`); -3. assign a stream id and wrap the message into a frame (`InflightHandler.write`); -4. encode the frame into a binary payload (`FrameEncoder`). - -Read path: - -1. decode the binary payload into a frame (`FrameDecoder`); -2. find the handler that corresponds to the stream id (`InFlightHandler.channelRead`); -3. complete the client's future (`CqlRequestHandler.NodeResponseCallback.onResponse`). - -Various policies are also invoked along the way (load balancing, retry, speculative execution, -timestamp generator...), they are considered on the hot path too. - -Steps 1 and 2 of the write path happen on the client thread, and 3 and 4 on the Netty I/O thread -(which is one of the threads in `NettyOptions.ioEventLoopGroup()`). -On the read path, everything happens on the Netty I/O thread. Beyond that, we want to avoid context -switches for performance reasons: in early prototypes, we tried confining `CqlRequestHandler` to a -particular thread, but that did not work well; so you will find that the code is fairly similar to -driver 3 in terms of concurrency control (reliance on atomic structures, volatile fields, etc). - -Note: code on the hot path should prefer the `TRACE` log level. - -### Cold path - -The cold path is everything else: initialization and shutdown, metadata refreshes, tracking node -states, etc. They will typically be way less frequent than user requests, so we can tolerate a small -performance hit in order to make concurrency easier to handle. 
- -One pattern we use a lot is a confined inner class: - -```java -public class ControlConnection { - // some content omitted for brevity - - private final EventExecutor adminExecutor; - private final SingleThreaded singleThreaded; - - // Called from other components, from any thread - public void reconnectNow() { - RunOrSchedule.on(adminExecutor, singleThreaded::reconnectNow); - } - - private class SingleThreaded { - private void reconnectNow() { - assert adminExecutor.inEventLoop(); - // this method is only ever called from one thread, much easier to handle concurrency - } - } -} -``` - -Public outer methods such as `reconnectNow()` are called concurrently. But they delegate to a method -of the internal class, that always runs on the same `adminExecutor` thread. `RunOrSchedule.on` calls -the method directly if we're already on the target thread, otherwise it schedules a task. If we need -to propagate a result, the outer method injects a future that the inner method completes. - -`adminExecutor` is picked randomly from `NettyOptions.adminEventExecutorGroup()` at construction -time. - -Confining `SingleThreaded` simplifies the code tremendously: we can use regular, non-volatile -fields, and methods are guaranteed to always run in isolation, eliminating subtle race conditions -(this idea was borrowed from actor systems). - -### Non-blocking - -Whether on the hot or cold path, internal code is almost 100% lock-free. The driver guarantees on -lock-freedom are [detailed](../../../core/non_blocking) in the core manual. - -If an internal component needs to execute a query, it does so asynchronously, and registers -callbacks to process the results. Examples of this can be found in `ReprepareOnUp` and -`DefaultTopologyMonitor` (among others). 
- -The only place where the driver blocks is when using the synchronous API (methods declared in -[`SyncCqlSession`]), and when calling other synchronous wrapper methods in the public API, for -example, [`ExecutionInfo.getQueryTrace()`]: - -```java -public interface ExecutionInfo { - // some content omitted for brevity - - default QueryTrace getQueryTrace() { - BlockingOperation.checkNotDriverThread(); - return CompletableFutures.getUninterruptibly(getQueryTraceAsync()); - } -} -``` - -When a public API method is blocking, this is generally clearly stated in its javadocs. - -[`ExecutionInfo.getQueryTrace()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getQueryTrace-- -[`SyncCqlSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html - -`BlockingOperation` is a utility to check that those methods aren't called on I/O threads, which -could introduce deadlocks. - -Keeping the internals fully asynchronous is another major improvement over driver 3, where internal -requests were synchronous, and required multiple internal executors to avoid deadlocks. - -In driver 4, there are only two executors: `NettyOptions.ioEventLoopGroup()` and -`NettyOptions.adminEventLoopGroup()`, that are guaranteed to never run blocking tasks. They can be -shared with application code, or across multiple sessions, or can even be one and the same (in -theory, it's possible to use a single 1-thread executor, although there's probably no practical -reason to do that). - -To be exhaustive, `NettyOptions.getTimer()` also uses its own thread; we tried scheduling request -timeouts and speculative executions on I/O threads in early alphas, but that didn't perform as well -as Netty's `HashedWheelTimer`. 
- -So the total number of threads created by a session is -``` -advanced.netty.io-group.size + advanced.netty.admin-group.size + 1 -``` diff --git a/manual/developer/common/context/README.md b/manual/developer/common/context/README.md deleted file mode 100644 index e20d5ad0ddb..00000000000 --- a/manual/developer/common/context/README.md +++ /dev/null @@ -1,141 +0,0 @@ - - -## Driver context - -The context holds the driver's internal components. It is exposed in the public API as -`DriverContext`, accessible via `session.getContext()`. Internally, the child interface -`InternalDriverContext` adds access to more components; finally, `DefaultDriverContext` is the -implementing class. - -### The dependency graph - -Most components initialize lazily (see `LazyReference`). They also reference each other, typically -by taking the context as a constructor argument, and extracting the dependencies they need: - -```java -public DefaultTopologyMonitor(InternalDriverContext context) { - ... - this.controlConnection = context.getControlConnection(); -} -``` - -This avoids having to handle the initialization order ourselves. It is also convenient for unit -tests: you can run a component in isolation by mocking all of its dependencies. - -Obviously, things won't go well if there are cyclic dependencies; if you make changes to the -context, you can set a system property to check the dependency graph, it will throw if a cycle is -detected (see `CycleDetector`): - -``` --Dcom.datastax.oss.driver.DETECT_CYCLES=true -``` - -This is disabled by default, because we don't expect it to be very useful outside of testing cycles. - -### Why not use a DI framework? - -As should be clear by now, the context is a poor man's Dependency Injection framework. 
We -deliberately avoided third-party solutions: - -* to keep things as simple as possible, -* to avoid an additional library dependency, -* to allow end users to access components and add their own (which wouldn't work well with - compile-time approaches like Dagger). - -### Overriding a context component - -The basic approach to plug in a custom internal component is to subclass the context. - -For example, let's say you wrote a custom `NettyOptions` implementation (maybe you have multiple -sessions, and want to reuse the event loop groups instead of recreating them every time): - -```java -public class CustomNettyOptions implements NettyOptions { - ... -} -``` - -In the default context, here's how the component is managed: - -```java -public class DefaultDriverContext { - - // some content omitted for brevity - - private final LazyReference nettyOptionsRef = - new LazyReference<>("nettyOptions", this::buildNettyOptions, cycleDetector); - - protected NettyOptions buildNettyOptions() { - return new DefaultNettyOptions(this); - } - - @NonNull - @Override - public NettyOptions getNettyOptions() { - return nettyOptionsRef.get(); - } -} -``` - -To switch in your implementation, you only need to override the build method: - -```java -public class CustomContext extends DefaultDriverContext { - - public CustomContext(DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - super(configLoader, programmaticArguments); - } - - @Override - protected NettyOptions buildNettyOptions() { - return new CustomNettyOptions(this); - } -} -``` - -Then you need a way to create a session that uses your custom context. 
The session builder is -extensible as well: - -```java -public class CustomBuilder extends SessionBuilder { - - @Override - protected DriverContext buildContext( - DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { - return new CustomContext(configLoader, programmaticArguments); - } - - @Override - protected CqlSession wrap(@NonNull CqlSession defaultSession) { - // Nothing to do here, nothing changes on the session type - return defaultSession; - } -} -``` - -Finally, you can use your custom builder like the regular `CqlSession.builder()`, it inherits all -the methods: - -```java -CqlSession session = new CustomBuilder() - .addContactPoint(new InetSocketAddress("1.2.3.4", 9042)) - .withLocalDatacenter("datacenter1") - .build(); -``` diff --git a/manual/developer/common/event_bus/README.md b/manual/developer/common/event_bus/README.md deleted file mode 100644 index 74729ac6656..00000000000 --- a/manual/developer/common/event_bus/README.md +++ /dev/null @@ -1,62 +0,0 @@ - - -## Event bus - -`EventBus` is a bare-bones messaging mechanism, to decouple components from each other, and -broadcast messages to more than one component at a time. - -Producers fire events on the bus; consumers register to be notified for a particular event class. 
-For example, `DefaultDriverConfigLoader` reloads the config periodically, and fires an event if it -detects a change: - -```java -boolean changed = driverConfig.reload(configSupplier.get()); -if (changed) { - LOG.info("[{}] Detected a configuration change", logPrefix); - eventBus.fire(ConfigChangeEvent.INSTANCE); -} -``` - -This allows other components, such as `ChannelPool`, to react to config changes dynamically: - -```java -eventBus.register( - ConfigChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onConfigChanged)); - -private void onConfigChanged(ConfigChangeEvent event) { - assert adminExecutor.inEventLoop(); - // resize re-reads the pool size from the configuration and does nothing if it hasn't changed, - // which is exactly what we want. - resize(distance); -} -``` - -For simplicity, the implementation makes the following assumptions: - -* events are propagated synchronously: if their processing needs to be delayed or rescheduled to - another thread, it's the consumer's responsibility (see how the pool uses `RunOrSchedule` in the - example above); -* callbacks are not polymorphic: you must register for the exact event class. For example, if you - have `eventBus.register(B.class, callback)` and fire an `A extends B`, the callback won't catch - it (internally, this allows direct lookups instead of traversing all registered callbacks with an - `instanceof` check). - -Those choices have been good enough for the needs of the driver. That's why we use a custom -implementation rather than something more sophisticated like Guava's event bus. diff --git a/manual/developer/native_protocol/README.md b/manual/developer/native_protocol/README.md deleted file mode 100644 index b96553fc51b..00000000000 --- a/manual/developer/native_protocol/README.md +++ /dev/null @@ -1,197 +0,0 @@ - - -## Native protocol layer - -The native protocol layer encodes protocol messages into binary, before they are sent over the -network. 
- -This part of the code lives in its own project: -[native-protocol](https://github.com/datastax/native-protocol). We extracted it to make it reusable -([Simulacron](https://github.com/datastax/simulacron) also uses it). - -The protocol specifications are available in -[native-protocol/src/main/resources](https://github.com/datastax/native-protocol/tree/1.x/src/main/resources). -These files originally come from Cassandra, we copy them over for easy access. Authoritative specifications can -always be found in [cassandra/doc](https://github.com/apache/cassandra/tree/trunk/doc). - - -For a broad overview of how protocol types are used in the driver, let's step through an example: - -* the user calls `session.execute()` with a `SimpleStatement`. The protocol message for a - non-prepared request is `QUERY`; -* `CqlRequestHandler` uses `Conversions.toMessage` to convert the statement into a - `c.d.o.protocol.internal.request.Query`; -* `InflightHandler.write` assigns a stream id to that message, and wraps it into a - `c.d.o.protocol.internal.Frame`; -* `FrameEncoder` uses `c.d.o.protocol.internal.FrameCodec` to convert the frame to binary. - -(All types prefixed with `c.d.o.protocol.internal` belong to the native-protocol project.) - -A similar process happens on the response path: decode the incoming binary payload into a protocol -message, then convert the message into higher-level driver objects: `ResultSet`, `ExecutionInfo`, -etc. - -### Native protocol types - -#### Messages - -Every protocol message is identified by an opcode, and has a corresponding `Message` subclass. - -A `Frame` wraps a message to add metadata, such as the protocol version and stream id. - -```ditaa -+-------+ contains +------------+ -| Frame +--------->+ Message + -+-------+ +------------+ - | int opcode | - +--+---------+ - | - | +---------+ - +----+ Query | - | +---------+ - | - | +---------+ - +----+ Execute | - | +---------+ - | - | +---------+ - +----+ Rows | - +---------+ - - etc. 
-``` - -All value classes are immutable, but for efficiency they don't make defensive copies of their -fields. If these fields are mutable (for example collections), they shouldn't be modified after -creating a message instance. - -The code makes very few assumptions about how the messages will be used. Data is often represented -in the most simple way. For example, `ProtocolConstants` uses simple integer constants to represent -protocol codes (enums wouldn't work at that level, because we need to add new codes in the DSE -driver); the driver generally rewraps them in more type-safe structures before exposing them to -higher-level layers. - -#### Encoding/decoding - -For every message, there is a corresponding `Message.Codec` for encoding and decoding. A -`FrameCodec` relies on a set of message codecs, for one or more protocol versions. Given an incoming -frame, it looks up the right message codec to use, based on the protocol version and opcode. -Optionally, it compresses frame bodies with a `Compressor`. - - -```ditaa -+-----------------+ +-------------------+ -| FrameCodec[B] +----------------+ PrimitiveCodec[B] | -+-----------------+ +-------------------+ -| B encode(Frame) | -| Frame decode(B) +-------+ +---------------+ -+------+----------+ +--------+ Compressor[B] | - | +---------------+ - | - | +-------------------+ - +---------------------------+ Message.Codec | - 1 codec per opcode +-------------------+ - and protocol version | B encode(Message) | - | Message decode(B) | - +-------------------+ -``` - -Most of the time, you'll want to use the full set of message codecs for a given protocol version. -`CodecGroup` provides a convenient way to register multiple codecs at once. The project provides -default implementations for all supported protocol version, both for clients like the driver (e.g. -encode `QUERY`, decode `RESULT`), or servers like Simulacron (decode `QUERY` encode `RESULT`). 
-
-
-```ditaa
-+-------------+
-| CodecGroup  |
-+------+------+
-       |
-       |    +------------------------+
-       +----+ ProtocolV3ClientCodecs |
-       |    +------------------------+
-       |
-       |    +------------------------+
-       +----+ ProtocolV3ServerCodecs |
-       |    +------------------------+
-       |
-       |    +------------------------+
-       +----+ ProtocolV4ClientCodecs |
-       |    +------------------------+
-       |
-       |    +------------------------+
-       +----+ ProtocolV4ServerCodecs |
-       |    +------------------------+
-       |
-       |    +------------------------+
-       +----+ ProtocolV5ClientCodecs |
-       |    +------------------------+
-       |
-       |    +------------------------+
-       +----+ ProtocolV5ServerCodecs |
-            +------------------------+
-```
-
-The native protocol layer is agnostic to the actual binary representation. In the driver, this
-happens to be a Netty `ByteBuf`, but the encoding logic doesn't need to be aware of that. This is
-expressed by the type parameter `B` in `FrameCodec`. `PrimitiveCodec` abstracts the basic
-primitives to work with a `B`: how to create an instance, read and write data to it, etc.
-
-```java
-public interface PrimitiveCodec<B> {
-  B allocate(int size);
-  int readInt(B source);
-  void writeInt(int i, B dest);
-  ...
-}
-```
-
-Everything else builds upon those primitives. By just switching the `PrimitiveCodec` implementation,
-the whole protocol layer could be reused with a different type, such as `byte[]`.
-
-In summary, to initialize a `FrameCodec`, you need:
-
-* a `PrimitiveCodec`;
-* a `Compressor` (optional);
-* one or more `CodecGroup`s.
-
-### Integration in the driver
-
-The driver initializes its `FrameCodec` in `DefaultDriverContext.buildFrameCodec()`.
- -* the primitive codec is `ByteBufPrimitiveCodec`, which implements the basic primitives for Netty's - `ByteBuf`; -* the compressor comes from `DefaultDriverContext.buildCompressor()`, which determines the - implementation from the configuration; -* it is built with `FrameCodec.defaultClient`, which is a shortcut to use the default client groups: - `ProtocolV3ClientCodecs`, `ProtocolV4ClientCodecs` and `ProtocolV5ClientCodecs`. - -### Extension points - -The default frame codec can be replaced by [extending the -context](../common/context/#overriding-a-context-component) to override `buildFrameCodec`. This -can be used to add or remove a protocol version, or replace a particular codec. - -If protocol versions change, `ProtocolVersionRegistry` will likely be affected as well. - -Also, depending on the nature of the protocol changes, the driver's [request -processors](../request_execution/#request-processors) might require some adjustments: either replace -them, or introduce separate ones (possibly with new `executeXxx()` methods on a custom session -interface). diff --git a/manual/developer/netty_pipeline/README.md b/manual/developer/netty_pipeline/README.md deleted file mode 100644 index b596832e202..00000000000 --- a/manual/developer/netty_pipeline/README.md +++ /dev/null @@ -1,180 +0,0 @@ - - -## Netty pipeline - -With the [protocol layer](../native_protocol) in place, the next step is to build the logic for a -single server connection. - -We use [Netty](https://netty.io/) for network I/O (to learn more about Netty, [this -book](https://www.manning.com/books/netty-in-action) is an excellent resource). 
- -```ditaa - +----------------+ - | ChannelFactory | - +----------------+ - | connect() | - +-------+--------+ - | Application - |creates +----------------------------------------------+ - V | Outgoing | - +-------+--------+ | | +---------------------+ ^ | - | DriverChannel | | | | ProtocolInitHandler | | | - +-------+--------+ | | +---------------------+ | | - | | | | | - +-------+--------+ | | +---------------------+ | | - | Channel | | | | InFlightHandler | | | - | (Netty) | | | +---------------------+ | | - +-------+--------+ | | | | - | | | +---------------------+ | | - +-------+--------+ | | | Heartbeathandler | | | - |ChannelPipeline +---+ | +---------------------+ | | - | (Netty) | | | | | - +----------------+ | | +--------------+ +--------------+ | | - | | | FrameEncoder | | FrameDecoder | | | - | | +--------------+ +--------------+ | | - | | | | - | | +---------------------+ | | - | | | SslHandler | | | - | | | (Netty) | | | - | V +---------------------+ | | - | Incoming | - +----------------------------------------------+ - Network -``` - -Each Cassandra connection is based on a Netty `Channel`. We wrap it into our own `DriverChannel`, -that exposes higher-level operations. `ChannelFactory` is the entry point for other driver -components; it handles protocol negotiation for the first channel. - -A Netty channel has a *pipeline*, that contains a sequence of *handlers*. As a request is sent, it -goes through the pipeline top to bottom; each successive handler processes the input, and passes the -result to the next handler. Incoming responses go the other way. - -Our pipeline is configured with the following handlers: - -### SslHandler - -The implementation is provided by Netty (all the others handlers are custom implementations). - -Internally, handler instances are provided by `SslHandlerFactory`. At the user-facing level, this is -abstracted behind `SslEngineFactory`, based on Java's default SSL implementation. 
-
-See also the [Extension points](#extension-points) section below.
-
-### FrameEncoder and FrameDecoder
-
-This is where we integrate the protocol layer, as explained
-[here](../native_protocol/#integration-in-the-driver).
-
-Unlike the other pipeline stages, we use separate handlers for incoming and outgoing messages.
-
-### HeartbeatHandler
-
-The heartbeat is a background request sent on inactive connections (no reads since x seconds), to
-make sure that they are still alive, and prevent them from being dropped by a firewall. This is
-similar to TCP_KeepAlive, but we provide an application-side alternative because users don't always
-have full control over their network configuration.
-
-`HeartbeatHandler` is based on Netty's built-in `IdleStateHandler`, so there's not much in there
-apart from the details of the control request.
-
-### InFlightHandler
-
-This handler is where most of the connection logic resides. It is responsible for:
-
-* writing regular requests:
-  * find an available stream id;
-  * store the `ResponseCallback` provided by the client under that id;
-  * when the response comes in, retrieve the callback and complete it;
-* cancelling a request;
-* switching the connection to a new keyspace (if a USE statement was executed through the session);
-* handling shutdown: gracefully (allow all requests to complete), or forcefully (error out all
-  requests).
-
-The two most important methods are:
-
-* `write(ChannelHandlerContext, Object, ChannelPromise)`: processes outgoing messages. We accept
-  different types of messages, because cancellation and shutdown also use that path. See
-  `DriverChannel`, which abstracts those details.
-* `channelRead`: processes incoming responses.
-
-Netty handlers are confined to the channel's event loop (a.k.a. I/O thread). Therefore the code
-doesn't have to be concurrent, fields can be non-volatile and methods are guaranteed not to race
-with each other.
- -In particular, a big difference from driver 3 is that stream ids are assigned within the event loop, -instead of from client code before writing to the channel (see also [connection -pooling](../request_execution/#connection_pooling)). `StreamIdGenerator` is not thread-safe. - -All communication between the handler and the outside world must be done through messages or channel -events. There are 3 exceptions to this rule: `getAvailableIds`, `getInflight` and `getOrphanIds`, -which are based on volatile fields. They are all used for metrics, and `getAvailableIds` is also -used to balance the load over connections to the same node (see `ChannelSet`). - -### ProtocolInitHandler - -This handler manages the protocol initialization sequence on a newly established connection (see the -`STARTUP` message in the protocol specification). - -Most of the logic resides in `InitRequest.onResponse`, which acts as a simple state machine based on -the last request sent. - -There is also a bit of custom code to ensure that the channel is not made available to clients -before the protocol is ready. This is abstracted in the parent class `ConnectInitHandler`. - -Once the initialization is complete, `ProtocolInitHandler` removes itself from the pipeline. - -### Extension points - -#### NettyOptions - -The `advanced.netty` section in the [configuration](../../core/configuration/reference/) exposes a -few high-level options. - -For more elaborate customizations, you can [extend the -context](../common/context/#overriding-a-context-component) to plug in a custom `NettyOptions` -implementation. This allows you to do things such as: - -* reusing existing event loops; -* using Netty's [native Epoll transport](https://netty.io/wiki/native-transports.html); -* adding custom handlers to the pipeline. 
- -#### SslHandlerFactory - -The [user-facing API](../../core/ssl/) (`advanced.ssl-engine-factory` in the configuration, or -`SessionBuilder.withSslContext` / `SessionBuilder.withSslEngineFactory`) only supports Java's -default SSL implementation. - -The driver can also work with Netty's [native -integration](https://netty.io/wiki/requirements-for-4.x.html#tls-with-openssl) with OpenSSL or -boringssl. This requires a bit of custom development against the internal API: - -* add a dependency to one of the `netty-tcnative` artifacts, following [these - instructions](http://netty.io/wiki/forked-tomcat-native.html); -* implement `SslHandlerFactory`. Typically: - * the constructor will create a Netty [SslContext] with [SslContextBuilder.forClient], and store - it in a field; - * `newSslHandler` will delegate to one of the [SslContext.newHandler] methods; -* [extend the context](../common/context/#overriding-a-context-component) and override - `buildSslHandlerFactory` to plug your custom implementation. - -[SslContext]: https://netty.io/4.1/api/io/netty/handler/ssl/SslContext.html -[SslContext.newHandler]: https://netty.io/4.1/api/io/netty/handler/ssl/SslContext.html#newHandler-io.netty.buffer.ByteBufAllocator- -[SslContextBuilder.forClient]: https://netty.io/4.1/api/io/netty/handler/ssl/SslContextBuilder.html#forClient-- diff --git a/manual/developer/request_execution/README.md b/manual/developer/request_execution/README.md deleted file mode 100644 index 38a0a55fbd7..00000000000 --- a/manual/developer/request_execution/README.md +++ /dev/null @@ -1,342 +0,0 @@ - - -## Request execution - -The [Netty pipeline](../netty_pipeline/) gives us the ability to send low-level protocol messages on -a single connection. - -The request execution layer builds upon that to: - -* manage multiple connections (many nodes, possibly many connections per node); -* abstract the protocol layer behind higher-level, user-facing types. - -The session is the main entry point. 
`CqlSession` is the type that users will most likely reference -in their applications. It extends a more generic `Session` type, for the sake of extensibility; this -will be explained in [Request processors](#request-processors). - - -```ditaa -+----------------------------------+ -| Session | -+----------------------------------+ -| ResultT execute( | -| RequestT, GenericType[ResultT])| -+----------------------------------+ - ^ - | -+----------------+-----------------+ -| CqlSession | -+----------------------------------+ -| ResultSet execute(Statement) | -+----------------+-----------------+ - ^ - | -+----------------+-----------------+ -| DefaultSession | -+----------------+-----------------+ - | - | - | 1 per node +-------------+ - +------------+ ChannelPool | - | +----+--------+ - | | - | | n +---------------+ - | +----+ DriverChannel | - | +---------------+ - | - | 1 +--------------------------+ - +------------+ RequestProcessorRegistry | - +----+---------------------+ - | - | n +---------------------------+ - +----+ RequestProcessor | - +---------------------------+ - | ResultT process(RequestT) | - +---------------------------+ -``` - -`DefaultSession` contains the session implementation. It follows the [confined inner -class](../common/concurrency/#cold-path) pattern to simplify concurrency. - -### Connection pooling - -```ditaa -+----------------------+ 1 +------------+ -| ChannelPool +---------+ ChannelSet | -+----------------------+ +-----+------+ -| DriverChannel next() | | -+----------+-----------+ n| - | +------+--------+ - 1| | DriverChannel | - +------+-------+ +---------------+ - | Reconnection | - +--------------+ -``` - -`ChannelPool` handles the connections to a given node, for a given session. It follows the [confined -inner class](../common/concurrency/#cold-path) pattern to simplify concurrency. 
There are a few
-differences compared to the 3.x implementation:
-
-#### Fixed size
-
-The pool has a fixed number of connections, it doesn't grow or shrink dynamically based on current
-usage. In other words, there is no more "max" size, only a "core" size.
-
-However, this size is specified in the configuration. If the value is changed at runtime, the driver
-will detect it, and trigger a resize of all active pools.
-
-The rationale for removing the dynamic behavior is that it introduced a ton of complexity in the
-implementation and configuration, for unclear benefits: if the load fluctuates very rapidly, then
-you need to provision for the max size anyway, so you might as well run with all the connections all
-the time. If on the other hand the fluctuations are rare and predictable (e.g. peak for holiday
-sales), then a manual configuration change is good enough.
-
-#### No queuing
-
-To get a connection to a node, client code calls `ChannelPool.next()`. This returns the least busy
-connection, based on the `getAvailableIds()` counter exposed by
-[InFlightHandler](netty_pipeline/#in-flight-handler).
-
-If all connections are busy, there is no queuing; the driver moves to the next node immediately. The
-rationale is that it's better to try another node that might be ready to reply, instead of
-introducing an additional wait for each node. If the user wants queuing when all nodes are busy,
-it's better to do it at the session level with a [throttler](../../core/throttling/), which provides
-more intuitive configuration.
-
-Before 4.5.0, there was also no preemptive acquisition of the stream id outside of the event loop:
-`getAvailableIds()` had volatile semantics, and a client could get a pooled connection that seemed
-not busy, but fail to acquire a stream id when it later tried the actual write. This turned out to
-not work well under high load, see [JAVA-2644](https://datastax-oss.atlassian.net/browse/JAVA-2644).
- -Starting with 4.5.0, we've reintroduced a stronger guarantee (reminiscent of how things worked in -3.x): clients **must call `DriverChannel.preAcquireId()` exactly once before each write**. If the -call succeeds, `getAvailableIds()` is incremented immediately, and the client is guaranteed that -there will be a stream id available for the write. `preAcquireId()` and `getAvailableIds()` have -atomic semantics, so we can distribute the load more accurately. - -This comes at the cost of additional complexity: **we must ensure that every write is pre-acquired -first**, so that `getAvailableIds()` doesn't get out of sync with the actual stream id usage inside -`InFlightHandler`. This is explained in detail in the javadocs of `DriverChannel.preAcquireId()`, -read them carefully. - -The pool manages its channels with `ChannelSet`, a simple copy-on-write data structure. - -#### Built-in reconnection - -The pool has its own independent reconnection mechanism (based on the `Reconnection` utility class). -The goal is to keep the pool at its expected capacity: whenever a connection is lost, the task -starts and will try to reopen the missing connections at regular intervals. - -### Request processors - -```ditaa -+----------------------------------+ -| Session | -+----------------------------------+ -| ResultT execute( | -| RequestT, GenericType[ResultT])| -+----------------------------------+ - ^ - | -+----------------+-----------------+ -| CqlSession | -+----------------------------------+ -| ResultSet execute(Statement) | -+----------------+-----------------+ -``` - -The driver can execute different types of requests, in different ways. This is abstracted by the -top-level `Session` interface, with a very generic execution method: - -```java - ResultT execute( - RequestT request, GenericType resultType); -``` - -It takes a request, and a type token that serves as a hint at the expected result. 
Each `(RequestT, -ResultT)` combination defines an execution model, for example: - -| `RequestT` | `ResultT` | Execution | -| --- | --- | ---| -| `Statement` | `ResultSet` | CQL, synchronous | -| `Statement` | `CompletionStage` | CQL, asynchronous | -| `Statement` | `ReactiveResultSet` | CQL, reactive | -| `GraphStatement` | `GraphResultSet` | DSE Graph, synchronous | -| `GraphStatement` | `CompletionStage` | DSE Graph, asynchronous | - -In general, regular client code doesn't use `Session.execute` directly. Instead, child interfaces -expose more user-friendly shortcuts for a given result type: - -```java -public interface CqlSession extends Session { - default ResultSet execute(Statement statement) { - return execute(statement, Statement.SYNC); - } -} -``` - -The logic for each execution model is encapsulated in a `RequestProcessor`. -Processors are stored in a `RequestProcessorRegistry`. For each request, the session invokes the -registry to find the processor that matches the request and result types. 
- -```ditaa -+----------------+ 1+-----------------------------------+ -| DefaultSession +---+ RequestProcessorRegistry | -+----------------+ +-----------------------------------+ - | processorFor( | - | RequestT, GenericType[ResultT]) | - +-----------------+-----------------+ - | - |n - +----------------------+----------------------+ - | RequestProcessor[RequestT, ResultT] | - +---------------------------------------------+ - | boolean canProcess(Request, GenericType[?]) | - | ResultT process(RequestT) | - +---------------------------------------------+ - ^ - | +--------------------------+ - +---------+ CqlRequestSyncProcessor | - | +--------------------------+ - | - | +--------------------------+ - +---------+ CqlRequestAsyncProcessor | - | +--------------------------+ - | - | +--------------------------+ - +---------+ CqlPrepareSyncProcessor | - | +--------------------------+ - | - | +--------------------------+ - +---------+ CqlPrepareAsyncProcessor | - +--------------------------+ -``` - -A processor is responsible for: - -* converting the user request into [protocol-level messages](../native_protocol/); -* selecting a coordinator node, and obtaining a channel from its connection pool; -* writing the request to the channel; -* handling timeouts, retries and speculative executions; -* translating the response into user-level types. - -The `RequestProcessor` interface makes very few assumptions about the actual processing; but in -general, implementations create a handler for the lifecycle of every request. For example, -`CqlRequestHandler` is the central component for basic CQL execution. - -Processors can be implemented in terms of other processors. In particular, this is the case for -synchronous execution models, which are just a blocking wrapper around their asynchronous -counterpart. You can observe this in `CqlRequestSyncProcessor`. - -Note that preparing a statement is treated as just another execution model. 
It has its own -processors, that operate on a special `PrepareRequest` type: - -```java -public interface CqlSession extends Session { - default PreparedStatement prepare(SimpleStatement statement) { - return execute(new DefaultPrepareRequest(statement), PrepareRequest.SYNC); - } -} -``` - -### Extension points - -#### RequestProcessorRegistry - -You can customize the set of request processors by [extending the -context](../common/context/#overriding-a-context-component) and overriding -`buildRequestProcessorRegistry`. - -This can be used to either: - -* add your own execution models (new request types and/or return types); -* remove existing ones; -* or a combination of both. - -The driver codebase contains an integration test that provides a complete example: -[RequestProcessorIT]. It shows how you can build a session that returns Guava's `ListenableFuture` -instead of Java's `CompletionStage` (existing request type, different return type). - -[GuavaDriverContext] is the custom context subclass. It plugs a custom registry that wraps the -default async processors with [GuavaRequestAsyncProcessor], to transform the returned futures. - -Note that the default async processors are not present in the registry anymore; if you try to call -a method that returns a `CompletionStage`, it fails. See the next section for how to hide those -methods. - -#### Exposing a custom session interface - -If you add or remove execution models, you probably want to expose a session interface that matches -the underlying capabilities of the implementation. - -For example, in the [RequestProcessorIT] example mentioned in the previous section, we remove the -ability to return `CompletionStage`, but add the ability to return `ListenableFuture`. Therefore we -expose a custom [GuavaSession] with a different return type for async methods: - -```java -public interface GuavaSession extends Session { - default ListenableFuture executeAsync(Statement statement) { ... 
} - default ListenableFuture prepareAsync(SimpleStatement statement) { ... } -} -``` - -We need an implementation of this interface. Our new methods all have default implementations in -term of the abstract `Session.execute()`, so the only thing we need is to delegate to an existing -`Session`. The driver provides `SessionWrapper` to that effect. See [DefaultGuavaSession]: - -```java -public class DefaultGuavaSession extends SessionWrapper implements GuavaSession { - public DefaultGuavaSession(Session delegate) { - super(delegate); - } -} -``` - -Finally, we want to create an instance of this wrapper. Since we extended the context (see previous -section), we already wrote a custom builder subclass; there is another protected method we can -override to plug our wrapper. See [GuavaSessionBuilder]: - -```java -public class GuavaSessionBuilder extends SessionBuilder { - - @Override - protected DriverContext buildContext( ... ) { ... } - - @Override - protected GuavaSession wrap(CqlSession defaultSession) { - return new DefaultGuavaSession(defaultSession); - } -``` - -Client code can now use the familiar pattern to create a session: - -```java -GuavaSession session = new GuavaSessionBuilder() - .addContactEndPoints(...) 
- .withKeyspace("test") - .build(); -``` - -[RequestProcessorIT]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java -[GuavaDriverContext]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java -[GuavaRequestAsyncProcessor]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java -[GuavaSession]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java -[DefaultGuavaSession]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java -[GuavaSessionBuilder]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java diff --git a/manual/mapper/.nav b/manual/mapper/.nav deleted file mode 100644 index 7bfdb6c0c8e..00000000000 --- a/manual/mapper/.nav +++ /dev/null @@ -1,5 +0,0 @@ -entities -daos -mapper -config -custom_types \ No newline at end of file diff --git a/manual/mapper/README.md b/manual/mapper/README.md deleted file mode 100644 index 27005b671ad..00000000000 --- a/manual/mapper/README.md +++ /dev/null @@ -1,173 +0,0 @@ - - -## Mapper - -The mapper generates the boilerplate to execute queries and convert the results into -application-level objects. - -It is published as two artifacts: `org.apache.cassandra:java-driver-mapper-processor` and -`org.apache.cassandra:java-driver-mapper-runtime`. See [Integration](config/) for detailed instructions -for different build tools. 
- -### Quick start - -For a quick overview of mapper features, we are going to build a trivial example based on the -following schema: - -``` -CREATE KEYSPACE inventory -WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - -CREATE TABLE inventory.product(id uuid PRIMARY KEY, description text); -``` - -#### Entity class - -This is a simple data container that will represent a row in the `product` table: - -```java -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; - -@Entity -public class Product { - - @PartitionKey private UUID id; - private String description; - - public Product() {} - - public Product(UUID id, String description) { - this.id = id; - this.description = description; - } - - public UUID getId() { return id; } - - public void setId(UUID id) { this.id = id; } - - public String getDescription() { return description; } - - public void setDescription(String description) { this.description = description; } -} -``` - -Entity classes must have a no-arg constructor; note that, because we also have a constructor that -takes all the fields, we have to define the no-arg constructor explicitly. - -We use mapper annotations to mark the class as an entity, and indicate which field(s) correspond to -the primary key. - -More annotations are available; for more details, see [Entities](entities/). 
- -#### DAO interface - -A DAO defines a set of query methods: - -```java -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Select; - -@Dao -public interface ProductDao { - @Select - Product findById(UUID productId); - - @Insert - void save(Product product); - - @Delete - void delete(Product product); -} -``` - -Again, mapper annotations are used to mark the interface, and indicate what kind of request each -method should execute. You can probably guess what they are in this example. - -For the full list of available query types, see [DAOs](daos/). - -#### Mapper interface - -This is the top-level entry point to mapper features, that allows you to obtain DAO instances: - -```java -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; - -@Mapper -public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); -} -``` - -For more details, see [Mapper](mapper/). - -#### Generating the code - -The mapper uses *annotation processing*: it hooks into the Java compiler to analyze annotations, and -generate additional classes that implement the mapping logic. Annotation processing is a common -technique in modern frameworks, and is generally well supported by build tools and IDEs; this is -covered in detail in [Configuring the annotation processor](config/). - -Pay attention to the compiler output: the mapper processor will sometimes generate warnings if -annotations are used incorrectly. - -#### Using the generated code - -One of the classes generated during annotation processing is `InventoryMapperBuilder`. 
It allows you -to initialize a mapper instance by wrapping a core driver session: - -```java -CqlSession session = CqlSession.builder().build(); -InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build(); -``` - -The mapper should have the same lifecycle as the session in your application: created once at -initialization time, then reused. It is thread-safe. - -From the mapper, you can then obtain a DAO instance and execute queries: - -```java -ProductDao dao = inventoryMapper.productDao(CqlIdentifier.fromCql("inventory")); -dao.save(new Product(UUID.randomUUID(), "Mechanical keyboard")); -``` - -### Logging - -The code generated by the mapper includes logs. They are issued with SLF4J, and can be configured -the same way as the [core driver logs](../core/logging/). - -They can help you figure out which queries the mapper is generating under the hood, for example: - -``` -DEBUG ProductDaoImpl__MapperGenerated - [s0] Initializing new instance for keyspace = ks_0 and table = null -DEBUG ProductHelper__MapperGenerated - [s0] Entity Product will be mapped to ks_0.product -DEBUG ProductDaoImpl__MapperGenerated - [s0] Preparing query - `SELECT id,description,dimensions FROM ks_0.product WHERE id=:id` - for method findById(java.util.UUID) -``` - -You can decide which logs to enable using the standard SLF4J mechanisms (categories and levels). In -addition, if you want no logs at all, it's possible to entirely remove them from the generated code -with the Java compiler option `-Acom.datastax.oss.driver.mapper.logs.enabled=false`. diff --git a/manual/mapper/config/README.md b/manual/mapper/config/README.md deleted file mode 100644 index 1e4f9981306..00000000000 --- a/manual/mapper/config/README.md +++ /dev/null @@ -1,134 +0,0 @@ - - -## Integration - -### Builds tools - -The `java-driver-mapper-processor` artifact contains the annotation processor. 
It hooks into the -Java compiler, and generates additional source files from your annotated classes before the main -compilation happens. It is only required in the compile classpath. - -The `java-driver-mapper-runtime` artifact contains the annotations and a few utility classes. It is -a regular dependency, required at runtime. - -#### Maven - -The best approach is to add the `annotationProcessorPaths` option to the compiler plugin's -configuration (make sure you use version 3.5 or higher): - -```xml - - ... - - - - - org.apache.cassandra - java-driver-mapper-runtime - ${java-driver.version} - - - - - - - maven-compiler-plugin - 3.8.1 - - 1.8 - 1.8 - - - org.apache.cassandra - java-driver-mapper-processor - ${java-driver.version} - - - - org.slf4j - slf4j-nop - 1.7.26 - - - - - - -``` - -Alternatively (e.g. if you are using the [BOM](../../core/bom/)), you may also declare the processor -as a regular dependency in the "provided" scope: - -```xml - - - org.apache.cassandra - java-driver-mapper-processor - ${java-driver.version} - provided - - - org.apache.cassandra - java-driver-mapper-runtime - ${java-driver.version} - - -``` - -The processor runs every time you execute the `mvn compile` phase. It normally supports incremental -builds, but if something looks off you can try a full rebuild with `mvn clean compile`. - -One of the advantages of annotation processing is that the generated code is produced as regular -source files, that you can read and debug like the rest of your application. With the above -configuration, these files are in `target/generated-sources/annotations`. Make sure that -directory is marked as a source folder in your IDE (for example, in IntelliJ IDEA, this might -require right-clicking on your `pom.xml` and selecting "Maven > Reimport"). - -Generated sources follow the same package structure as your annotated types. 
Most end in a special -`__MapperGenerated` suffix, in order to clearly identify them in stack traces (one exception is the -mapper builder, because it is referenced directly from your code). - -Do not edit those files files directly: your changes would be overwritten during the next full -rebuild. - -#### Gradle - -Use the following configuration (Gradle 4.6 and above): - -```groovy -apply plugin: 'java' - -def javaDriverVersion = '...' - -dependencies { - annotationProcessor group: 'com.datastax.oss', name: 'java-driver-mapper-processor', version: javaDriverVersion - compile group: 'com.datastax.oss', name: 'java-driver-mapper-runtime', version: javaDriverVersion -} -``` - -You will find the generated files in `build/generated/sources/annotationProcessor`. - -### Integration with other languages and libraries - -* [Kotlin](kotlin/) -* [Lombok](lombok/) -* [Java 14 records](record/) -* [Scala](scala/) diff --git a/manual/mapper/config/kotlin/README.md b/manual/mapper/config/kotlin/README.md deleted file mode 100644 index a78bf04fb79..00000000000 --- a/manual/mapper/config/kotlin/README.md +++ /dev/null @@ -1,128 +0,0 @@ - - -## Kotlin - -[Kotlin](https://kotlinlang.org/) is an alternative language for the JVM. Its compact syntax and -native support for annotation processing make it a good fit for the mapper. - -We have a full example at [DataStax-Examples/object-mapper-jvm/kotlin]. - -### Writing the model - -You can use Kotlin [data classes] for your entities. Data classes are usually -[immutable](../../entities/#mutability), but you don't need to declare that explicitly with -[@PropertyStrategy]: the mapper detects that it's processing Kotlin code, and will assume `mutable = -false` by default: - -```kotlin -@Entity -data class Product(@PartitionKey val id: Int?, val description: String?) -``` - -Data classes may also be made mutable (by declaring the components with `var` instead of `val`). 
If -you choose that approach, you'll have to annotate your entities with [@PropertyStrategy], and also -declare a default value for every component in order to generate a no-arg constructor: - -```kotlin -@Entity -@PropertyStrategy(mutable = true) -data class Product(@PartitionKey var id: Int? = null, var description: String? = null) -``` - -All of the [property annotations](../../entities/#property-annotations) can be declared directly on -the components. - -If you want to take advantage of [null saving strategies](../../daos/null_saving/), your components -should be nullable. - -The other mapper interfaces are direct translations of the Java versions: - -```kotlin -@Dao -interface ProductDao { - @Insert - fun insert(product: Product) -} -``` - -Known limitation: because of a Kotlin bug ([KT-4779]), you can't use default interface methods. They -will appear as abstract methods to the mapper processor, which will generate an error since they are -not properly annotated. As a workaround, you can use a companion object method that takes the DAO as -an argument (as shown in [UserDao.kt]), or query provider methods. - -### Building - -#### Gradle - -See the example's [build.gradle]. - -You enable Kotlin support with [kotlin][gradle_kotlin] and [kotlin_kapt][gradle_kapt], and declare -the mapper processor with the `kapt` directive. - -#### Maven - -Configure [dual compilation][maven_kotlin_java] of Kotlin and Java sources. In addition, you'll need -an additional execution of the [kotlin-maven-plugin:kapt][maven_kapt] goal with the mapper processor -before compilation: - -```xml - - org.jetbrains.kotlin - kotlin-maven-plugin - ${kotlin.version} - - - kapt - kapt - - - src/main/kotlin - src/main/java - - - - org.apache.cassandra - java-driver-mapper-processor - ${java-driver.version} - - - - - - compile - compile - ... 
- - - -``` - -[maven_kotlin_java]: https://kotlinlang.org/docs/reference/using-maven.html#compiling-kotlin-and-java-sources -[maven_kapt]: https://kotlinlang.org/docs/reference/kapt.html#using-in-maven -[gradle_kotlin]: https://kotlinlang.org/docs/reference/using-gradle.html -[gradle_kapt]: https://kotlinlang.org/docs/reference/kapt.html#using-in-gradle -[data classes]: https://kotlinlang.org/docs/reference/data-classes.html -[KT-4779]: https://youtrack.jetbrains.com/issue/KT-4779 - -[DataStax-Examples/object-mapper-jvm/kotlin]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/kotlin -[build.gradle]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/kotlin/build.gradle -[UserDao.kt]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/kotlin/src/main/kotlin/com/datastax/examples/mapper/killrvideo/user/UserDao.kt - -[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html diff --git a/manual/mapper/config/lombok/README.md b/manual/mapper/config/lombok/README.md deleted file mode 100644 index b87f8f79ea4..00000000000 --- a/manual/mapper/config/lombok/README.md +++ /dev/null @@ -1,109 +0,0 @@ - - -## Lombok - -[Lombok](https://projectlombok.org/) is a popular library that automates repetitive code, such as -getters and setters. You can use it in conjunction with the mapper to eliminate even more -boilerplate. - -We have a full example at [DataStax-Examples/object-mapper-jvm/lombok]. 
- -### Writing the model - -You can either map mutable "data" classes: - -```java -import lombok.Data; -import com.datastax.oss.driver.api.mapper.annotations.*; - -@Data -@Entity -public class Product { - @PartitionKey private int id; - private String description; -} -``` - -Or immutable "value" classes: - -```java -import lombok.Value; -import com.datastax.oss.driver.api.mapper.annotations.*; - -@Value -@Entity -@PropertyStrategy(mutable = false) -public class Product { - @PartitionKey private int id; - private String description; -} -``` - -You can also use Lombok's fluent accessors if you configure the mapper accordingly: - -```java -import lombok.Data; -import lombok.experimental.Accessors; -import com.datastax.oss.driver.api.mapper.annotations.*; -import com.datastax.oss.driver.api.mapper.entity.naming.*; - -@Data -@Accessors(fluent = true) -@Entity -@PropertyStrategy(getterStyle = GetterStyle.FLUENT, setterStyle = SetterStyle.FLUENT) -public static class Product { - @PartitionKey private int id; - private String description; -} -``` - -### Building - -You'll need to configure the Lombok annotation processor in your build. The only requirement is that -it must run *before* the mapper's. - -#### Maven - -See the compiler plugin's configuration in the example's [pom.xml]. - -#### Gradle - -A similar result can be achieved with: - -```groovy -apply plugin: 'java' - -def javaDriverVersion = '...' -def lombokVersion = '...' 
- -dependencies { - annotationProcessor group: 'org.projectlombok', name: 'lombok', version: lombokVersion - annotationProcessor group: 'com.datastax.oss', name: 'java-driver-mapper-processor', version: javaDriverVersion - compile group: 'com.datastax.oss', name: 'java-driver-mapper-runtime', version: javaDriverVersion - compileOnly group: 'org.projectlombok', name: 'lombok', version: lombokVersion -} -``` - -You'll also need to install a Lombok plugin in your IDE (for IntelliJ IDEA, [this -one](https://plugins.jetbrains.com/plugin/6317-lombok) is available in the marketplace). - - -[DataStax-Examples/object-mapper-jvm/lombok]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/lombok -[pom.xml]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/lombok/pom.xml diff --git a/manual/mapper/config/record/README.md b/manual/mapper/config/record/README.md deleted file mode 100644 index 95530d52742..00000000000 --- a/manual/mapper/config/record/README.md +++ /dev/null @@ -1,54 +0,0 @@ - - -## Java 14 Records - -Java 14 introduced [Record] as a lightweight, immutable alternative to POJOs. You can map annotated -records as entities. - -We have a full example at [DataStax-Examples/object-mapper-jvm/record]. - -Note: records are a **preview feature** of Java 14. As such the mapper's support for them is also -provided as a preview. - -### Writing the model - -Annotate your records like regular classes: - -```java -@Entity -record Product(@PartitionKey int id, String description) {} -``` - -Records are immutable and use the [fluent getter style](../../entities#getter-style), but you don't -need to declare that explicitly with [@PropertyStrategy]: the mapper detects when it's processing a -record, and will assume `mutable = false, getterStyle = FLUENT` by default. - -### Building - -You need to build with Java 14, and pass the `--enable-preview` flag to both the compiler and the -runtime JVM. See [pom.xml] in the example. 
- - -[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html - -[DataStax-Examples/object-mapper-jvm/record]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/record -[pom.xml]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/record/pom.xml - -[Record]: https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/lang/Record.html diff --git a/manual/mapper/config/scala/README.md b/manual/mapper/config/scala/README.md deleted file mode 100644 index 2cb75273d0b..00000000000 --- a/manual/mapper/config/scala/README.md +++ /dev/null @@ -1,76 +0,0 @@ - - -## Scala - -[Scala](https://www.scala-lang.org/) is an alternative language for the JVM. It doesn't support -annotation processing natively, so using it with the mapper is a bit more complicated, but it can be -done. - -We have a full example at [DataStax-Examples/object-mapper-jvm/scala]. - -### Writing the model - -You can use Scala case classes for your entities. Notice the peculiar syntax for field annotations: - -```scala -@Entity -case class UserVideo(@(PartitionKey@field) userid: UUID, - @(ClusteringColumn@field)(0) addedDate: Instant, - @(ClusteringColumn@field)(1) videoid: UUID, - name: String, - previewImageLocation: String) -``` - -Case classes are immutable and use the [fluent getter style](../../entities#getter-style), but you -don't need to declare that explicitly with [@PropertyStrategy]: the mapper detects when it's -processing a case class, and will assume `mutable = false, getterStyle = FLUENT` by default. - -The DAOs and main mapper can be defined as Scala traits, that are direct translations of their Java -equivalents: - -```scala -@Dao -trait UserDao { - @Select - def get(userid: UUID): User -} -``` - -### Building - -Since Scala does not support annotation processing, the mapper processor cannot operate on Scala -sources directly. 
But it can process the compiled class files output by the Scala compiler. So the -compilation happens in 3 phases: - -1. Compile the Scala sources with the regular sbt task. -2. Execute a custom task that runs the annotation processor (`javac -proc:only ...`) on the compiled - class files. -3. Execute another custom task that compiles the Java sources generated by the mapper. - -See the example's [build.sbt] for the full details. - -Because of that process, the sources fed to the processor cannot reference any generated code. So -the application code needs to be placed in a separate subproject, in order to have access to the -mapper builder. - -[DataStax-Examples/object-mapper-jvm/scala]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/scala -[build.sbt]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/scala/build.sbt - -[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html diff --git a/manual/mapper/daos/.nav b/manual/mapper/daos/.nav deleted file mode 100644 index be60381834f..00000000000 --- a/manual/mapper/daos/.nav +++ /dev/null @@ -1,11 +0,0 @@ -delete -getentity -insert -query -queryprovider -select -setentity -update -increment -null_saving -statement_attributes \ No newline at end of file diff --git a/manual/mapper/daos/README.md b/manual/mapper/daos/README.md deleted file mode 100644 index d12172bf056..00000000000 --- a/manual/mapper/daos/README.md +++ /dev/null @@ -1,174 +0,0 @@ - - -## DAOs - -### Quick overview - -Interface annotated with [@Dao]. - -* interface-level annotations: - * [@DefaultNullSavingStrategy] - * [@HierarchyScanStrategy] -* method-level annotations: query methods (see child pages). -* instantiated from a [@DaoFactory] method on the mapper. - ------ - -A DAO is an interface that defines a set of query methods. 
In general, those queries will relate to -the same [entity](../entities/) (although that is not a requirement). - -It must be annotated with [@Dao]: - -```java -@Dao -public interface ProductDao { - @Select - Product findById(UUID productId); - - @Insert - void save(Product product); - - @Delete - void delete(Product product); -} -``` - -### Query methods - -To add queries, define methods on your interface and mark them with one of the following -annotations: - -* [@Delete](delete/) -* [@GetEntity](getentity/) -* [@Insert](insert/) -* [@Query](query/) -* [@QueryProvider](queryprovider/) -* [@Select](select/) -* [@SetEntity](setentity/) -* [@Update](update/) -* [@Increment](increment/) - -The methods can have any name. The allowed parameters and return type are specific to each -annotation. - -### Runtime usage - -To obtain a DAO instance, use a [factory method](../mapper/#dao-factory-methods) on the mapper -interface. - -```java -InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build(); -ProductDao dao = inventoryMapper.productDao("someKeyspace"); -``` - -The returned object is thread-safe, and can safely be shared throughout your application. - -### Inheritance - -DAOs can benefit from inheriting methods from other interfaces. This is useful when you -have a common set of query methods that could be shared between entities. 
For example, using the -class hierarchy defined in [Entity Inheritance], one may define a set of DAO interfaces in the -following manner: - -```java -interface BaseDao { - @Insert - void save(T t); - - @Select - T findById(UUID id); - - @SetEntity - void bind(T t, BoundStatementBuilder builder); -} - -@Dao -interface CircleDao extends BaseDao {} - -@Dao -interface RectangleDao extends BaseDao {} - -@Dao -interface SphereDao extends BaseDao {} - -@Mapper -public interface ShapeMapper { - @DaoFactory - CircleDao circleDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - RectangleDao rectangleDao(@DaoKeyspace CqlIdentifier keyspace); - - @DaoFactory - SphereDao sphereDao(@DaoKeyspace CqlIdentifier keyspace); -} -``` - -Note that interfaces that declare generic type variables should not be annotated with -[@Dao]. - -In addition to inheriting methods from parent interfaces, interface-level annotations such as -[@DefaultNullSavingStrategy] are also inherited: - -```java -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.SET_TO_NULL; - -@DefaultNullSavingStrategy(SET_TO_NULL) -interface BaseDao { -} - -@Dao -interface RectangleDao extends BaseDao {} -``` - -Annotation priority is driven by proximity to the [@Dao]-annotated interface. For example: - -```java -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.SET_TO_NULL; -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.DO_NOT_SET; - -@DefaultNullSavingStrategy(SET_TO_NULL) -interface BaseDao { -} - -@Dao -@DefaultNullSavingStrategy(DO_NOT_SET) -interface RectangleDao extends BaseDao {} -``` - -In this case `@DefaultNullSavingStrategy(DO_NOT_SET)` on `RectangleDao` would override the -annotation on `BaseDao`. 
- -If two parent interfaces at the same level declare the same annotation, the priority of annotation -chosen is controlled by the order the interfaces are declared, for example: - -```java -interface RectangleDao extends Dao1, Dao2 {} -``` - -In this case, any annotations declared in `Dao1` would be chosen over `Dao2`. - -To control how the hierarchy is scanned, annotate interfaces with [@HierarchyScanStrategy]. - -[@Dao]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Dao.html -[@DaoFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.html -[@DefaultNullSavingStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.html -[@HierarchyScanStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.html -[Entity Inheritance]: ../entities/#inheritance diff --git a/manual/mapper/daos/custom_types/README.md b/manual/mapper/daos/custom_types/README.md deleted file mode 100644 index 19f689655a7..00000000000 --- a/manual/mapper/daos/custom_types/README.md +++ /dev/null @@ -1,262 +0,0 @@ - - -## Custom result types - -The mapper supports a pre-defined set of built-in types for DAO method results. For example, a -[Select](../select/#return-type) method can return a single entity, an asynchronous -`CompletionStage`, a `ReactiveResultSet`, etc. - -Sometimes it's convenient to use your own types. For example if you use a specific Reactive Streams -implementation (RxJava, Reactor, Mutiny...), you probably want your DAOs to return those types -directly, instead of having to wrap every call manually. - -To achieve this, the mapper allows you to plug custom logic that will get invoked when an unknown -type is encountered. 
- -In the rest of this page, we'll show a simple example that replaces Java's `CompletableFuture` with -Guava's `ListenableFuture`. Our goal is to have the mapper implement this interface: - -```java -import com.google.common.util.concurrent.ListenableFuture; - -@Dao -public interface ProductDao { - @Select - ListenableFuture select(UUID id); - - @Update - ListenableFuture update(Product entity); - - @Insert - ListenableFuture insert(Product entity); - - @Delete - ListenableFuture delete(Product entity); -} -``` - -### Writing the producers - -The basic component that encapsulates conversion logic is [MapperResultProducer]. Our DAO has two -different return types: `ListenableFuture` and `ListenableFuture`. So we're going to -write two producers: - -#### Future of void - -```java -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; - -public class FutureOfVoidProducer implements MapperResultProducer { - private static final GenericType> PRODUCED_TYPE = - new GenericType>() {}; - - @Override - public boolean canProduce(GenericType resultType) { - return resultType.equals(PRODUCED_TYPE); // (1) - } - - @Override - public ListenableFuture execute( - Statement statement, MapperContext context, EntityHelper entityHelper) { - CqlSession session = context.getSession(); // (2) - SettableFuture result = SettableFuture.create(); // (3) - session.executeAsync(statement).whenComplete( - (resultSet, error) -> { - if (error != null) { - result.setException(error); - } else { - result.set(null); - }}); - return result; - } - - @Override - public ListenableFuture wrapError(Exception error) { - return Futures.immediateFailedFuture(error); // (4) - } -} -``` - -All the producer methods will be invoked at runtime, by the mapper-generated DAO implementation: - -1. 
`canProduce()` is used to select a producer. All registered producers are tried in the order that - they were added, the first one that returns `true` is used. The [GenericType] argument is a - runtime representation of the static type. Here we know exactly the type we're looking for: - `ListenableFuture`. So we can use simple equality. -2. `execute()` is invoked once the statement is ready to be sent. Note that the producer is not only - responsible for converting the result, but also for invoking the appropriate execution method: to - this effect, it receives the [MapperContext], which provides access to the session. The - `entityHelper` argument is not used in this implementation (and in fact it happens to be `null`); - see the next producer for more explanations. -3. We execute the statement asynchronously to obtain a `CompletionStage`, and then convert it into a - `ListenableFuture`. -4. `wrapError()` handles any error thrown throughout the process (either while building the - statement, or while invoking `execute()` in this class). Clients of asynchronous APIs generally - expect to deal with exceptions in future callbacks rather than having to catch them directly, so - we create a failed future. - -Note that we specialized the return types of `execute()` and `wrapError()`, instead of using -`Object` as declared by the parent interface. This is not strictly necessary (the calling code only -knows the parent interface, so there *will* be an unchecked cast), but it makes the code a bit nicer -to read. 
- -#### Future of entity - -```java -public class FutureOfEntityProducer implements MapperResultProducer { - @Override - public boolean canProduce(GenericType resultType) { - return resultType.getRawType().equals(ListenableFuture.class); // (1) - } - - @Override - public ListenableFuture execute( - Statement statement, MapperContext context, EntityHelper entityHelper) { - assert entityHelper != null; - SettableFuture result = SettableFuture.create(); - CqlSession session = context.getSession(); - session - .executeAsync(statement) - .whenComplete( - (resultSet, error) -> { - if (error != null) { - result.setException(error); - } else { - Row row = resultSet.one(); - result.set((row == null) ? null : entityHelper.get(row)); // (2) - } - }); - return result; - } - - @Override - public ListenableFuture wrapError(Exception error) { - return Futures.immediateFailedFuture(error); // same as other producer - } -} -``` - -1. We could use an exact match with `ListenableFuture` like the previous example, but - that's not very scalable: in a real application, we'll probably have more than one entity, we - don't want to write a separate producer every time. So instead we match `ListenableFuture`. - Note that this would also match `ListenableFuture`, so we'll have to be careful of the order - of the producers (more on that in the "packaging" section below). -2. Whenever a return type references a mapped entity, the mapper processor will detect it and inject - the corresponding [EntityHelper] in the `execute()` method. This is a general-purpose utility - class used throughout the mapper, in this case the method we're more specifically interested in is - `get()`: it allows us to convert CQL rows into entity instances. - -At most one entity class is allowed in the return type. - -#### Matching more complex types - -The two examples above (exact match and matching the raw type) should cover the vast majority of -needs. 
Occasionally you may encounter cases with deeper level of parameterization, such as -`ListenableFuture>`. To match this you'll have to call `getType()` and switch to -the `java.lang.reflect` world: - -```java -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; - -// Matches ListenableFuture> -public boolean canProduce(GenericType genericType) { - if (genericType.getRawType().equals(ListenableFuture.class)) { - Type type = genericType.getType(); - if (type instanceof ParameterizedType) { - Type[] arguments = ((ParameterizedType) type).getActualTypeArguments(); - if (arguments.length == 1) { - Type argument = arguments[0]; - return argument instanceof ParameterizedType - && ((ParameterizedType) argument).getRawType().equals(Optional.class); - } - } - } - return false; -} -``` - -### Packaging the producers in a service - -Once all the producers are ready, we package them in a class that implements -[MapperResultProducerService]: - -```java -public class GuavaFutureProducerService implements MapperResultProducerService { - @Override - public Iterable getProducers() { - return Arrays.asList( - // Order matters, the most specific must come first. - new FutureOfVoidProducer(), new FutureOfEntityProducer()); - } -} -``` - -As hinted previously, the order of the producers matter: they will be tried from left to right. -Since our "future of entity" producer would also match `Void`, it must come last. - -The mapper uses the Java Service Provider mechanism to register producers: create a new file -`META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService`, -containing the name of the implementation: - -``` -some.package.name.GuavaFutureProducerService -``` - -You can put the producers, service and service descriptor directly in your application, or -distribute them as a standalone JAR if you intend to reuse them. - -### Disabling custom types - -Custom types are handled at runtime. 
This goes a bit against the philosophy of the rest of the -object mapper, where most of the work is done at compile time thanks to annotation processing. There -are ways to extend the mapper processor, but we feel that this would be too complicated for this use -case. - -One downside is that validation can now only be done at runtime: if you use a return type that isn't -supported by any producer, you'll only find out when you call the method. - -**If you don't use custom types at all**, you can disable the feature with an annotation processor -flag: - -```xml - - - - maven-compiler-plugin - - -Acom.datastax.oss.driver.mapper.customResults.enabled=false - - - - -``` - -With this configuration, if a DAO method declares a non built-in return type, it will be surfaced as -a compiler error. - -[EntityHelper]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/EntityHelper.html -[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html -[MapperContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/MapperContext.html -[MapperResultProducer]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.html -[MapperResultProducerService]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.html diff --git a/manual/mapper/daos/delete/README.md b/manual/mapper/daos/delete/README.md deleted file mode 100644 index e67ecdc8a6e..00000000000 --- a/manual/mapper/daos/delete/README.md +++ /dev/null @@ -1,185 +0,0 @@ - - -## Delete methods - -Annotate a DAO method with [@Delete] to generate a query that deletes an [Entity](../../entities): - -```java -@Dao -public interface ProductDao { - @Delete - void delete(Product product); -} -``` - -### Parameters - -The method can operate on: - -* an entity instance: - - ```java - @Delete - 
void delete(Product product); - ``` - -* a primary key (partition key + clustering columns): - - ```java - @Delete(entityClass = Product.class) - void deleteById(UUID productId); - ``` - - In this case, the parameters must match the types of the [primary key - columns](../../entities/#primary-key-columns), in the exact order (as defined by the - [@PartitionKey] and [@ClusteringColumn] annotations). The parameter names don't necessarily need - to match the names of the columns. - - In addition, because the entity class can't be inferred from the method signature, it must be - specified via the annotation's `entityClass` element. - -* a subset of the primary key. As in the partition key, or partition key + subset of clustering - columns: - - ```java - // given: PRIMARY KEY ((product_id, day), customer_id, ts) - // delete all rows in partition - @Delete(entityClass = ProductSale.class) - void deleteByIdForDay(UUID productId, LocalDate day); - - // delete by partition key and partial clustering key - @Delete(entityClass = ProductSale.class) - void deleteByIdForCustomer(UUID productId, LocalDate day, UUID customerId); - - /* Note that the clustering columns in your primary key definition are significant. All - * preceding clustering columns must be provided if any are. - * - * For example, the following is *NOT VALID* because ts is provided, but customer_id is - * not. 
*/ - @Delete(entityClass = ProductSale.class) - void deleteByIdForTs(UUID productId, LocalDate day, long ts); - ``` - -* a number of parameters matching the placeholder markers in `customWhereClause`, for which - the parameters match the name and compatible java type of the markers: - - ```java - @Delete( - entityClass = ProductSale.class, - customWhereClause = - "id = :id and day = :day and customer_id = :customerId and ts >= :startTs and ts < :endTs") - ResultSet deleteInTimeRange(UUID id, String day, int customerId, UUID startTs, UUID endTs); - ``` - -An optional IF clause can be added to the generated query. Like `customWhereClause` it can contain -placeholders: - -```java -@Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") -void deleteIfDescriptionMatches(UUID productId, String expectedDescription); -``` - -A `Function` or `UnaryOperator` -can be added as the **last** parameter. It will be applied to the statement before execution. This -allows you to customize certain aspects of the request (page size, timeout, etc) at runtime. See -[statement attributes](../statement_attributes/). - -### Return type - -The method can return: - -* `void`. - -* a `boolean` or `Boolean`, which will be mapped to [ResultSet#wasApplied()]. This is intended for - IF EXISTS queries: - - ```java - /** @return true if the product did exist */ - @Delete(ifExists = true) - boolean deleteIfExists(Product product); - ``` - -* a [ResultSet]. This is intended for queries with custom IF clauses; when those queries are not - applied, they return the actual values of the tested columns. - - ```java - @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") - ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription); - // if the condition fails, the result set will contain columns '[applied]' and 'description' - ``` - -* a [BoundStatement]. 
This is intended for queries where you will execute this statement later - or in a batch. - - ```java - @Delete - BoundStatement delete(Product product); - ``` - -* a [CompletionStage] or [CompletableFuture] of any of the above. The method will execute the query - asynchronously. Note that for result sets, you need to switch to [AsyncResultSet]. - - ```java - @Delete - CompletableFuture deleteAsync(Product product); - - @Delete(ifExists = true) - CompletionStage deleteIfExistsAsync(Product product); - - @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") - CompletionStage deleteIfDescriptionMatchesAsync(UUID productId, String expectedDescription); - ``` - -* a [ReactiveResultSet]. - - ```java - @Delete - ReactiveResultSet deleteReactive(Product product); - ``` - -* a [custom type](../custom_types). - -Note that you can also return a boolean or result set for non-conditional queries, but there's no -practical purpose for that since those queries always return `wasApplied = true` and an empty result -set. - -### Target keyspace and table - -If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the -generated query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work -if the mapper was built from a session that has a [default keyspace] set. - -If a table was specified when creating the DAO, then the generated query targets that table. -Otherwise, it uses the default table name for the entity (which is determined by the name of the -entity class and the [naming strategy](../../entities/#naming-strategy)). 
- -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html -[@Delete]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Delete.html -[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html - - -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html diff --git a/manual/mapper/daos/getentity/README.md b/manual/mapper/daos/getentity/README.md deleted file mode 100644 index de9a530b558..00000000000 --- a/manual/mapper/daos/getentity/README.md +++ /dev/null @@ -1,165 +0,0 @@ - - -## GetEntity methods - -Annotate a DAO method with [@GetEntity] to convert a core driver data structure into one or more -[Entities](../../entities): - -```java -@Dao -public interface ProductDao { - @GetEntity - Product asProduct(Row row); -} -``` - -The generated code will retrieve each entity property from the source, such as: - 
-```java -Product product = new Product(); -product.setId(row.get("id", UUID.class)); -product.setDescription(row.get("description", String.class)); -... -``` - -It does not perform a query. Instead, those methods are intended for cases where you already have a -query result, and just need the conversion logic. - -### Lenient mode - -By default, the mapper operates in "strict" mode: the source row must contain a matching column for -every property in the entity definition, *including computed ones*. If such a column is not found, -an error will be thrown. - -Starting with driver 4.12.0, the `@GetEntity` annotation has a new `lenient` attribute. If this -attribute is explicitly set to `true`, the mapper will operate in "lenient" mode: all entity -properties that have a matching column in the source row will be set. However, *unmatched properties -will be left untouched*. - -As an example to illustrate how lenient mode works, assume that we have the following entity and -DAO: - -```java -@Entity class Product { - - @PartitionKey int id; - String description; - float price; - // other members omitted -} - -interface ProductDao { - - @GetEntity(lenient = true) - Product getLenient(Row row); - -} -``` - -Then the following code would be possible: - -```java -// row does not contain the price column -Row row = session.execute("SELECT id, description FROM product").one(); -Product product = productDao.getLenient(row); -assert product.price == 0.0; -``` - -Since no `price` column was found in the source row, `product.price` wasn't set and was left to its -default value (0.0). Without lenient mode, the code above would throw an error instead. - -Lenient mode allows to achieve the equivalent of driver 3.x [manual mapping -feature](https://docs.datastax.com/en/developer/java-driver/3.10/manual/object_mapper/using/#manual-mapping). - -**Beware that lenient mode may result in incomplete entities being produced.** - -### Parameters - -The method must have a single parameter. 
The following types are allowed: - -* [GettableByName] or one of its subtypes (the most likely candidates are [Row] and [UdtValue]). -* [ResultSet]. -* [AsyncResultSet]. - -The data must match the target entity: the generated code will try to extract every mapped property, -and fail if one is missing. - -### Return type - -The method can return: - -* a single entity instance. If the argument is a result set type, the generated code will extract - the first row and convert it, or return `null` if the result set is empty. - - ```java - @GetEntity - Product asProduct(Row row); - - @GetEntity - Product firstRowAsProduct(ResultSet resultSet); - ``` - -* a [PagingIterable] of an entity class. In that case, the type of the parameter **must** be - [ResultSet]. Each row in the result set will be converted into an entity instance. - - ```java - @GetEntity - PagingIterable asProducts(ResultSet resultSet); - ``` - -* a [Stream] of an entity class. In that case, the type of the parameter **must** be [ResultSet]. - Each row in the result set will be converted into an entity instance. - - Note: even if streams are lazily evaluated, results are fetched synchronously; therefore, as the - returned stream is traversed, blocking calls may occur, as more results are fetched from the - server in the background. For details about the stream's characteristics, see - [PagingIterable.spliterator]. - - ```java - @GetEntity - Stream asProducts(ResultSet resultSet); - ``` - -* a [MappedAsyncPagingIterable] of an entity class. In that case, the type of the parameter **must** - be [AsyncResultSet]. Each row in the result set will be converted into an entity instance. - - ```java - @GetEntity - MappedAsyncPagingIterable asProducts(AsyncResultSet resultSet); - ``` - -If the return type doesn't match the parameter type (for example [PagingIterable] for -[AsyncResultSet]), the mapper processor will issue a compile-time error. 
- - -[@GetEntity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/GetEntity.html -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[GettableByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/GettableByName.html -[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html -[PagingIterable.spliterator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html#spliterator-- -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html - -[Stream]: https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html - - - diff --git a/manual/mapper/daos/increment/README.md b/manual/mapper/daos/increment/README.md deleted file mode 100644 index 44b017be2e1..00000000000 --- a/manual/mapper/daos/increment/README.md +++ /dev/null @@ -1,105 +0,0 @@ - - -## Increment methods - -Annotate a DAO method with [@Increment] to generate a query that updates a counter table that is -mapped to an entity: - -```java -// CREATE TABLE votes(article_id int PRIMARY KEY, up_votes counter, down_votes counter); - -@Entity -public class Votes { - @PartitionKey private int articleId; - private long upVotes; - private long downVotes; - ... // constructor(s), getters and setters, etc. 
-} - -@Dao -public interface VotesDao { - @Increment(entityClass = Votes.class) - void incrementUpVotes(int articleId, long upVotes); - - @Increment(entityClass = Votes.class) - void incrementDownVotes(int articleId, long downVotes); - - @Select - Votes findById(int articleId); -} -``` - -### Parameters - -The entity class must be specified with `entityClass` in the annotation. - -The method's parameters must start with the [full primary key](../../entities/#primary-key-columns), -in the exact order (as defined by the [@PartitionKey] and [@ClusteringColumn] annotations in the -entity class). The parameter names don't necessarily need to match the names of the columns, but the -types must match. Unlike other methods like [@Select](../select/) or [@Delete](../delete/), counter -updates cannot operate on a whole partition, they need to target exactly one row; so all the -partition key and clustering columns must be specified. - -Then must follow one or more parameters representing counter increments. Their type must be -`long` or `java.lang.Long`. The name of the parameter must match the name of the entity -property that maps to the counter (that is, the name of the getter without "get" and -decapitalized). Alternatively, you may annotate a parameter with [@CqlName] to specify the -raw column name directly; in that case, the name of the parameter does not matter: - -```java -@Increment(entityClass = Votes.class) -void incrementUpVotes(int articleId, @CqlName("up_votes") long foobar); -``` - -When you invoke the method, each parameter value is interpreted as a **delta** that will be applied -to the counter. In other words, if you pass 1, the counter will be incremented by 1. Negative values -are allowed. If you are using Cassandra 2.2 or above, you can use `Long` and pass `null` for some of -the parameters, they will be ignored (following [NullSavingStrategy#DO_NOT_SET](../null_saving/) -semantics). 
If you are using Cassandra 2.1, `null` values will trigger a runtime error. - -A `Function` or `UnaryOperator` -can be added as the **last** parameter. It will be applied to the statement before execution. This -allows you to customize certain aspects of the request (page size, timeout, etc) at runtime. See -[statement attributes](../statement_attributes/). - -### Return type - -The method can return `void`, a void [CompletionStage] or [CompletableFuture], or a -[ReactiveResultSet]. - -### Target keyspace and table - -If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the -generated query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work -if the mapper was built from a session that has a [default keyspace] set. - -If a table was specified when creating the DAO, then the generated query targets that table. -Otherwise, it uses the default table name for the entity (which is determined by the name of the -entity class and the naming convention). 
- -[@Increment]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Increment.html -[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html -[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html -[@CqlName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/CqlName.html - -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html diff --git a/manual/mapper/daos/insert/README.md b/manual/mapper/daos/insert/README.md deleted file mode 100644 index b90ffa33a32..00000000000 --- a/manual/mapper/daos/insert/README.md +++ /dev/null @@ -1,140 +0,0 @@ - - -## Insert methods - -Annotate a DAO method with [@Insert] to generate a query that inserts an [Entity](../../entities): - -```java -@Dao -public interface ProductDao { - @Insert - void insert(Product product); -} -``` - -### Parameters - -The first parameter must be the entity to insert. - -If the annotation defines a TTL and/or timestamp with placeholders, the method must have -corresponding additional parameters (same name, and a compatible Java type): - -```java -@Insert(ttl = ":ttl") -void insertWithTtl(Product product, int ttl); -``` - -The annotation can define a [null saving strategy](../null_saving/) that applies to the properties -of the entity to insert. 
- -A `Function` or `UnaryOperator` -can be added as the **last** parameter. It will be applied to the statement before execution. This -allows you to customize certain aspects of the request (page size, timeout, etc) at runtime. See -[statement attributes](../statement_attributes/). - -### Return type - -The method can return: - -* `void`. - -* the entity class. This is intended for `INSERT ... IF NOT EXISTS` queries. The method will return - `null` if the insertion succeeded, or the existing entity if it failed. - - ```java - @Insert(ifNotExists = true) - Product insertIfNotExists(Product product); - ``` - -* an [Optional] of the entity class, as a null-safe alternative for `INSERT ... IF NOT EXISTS` - queries. - - ```java - @Insert(ifNotExists = true) - Optional insertIfNotExists(Product product); - ``` - -* a `boolean` or `Boolean`, which will be mapped to [ResultSet#wasApplied()]. This is intended for - IF NOT EXISTS queries: - - ```java - /** @return true if the product did not exist */ - @Insert(ifNotExists = true) - boolean saveIfNotExists(Product product); - ``` - -* a [ResultSet]. This is intended for cases where you intend to inspect data associated with the - result, such as [ResultSet#getExecutionInfo()]: - - ```java - @Insert - ResultSet save(Product product); - ``` -* a [BoundStatement]. This is intended for cases where you intend to execute this statement later or in a batch: - - ```java - @Insert - BoundStatement save(Product product); - ``` - -* a [CompletionStage] or [CompletableFuture] of any of the above. The mapper will execute the query - asynchronously. - - ```java - @Insert - CompletionStage insert(Product product); - - @Insert(ifNotExists = true) - CompletableFuture insertIfNotExists(Product product); - - @Insert(ifNotExists = true) - CompletableFuture> insertIfNotExists(Product product); - ``` - -* a [ReactiveResultSet]. - - ```java - @Insert - ReactiveResultSet insertReactive(Product product); - ``` - -* a [custom type](../custom_types). 
- -### Target keyspace and table - -If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the -generated query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work -if the mapper was built from a session that has a [default keyspace] set. - -If a table was specified when creating the DAO, then the generated query targets that table. -Otherwise, it uses the default table name for the entity (which is determined by the name of the -entity class and the [naming strategy](../../entities/#naming-strategy)). - -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@Insert]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Insert.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- -[ResultSet#getExecutionInfo()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#getExecutionInfo-- -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html - -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html -[Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html diff --git a/manual/mapper/daos/null_saving/README.md b/manual/mapper/daos/null_saving/README.md deleted file mode 100644 index eed98934356..00000000000 
--- a/manual/mapper/daos/null_saving/README.md +++ /dev/null @@ -1,121 +0,0 @@ - - -## Null saving strategy - -The null saving strategy controls how null entity properties are handled when writing to the -database. It can be configured either for each method, or globally at the DAO level. - -Two strategies are available: - -* [DO_NOT_SET]: the mapper won't call the corresponding setter on the [BoundStatement]. The - generated code looks approximately like this: - - ```java - if (entity.getDescription() != null) { - boundStatement = boundStatement.setString("description", entity.getDescription()); - } - ``` - - This avoids inserting tombstones for null properties. On the other hand, if the query is an - update and the column previously had another value, it won't be overwritten. - - Note that unset values ([CASSANDRA-7304]) are only supported with [native - protocol](../../../core/native_protocol/) v4 (Cassandra 2.2) or above . If you try to use this - strategy with a lower Cassandra version, the mapper will throw an [MapperException] when you try - to access the corresponding DAO. - -* [SET_TO_NULL]: the mapper will always call the setter, even with a null value. The generated code - looks approximately like this: - - ```java - // Called even if entity.getDescription() == null - boundStatement = boundStatement.setString("description", entity.getDescription()); - ``` - -### Method level - -Specify `nullSavingStrategy` on the method annotation: - -```java -import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.SET_TO_NULL; - -@Update(nullSavingStrategy = SET_TO_NULL) -void update(Product product); -``` - -This applies to [@Insert](../insert/), [@Query](../query/), [@SetEntity](../setentity/) and -[@Update](../update/) (other method types don't need it since they don't write data). - -### DAO level - -Annotate your [DAO](../../daos/) interface with [@DefaultNullSavingStrategy]. 
Any method that does -not explicitly define its strategy inherits the DAO-level one: - -```java -@Dao -@DefaultNullSavingStrategy(SET_TO_NULL) -public interface ProductDao { - - @Insert - void insert(Product product); // inherits SET_TO_NULL - - @Update(nullSavingStrategy = DO_NOT_SET) - void update(Product product); // uses DO_NOT_SET -} -``` - -If you don't define a DAO-level default, any method that does not declare its own value defaults to -[DO_NOT_SET]: - -```java -@Dao -public interface ProductDao { - - @Insert - void insert(Product product); // defaults to DO_NOT_SET -} -``` - -Note that you can use inheritance to set a common default for all your DAOs: - -```java -@DefaultNullSavingStrategy(SET_TO_NULL) -public interface InventoryDao {} - -@Dao -public interface ProductDao extends InventoryDao { - @Insert - void insert(Product product); // inherits SET_TO_NULL -} - -@Dao -public interface UserDao extends InventoryDao { - @Insert - void insert(User user); // inherits SET_TO_NULL -} -``` - -[@DefaultNullSavingStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[MapperException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/MapperException.html -[DO_NOT_SET]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.html#DO_NOT_SET -[SET_TO_NULL]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.html#SET_TO_NULL - -[CASSANDRA-7304]: https://issues.apache.org/jira/browse/CASSANDRA-7304 diff --git a/manual/mapper/daos/query/README.md b/manual/mapper/daos/query/README.md deleted file mode 100644 index a11753da880..00000000000 --- a/manual/mapper/daos/query/README.md +++ /dev/null @@ -1,151 +0,0 @@ - - 
-## Query methods - -Annotate a DAO method with [@Query] to provide your own query string: - -```java -@Dao -public interface SensorReadingDao { - @Query("SELECT count(*) FROM sensor_readings WHERE id = :id") - long countById(int id); -} -``` - -This is the equivalent of what was called "accessor methods" in the driver 3 mapper. - -### Parameters - -The query string will typically contain CQL placeholders. The method's parameters must match those -placeholders: same name and a compatible Java type. - -```java -@Query("SELECT count(*) FROM sensor_readings WHERE id = :id AND year = :year") -long countByIdAndYear(int id, int year); -``` - -The annotation can define a [null saving strategy](../null_saving/) that applies to the method -parameters. - -A `Function` or `UnaryOperator` -can be added as the **last** parameter. It will be applied to the statement before execution. This -allows you to customize certain aspects of the request (page size, timeout, etc) at runtime. See -[statement attributes](../statement_attributes/). - -### Return type - -The method can return: - -* `void`. - -* a `boolean` or `Boolean`, which will be mapped to [ResultSet#wasApplied()]. This is intended for - conditional queries. - -* a `long` or `Long`, which will be mapped to the first column of the first row, expecting CQL type - `BIGINT`. This is intended for count queries. The method will fail if the result set is empty, or - does not match the expected format. - -* a [Row]. This means the result is not converted, the mapper only extracts the first row of the - result set and returns it. The method will return `null` if the result set is empty. - -* a single instance of an [Entity](../../entities/) class. The method will extract the first row and - convert it, or return `null` if the result set is empty. - -* an [Optional] of an entity class. The method will extract the first row and convert - it, or return `Optional.empty()` if the result set is empty. - -* a [ResultSet]. 
The method will return the raw query result, without any conversion. - -* a [BoundStatement]. This is intended for queries where you will execute this statement later - or in a batch. - -* a [PagingIterable]. The method will convert each row into an entity instance. - -* a [Stream]. The method will convert each row into an entity instance. For details about the - stream's characteristics, see [PagingIterable.spliterator]. - -* a [CompletionStage] or [CompletableFuture] of any of the above. The method will execute the query - asynchronously. Note that for result sets and iterables, you need to switch to the asynchronous - equivalent [AsyncResultSet] and [MappedAsyncPagingIterable] respectively. - -* a [ReactiveResultSet], or a [MappedReactiveResultSet] of the entity class. - -* a [custom type](../custom_types). - -### Target keyspace and table - -To avoid hard-coding the keyspace and table name, the query string supports 3 additional -placeholders: `${keyspaceId}`, `${tableId}` and `${qualifiedTableId}`. They get substituted at DAO -initialization time, with the [keyspace and table that the DAO was built -with](../../mapper/#dao-factory-methods). - -For example, given the following: - -```java -@Dao -public interface TestDao { - @Query("SELECT * FROM ${keyspaceId}.${tableId}") - ResultSet queryFromKeyspaceAndTable(); - - @Query("SELECT * FROM ${qualifiedTableId}") - ResultSet queryFromQualifiedTable(); -} - -@Mapper -public interface TestMapper { - @DaoFactory - TestDao dao(@DaoKeyspace String keyspace, @DaoTable String table); - - @DaoFactory - TestDao dao(@DaoTable String table); -} - -TestDao dao1 = mapper.dao("ks", "t"); -TestDao dao2 = mapper.dao("t"); -``` - -Then: - -* `dao1.queryFromKeyspaceAndTable()` and `dao1.queryFromQualifiedTable()` both execute `SELECT * - FROM ks.t`. -* `dao2.queryFromKeyspaceAndTable()` fails: no keyspace was specified for this DAO, so - `${keyspaceId}` can't be substituted. 
-
-* `dao2.queryFromQualifiedTable()` executes `SELECT * FROM t`. In other words, `${qualifiedTableId}`
- uses the keyspace if it is available, but resolves to the table name only if it isn't. Whether the
- query succeeds or not depends on whether the session that the mapper was built with has a [default
- keyspace].
-
-[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier-
-[@Query]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Query.html
-[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html
-[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html
-[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied--
-[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html
-[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html
-[PagingIterable.spliterator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html#spliterator--
-[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html
-[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html
-[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html
-[MappedReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.html
-
-[CompletionStage]:
https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html -[Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html -[Stream]: https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html diff --git a/manual/mapper/daos/queryprovider/README.md b/manual/mapper/daos/queryprovider/README.md deleted file mode 100644 index 593a3a6b1a4..00000000000 --- a/manual/mapper/daos/queryprovider/README.md +++ /dev/null @@ -1,166 +0,0 @@ - - -## Query provider methods - -Annotate a DAO method with [@QueryProvider] to delegate the execution of the query to one of your -own classes: - -```java -@Dao -public interface SensorDao { - @QueryProvider(providerClass = FindSliceProvider.class, entityHelpers = SensorReading.class) - PagingIterable findSlice(int id, Integer month, Integer day); -} - -/* Schema: - CREATE TABLE sensor_reading(sensor_id int, month int, day int, value double, - PRIMARY KEY (id, month, day) - WITH CLUSTERING ORDER BY (month DESC, day DESC); - */ -``` - -Use this for requests that can't be expressed as static query strings. For example, we want the -`month` and `day` parameters above to be optional: - -* if both are present, we query for a particular day: `WHERE id = ? AND month = ? AND day = ?` -* if `day` is null, we query for the whole month: `WHERE id = ? AND month = ?` -* if `month` is also null, we query the whole partition: `WHERE id = ?` - -We assume that you've already written a corresponding [entity](../../entities/) class: - -```java -@Entity -public class SensorReading { - @PartitionKey private int id; - @ClusteringColumn(1) private int month; - @ClusteringColumn(2) private int day; - private double value; - // constructors, getters and setters omitted for conciseness -} -``` - -### Provider class - -[@QueryProvider.providerClass()][providerClass] indicates which class to delegate to. 
The mapper -will create one instance for each DAO instance. - -This class must expose a constructor that is accessible from the DAO interface's package. - -The first constructor argument must always be [MapperContext]. This is a utility type that -provides access to mapper- and DAO-level state. In particular, this is how you get hold of the -session. - -If [@QueryProvider.entityHelpers()][entityHelpers] is specified, the constructor must take an -additional [EntityHelper] argument for each provided entity class. We specified -`SensorReading.class` so our argument types are `(MapperContext, EntityHelper)`. - -An entity helper is a utility type generated by the mapper. One thing it can do is construct query -templates (with the [query builder](../../../query_builder/)). We want to retrieve entities so we -use `selectStart()`, chain a first WHERE clause for the id (which is always present), and store the -result in a field for later use: - -```java -public class FindSliceProvider { - private final CqlSession session; - private final EntityHelper sensorReadingHelper; - private final Select selectStart; - - public FindSliceProvider( - MapperContext context, EntityHelper sensorReadingHelper) { - this.session = context.getSession(); - this.sensorReadingHelper = sensorReadingHelper; - this.selectStart = - sensorReadingHelper.selectStart().whereColumn("id").isEqualTo(bindMarker()); - } - - ... // (to be continued) -``` - -### Provider method - -[@QueryProvider.providerMethod()][providerMethod] indicates which method to invoke on the provider -class. When it's not specified (as is our case), it defaults to the same name as the DAO method. - -The provider method must be accessible from the DAO interface's package, and have the same -parameters and return type as the DAO method. - -Here is the full implementation: - -```java - ... 
// public class FindSliceProvider (continued) - - public PagingIterable findSlice(int id, Integer month, Integer day) { - - // (1) complete the query - Select select = this.selectStart; - if (month != null) { - select = select.whereColumn("month").isEqualTo(bindMarker()); - if (day != null) { - select = select.whereColumn("day").isEqualTo(bindMarker()); - } - } - - // (2) prepare - PreparedStatement preparedStatement = session.prepare(select.build()); - - // (3) bind - BoundStatementBuilder boundStatementBuilder = - preparedStatement.boundStatementBuilder().setInt("id", id); - if (month != null) { - boundStatementBuilder = boundStatementBuilder.setInt("month", month); - if (day != null) { - boundStatementBuilder = boundStatementBuilder.setInt("day", day); - } - } - - // (4) execute and map the results - return session.execute(boundStatementBuilder.build()).map(sensorReadingHelper::get); - } -} -``` - -1. Retrieve the SELECT query that was started in the constructor, and append additional WHERE - clauses as appropriate. - - Note that all query builder objects are immutable, so this creates a new instance every time, - there is no risk of corrupting the original field. - -2. Prepare the resulting statement. - - `session.prepare` caches its results, so if we already prepared that particular combination, - there is no network call at this step. - -3. Bind the parameters, according to the WHERE clauses we've generated. - -4. Execute the request. - - Another useful helper feature is mapping entities to/from low-level driver data structures: - `get` extracts a `SensorReading` from a `Row`, so by mapping it to the [ResultSet] we get back - the desired [PagingIterable][PagingIterable]. 
- - -[@QueryProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html -[providerClass]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#providerClass-- -[entityHelpers]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#entityHelpers-- -[providerMethod]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#providerMethod-- -[MapperContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/MapperContext.html -[EntityHelper]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/EntityHelper.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html diff --git a/manual/mapper/daos/select/README.md b/manual/mapper/daos/select/README.md deleted file mode 100644 index fb6c4ca2077..00000000000 --- a/manual/mapper/daos/select/README.md +++ /dev/null @@ -1,200 +0,0 @@ - - -## Select methods - -Annotate a DAO method with [@Select] to generate a query that selects one or more rows, and maps -them to [Entities](../../entities): - -```java -@Dao -public interface ProductDao { - @Select - Product findById(UUID productId); -} -``` - -### Parameters - -If the annotation doesn't have a [customWhereClause()], the mapper defaults to a selection by -primary key (partition key + clustering columns). The method's parameters must match the types of -the [primary key columns](../../entities/#primary-key-columns), in the exact order (as defined by -the [@PartitionKey] and [@ClusteringColumn] annotations). The parameter names don't necessarily need -to match the names of the columns. 
- -To select more than one entity within a partition, a subset of primary key components may be -specified as long as enough parameters are provided to account for the partition key. - -```java -// given: PRIMARY KEY ((product_id, day), customer_id, ts) -public interface ProductSaleDao { - @Select - PagingIterable findByDay(UUID productId, LocalDate day); - - @Select - PagingIterable findByDayForCustomer(UUID productId, LocalDate day, UUID customerID); - - /* Note that the clustering columns in your primary key definition are significant. All - * preceding clustering columns must be provided if any are. - * - * For example, the following is *NOT VALID* because ts is provided, but customer_id is - * not. */ - @Select - PagingIterable findByDayForTs(UUID productId, LocalDate day, long ts); -} -``` - -To select all rows within a table, you may also provide no parameters. - -```java -@Dao -public interface ProductDao { - @Select - PagingIterable all(); -} -``` - -If the annotation has a [customWhereClause()], it completely replaces the WHERE clause. The provided -string can contain named placeholders. In that case, the method must have a corresponding parameter -for each, with the same name and a compatible Java type. - -```java -@Select(customWhereClause = "description LIKE :searchString") -PagingIterable findByDescription(String searchString); -``` - -The generated SELECT query can be further customized with [limit()], [perPartitionLimit()], -[orderBy()], [groupBy()] and [allowFiltering()]. Some of these clauses can also contain placeholders -whose values will be provided through additional method parameters. Note that it is sometimes not -possible to determine if a parameter is a primary key component or a placeholder value; therefore -the rule is that **if your method takes a partial primary key, the first parameter that is not a -primary key component must be explicitly annotated with -[@CqlName](../../entities/#user-provided-names)**. 
For example if the primary key is `((day int, -hour int, minute int), ts timestamp)`: - -```java -// Annotate 'l' so that it's not mistaken for the second PK component -@Select(limit = ":l") -PagingIterable findDailySales(int day, @CqlName("l") int l); -``` - -A `Function` or `UnaryOperator` -can be added as the **last** parameter. It will be applied to the statement before execution. This -allows you to customize certain aspects of the request (page size, timeout, etc) at runtime. See -[statement attributes](../statement_attributes/). - -### Return type - -In all cases, the method can return: - -* the entity class itself. If the query returns no rows, the method will return `null`. If it - returns more than one row, subsequent rows will be discarded. - - ```java - @Select - Product findById(UUID productId); - ``` - -* an [Optional] of the entity class. If the query returns no rows, the method will return - `Optional.empty()`. If it returns more than one row, subsequent rows will be discarded. - - ```java - @Select - Optional findById(UUID productId); - ``` - -* a [PagingIterable] of the entity class. It behaves like a result set, except that each element is - a mapped entity instead of a row. - - ```java - @Select(customWhereClause = "description LIKE :searchString") - PagingIterable findByDescription(String searchString); - ``` - -* a [Stream] of the entity class. It behaves like a result set, except that each element is a mapped - entity instead of a row. - - Note: even if streams are lazily evaluated, the query will be executed synchronously; also, as - the returned stream is traversed, more blocking calls may occur, as more results are fetched - from the server in the background. For details about the stream's characteristics, see - [PagingIterable.spliterator]. - - ```java - @Select(customWhereClause = "description LIKE :searchString") - Stream findByDescription(String searchString); - ``` - -* a [CompletionStage] or [CompletableFuture] of any of the above. 
The method will execute the query - asynchronously. Note that for iterables, you need to switch to the asynchronous equivalent - [MappedAsyncPagingIterable]. - - ```java - @Select - CompletionStage findByIdAsync(UUID productId); - - @Select - CompletionStage> findByIdAsync(UUID productId); - - @Select(customWhereClause = "description LIKE :searchString") - CompletionStage> findByDescriptionAsync(String searchString); - ``` - - For streams, even if the initial query is executed asynchronously, traversing the returned - stream may block the traversing thread. Blocking calls can indeed be required as more results - are fetched from the server in the background. For this reason, _the usage of - `CompletionStage>` cannot be considered as a fully asynchronous execution method_. - -* a [MappedReactiveResultSet] of the entity class. - - ```java - @Select(customWhereClause = "description LIKE :searchString") - MappedReactiveResultSet findByDescriptionReactive(String searchString); - ``` - -* a [custom type](../custom_types). - -### Target keyspace and table - -If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the -generated query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work -if the mapper was built from a session that has a [default keyspace] set. - -If a table was specified when creating the DAO, then the generated query targets that table. -Otherwise, it uses the default table name for the entity (which is determined by the name of the -entity class and the [naming strategy](../../entities/#naming-strategy)). 
- -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html -[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html -[@Select]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html -[allowFiltering()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#allowFiltering-- -[customWhereClause()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#customWhereClause-- -[groupBy()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#groupBy-- -[limit()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#limit-- -[orderBy()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#orderBy-- -[perPartitionLimit()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#perPartitionLimit-- -[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html -[PagingIterable.spliterator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html#spliterator-- -[MappedReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.html - -[CompletionStage]: 
https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html -[Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html -[Stream]: https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html diff --git a/manual/mapper/daos/setentity/README.md b/manual/mapper/daos/setentity/README.md deleted file mode 100644 index eeb7957f62e..00000000000 --- a/manual/mapper/daos/setentity/README.md +++ /dev/null @@ -1,138 +0,0 @@ - - -## SetEntity methods - -Annotate a DAO method with [@SetEntity] to fill a core driver data structure from an -[Entity](../../entities): - -```java -public interface ProductDao { - @SetEntity - BoundStatement bind(Product product, BoundStatement boundStatement); -} -``` - -The generated code will set each entity property on the target, such as: - -```java -boundStatement = boundStatement.set("id", product.getId(), UUID.class); -boundStatement = boundStatement.set("description", product.getDescription(), String.class); -... -``` - -It does not perform a query. Instead, those methods are intended for cases where you will execute -the query yourself, and just need the conversion logic. - -### Lenient mode - -By default, the mapper operates in "strict" mode: the target statement must contain a matching -column for every property in the entity definition, *except computed ones*. If such a column is not -found, an error will be thrown. - -Starting with driver 4.12.0, the `@SetEntity` annotation has a new `lenient` attribute. If this -attribute is explicitly set to `true`, the mapper will operate in "lenient" mode: all entity -properties that have a matching column in the target statement will be set. However, *unmatched -properties will be left untouched*. 
- -As an example to illustrate how lenient mode works, assume that we have the following entity and -DAO: - -```java -@Entity class Product { - - @PartitionKey int id; - String description; - float price; - // other members omitted -} - -interface ProductDao { - - @SetEntity(lenient = true) - BoundStatement setLenient(Product product, BoundStatement stmt); - -} -``` - -Then the following code would be possible: - -```java -Product product = new Product(1, "scented candle", 12.99); -// stmt does not contain the price column -BoundStatement stmt = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)").bind(); -stmt = productDao.setLenient(product, stmt); -``` - -Since no `price` column was found in the target statement, `product.price` wasn't read (if the -statement is executed, the resulting row in the database will have a price of zero). Without lenient -mode, the code above would throw an error instead. - -Lenient mode allows to achieve the equivalent of driver 3.x [manual mapping -feature](https://docs.datastax.com/en/developer/java-driver/3.10/manual/object_mapper/using/#manual-mapping). - -**Beware that lenient mode may result in incomplete rows being inserted in the database.** - -### Parameters - -The method must have two parameters: one is the entity instance, the other must be a subtype of -[SettableByName] \(the most likely candidates are [BoundStatement], [BoundStatementBuilder] and -[UdtValue]). Note that you can't use [SettableByName] itself. - -The order of the parameters does not matter. - -The annotation can define a [null saving strategy](../null_saving/) that applies to the properties -of the object to set. This is only really useful with bound statements (or bound statement -builders): if the target is a [UdtValue], the driver sends null fields in the serialized form -anyway, so both strategies are equivalent. - -### Return type - -The method can either be void, or return the exact same type as its settable parameter. 
- -```java -@SetEntity -void bind(Product product, UdtValue udtValue); - -@SetEntity -void bind(Product product, BoundStatementBuilder builder); -``` - -Note that if the settable parameter is immutable, the method should return a new instance, because -the generated code won't be able to modify the argument in place. This is the case for -[BoundStatement], which is immutable in the driver: - -```java -// Wrong: statement won't be modified -@SetEntity -void bind(Product product, BoundStatement statement); - -// Do this instead: -@SetEntity -BoundStatement bind(Product product, BoundStatement statement); -``` - -If you use a void method with [BoundStatement], the mapper processor will issue a compile-time -warning. - -[@SetEntity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/SetEntity.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[BoundStatementBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.html -[SettableByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/SettableByName.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html diff --git a/manual/mapper/daos/statement_attributes/README.md b/manual/mapper/daos/statement_attributes/README.md deleted file mode 100644 index f772df36775..00000000000 --- a/manual/mapper/daos/statement_attributes/README.md +++ /dev/null @@ -1,82 +0,0 @@ - - -## Statement attributes - -The [@Delete](../delete/), [@Insert](../insert/), [@Query](../query/), [@Select](../select/) and -[@Update](../update/) annotations allow you to control some aspects of the execution of the -underlying statement, such as the consistency level, timeout, etc. 
- -### As a parameter - -If the **last** parameter of any of those methods is a `Function` (or `UnaryOperator`), the mapper will apply that -function to the statement before executing it: - -```java -@Dao -public interface ProductDao { - @Select - Product findById( - int productId, Function setAttributes); -} - -Function statementFunction = - builder -> builder.setConsistencyLevel(DefaultConsistencyLevel.ONE).setPageSize(500); - -Product product = dao.findById(1, statementFunction); -``` - -Use this if you need to execute the same DAO methods with different configurations that can change -dynamically. - -If you reuse the same set of attributes often, you can store the function as a constant to reduce -allocation costs. - -### As an annotation - -Attributes can also be provided statically by annotating the method with [@StatementAttributes]: - -```java -@Dao -public interface ProductDao { - @Select - @StatementAttributes(consistencyLevel = "ONE", pageSize = 500) - Product findById(int productId); -} -``` - -It's possible to have both the annotation and the function parameter; in that case, the annotation -will be applied first, and the function second: - -```java -@Dao -public interface ProductDao { - @Select - @StatementAttributes(consistencyLevel = "ONE", pageSize = 500) - Product findById( - int productId, Function setAttributes); -} - -// Will use CL = QUORUM, page size = 500 -Product product = - dao.findById(1, builder -> builder.setConsistencyLevel(DefaultConsistencyLevel.QUORUM)); -``` - -[@StatementAttributes]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.html diff --git a/manual/mapper/daos/update/README.md b/manual/mapper/daos/update/README.md deleted file mode 100644 index 87e9286c800..00000000000 --- a/manual/mapper/daos/update/README.md +++ /dev/null @@ -1,174 +0,0 @@ - - -## Update methods - -Annotate a DAO method with [@Update] to generate a query that updates one or more 
-[entities](../../entities): - -```java -@Dao -public interface ProductDao { - @Update - void update(Product product); -} -``` - -### Parameters - -The first parameter must be an entity instance. All of its non-PK properties will be interpreted as -values to update. - -* If the annotation doesn't have a `customWhereClause`, the mapper defaults to an update by primary - key (partition key + clustering columns). The WHERE clause is generated automatically, and bound - with the PK components of the provided entity instance. The query will update at most one row. - -* If the annotation has a `customWhereClause`, it completely replaces the WHERE clause. If the - provided string contains placeholders, the method must have corresponding additional parameters - (same name, and a compatible Java type): - - ```java - @Update(customWhereClause = "description LIKE :searchString") - void updateIfDescriptionMatches(Product product, String searchString); - ``` - - The PK components of the provided entity are ignored. Multiple rows may be updated. - -If the query has a custom timestamp or TTL with placeholders, the method must have corresponding -additional parameters (same name, and a compatible Java type): - -```java -@Update(timestamp = ":timestamp") -void updateWithTimestamp(Product product, long timestamp); - -@Update(ttl = ":ttl") -void updateWithTtl(Product product, int ttl); -``` - -An optional IF clause can be appended to the generated query. It can contain placeholders, for which -the method must have corresponding parameters (same name, and a compatible Java type): - -```java -@Update(customIfClause = "description = :expectedDescription") -ResultSet updateIfDescriptionMatches(Product product, String expectedDescription); -``` - -An optional IF EXISTS clause at the end of the generated UPDATE query. 
This is mutually exclusive -with `customIfClause` (if both are set, the mapper processor will generate a compile-time error): - -```java -@Update(ifExists = true) -boolean updateIfExists(Product product); -``` - -The annotation can define a [null saving strategy](../null_saving/) that applies to the properties -of the entity to update. This allows you to implement partial updates, by passing a "template" -entity that only contains the properties you want to modify: - -```java -// DAO method definition: -@Update(customWhereClause = "id IN (:id1, :id2)", nullSavingStrategy = DO_NOT_SET) -void updateWhereIdIn(Product product, UUID id1, UUID id2); - -// Client code: -Product template = new Product(); -template.setDescription("Coming soon"); // all other properties remain null -dao.updateWhereIdIn(template, 42, 43); // Will only update 'description' on the selected rows -``` - -A `Function` or `UnaryOperator` -can be added as the **last** parameter. It will be applied to the statement before execution. This -allows you to customize certain aspects of the request (page size, timeout, etc) at runtime. See -[statement attributes](../statement_attributes/). - -### Return type - -The method can return: - -* `void`. - -* a `boolean` or [Boolean], which will be mapped to `ResultSet#wasApplied()`. This is intended for - conditional queries. - - ```java - @Update(ifExists = true) - boolean updateIfExists(Product product); - ``` - -* a [ResultSet]. The method will return the raw query result, without any conversion. This is - intended for queries with custom IF clauses; when those queries are not applied, they return the - actual values of the tested columns. - - ```java - @Update(customIfClause = "description = :expectedDescription") - ResultSet updateIfExists(Product product); - // if the condition fails, the result set will contain columns '[applied]' and 'description' - ``` - -* a [BoundStatement]. 
This is intended for queries where you will execute this statement later or in a batch: - - ```java - @Update - BoundStatement update(Product product); - ``` - -* a [CompletionStage] or [CompletableFuture] of any of the above. The mapper will execute the query - asynchronously. - Note that for result sets, you need to switch to the asynchronous equivalent [AsyncResultSet]. - - ```java - @Update - CompletionStage update(Product product); - - @Update(ifExists = true) - CompletableFuture updateIfExists(Product product); - - @Update(customIfClause = "description = :expectedDescription") - CompletableFuture updateIfDescriptionMatches(Product product, String expectedDescription); - ``` - -* a [ReactiveResultSet]. - - ```java - @Update - ReactiveResultSet updateReactive(Product product); - ``` - -* a [custom type](../custom_types). - -### Target keyspace and table - -If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the -generated query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work -if the mapper was built from a session that has a [default keyspace] set. - -If a table was specified when creating the DAO, then the generated query targets that table. -Otherwise, it uses the default table name for the entity (which is determined by the name of the -entity class and the naming convention). 
- -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@Update]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Update.html - -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[Boolean]: https://docs.oracle.com/javase/8/docs/api/index.html?java/lang/Boolean.html -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html diff --git a/manual/mapper/entities/README.md b/manual/mapper/entities/README.md deleted file mode 100644 index 978c781245f..00000000000 --- a/manual/mapper/entities/README.md +++ /dev/null @@ -1,595 +0,0 @@ - - -## Entities - -### Quick overview - -POJO annotated with [@Entity], must expose a no-arg constructor. - -* class-level annotations: - * [@NamingStrategy] - * [@CqlName] - * [@HierarchyScanStrategy] - * [@PropertyStrategy] -* field/method-level annotations: - * [@PartitionKey], [@ClusteringColumn] - * [@Computed] - * [@Transient] - * [@CqlName] -* can inherit annotated fields/methods and [@NamingStrategy]. Only use [@Entity] on concrete - classes. - ------ - -An entity is a Java class that will be mapped to a Cassandra table or [UDT](../../core/udts). 
-Entities are used as arguments or return types of [DAO](../daos/) methods; they can also be nested -inside other entities (to map UDT columns). - -In order to be detected by the mapper, the class must be annotated with [@Entity]: - -```java -@Entity -public class Product { - @PartitionKey private UUID productId; - private String description; - - public UUID getProductId() { return productId; } - public void setProductId(UUID productId) { this.productId = productId; } - public String getDescription() { return description; } - public void setDescription(String description) { this.description = description; } -} -``` - -Each entity property will be mapped to a CQL column. The way properties are detected is -configurable, as explained below: - -### Property detection - -#### Mutability - -By default, the mapper expects mutable entity classes: - -```java -@Entity -public class Product { - @PartitionKey private UUID productId; - - public Product() {} - - public UUID getProductId() { return productId; } - public void setProductId(UUID productId) { this.productId = productId; } -} -``` - -With mutable entities: - -* each entity property: - * **must** have a non-void, no-argument getter method. - * **must** have a corresponding setter method: matching name, and exactly one argument matching - the getter's return type. Note that the return type of the setter does not matter. - * *may* have a corresponding field: matching name and type. -* the type **must** expose a non-private, no-argument constructor. - -When the mapper reads a mutable entity from the database, it will invoke the no-argument -constructor to materialize the instance, and then read and set the properties one by one. 
- -You can switch to an immutable style with the [@PropertyStrategy] annotation: - -```java -@Entity -@PropertyStrategy(mutable = false) -public class ImmutableProduct { - @PartitionKey private final UUID productId; - - public ImmutableProduct(UUID productId) { this.productId = productId; } - - public UUID getProductId() { return productId; } -} -``` - -With immutable entities: - -* each entity property: - * **must** have a non-void, no-argument getter method. The mapper will not look for a setter. - * *may* have a corresponding field: matching name and type. You'll probably want to make that - field final (although that has no impact on the mapper-generated code). -* the type **must** expose a non-private constructor that takes every - non-[transient](#transient-properties) property, in the declaration order. - -When the mapper reads an immutable entity from the database, it will first read all properties, then -invoke the "all columns" constructor to materialize the instance. - -Note: the "all columns" constructor must take the properties in the order that they are declared in -the entity. If the entity inherits properties from parent types, those must come last in the -constructor signature, ordered from the closest parent to the farthest. If things get too -complicated, a good trick is to deliberately omit the constructor to let the mapper processor fail: -the error message describes the expected signature. 
- -#### Accessor styles - -By default, the mapper looks for JavaBeans-style accessors: getter prefixed with "get" (or "is" for -boolean properties) and, if the entity is mutable, setter prefixed with "set": - -```java -@Entity -public class Product { - @PartitionKey private UUID productId; - - public UUID getProductId() { return productId; } - public void setProductId(UUID productId) { this.productId = productId; } -} -``` - -You can switch to a "fluent" style (no prefixes) with the [@PropertyStrategy] annotation: - -```java -import static com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle; -import static com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; - -@Entity -@PropertyStrategy(getterStyle = GetterStyle.FLUENT, setterStyle = SetterStyle.FLUENT) -public class Product { - @PartitionKey private UUID productId; - - public UUID productId() { return productId; } - public void productId(UUID productId) { this.productId = productId; } -} -``` - -Note that if you use the fluent style with immutable entities, Java's built-in `hashCode()` and -`toString()` methods would qualify as properties. The mapper skips them automatically. If you have -other false positives that you'd like to ignore, mark them as [transient](#transient-properties). - -### Naming strategy - -The mapper infers the database schema from your Java model: the entity class's name is converted -into a table name, and the property names into column names. - -You can control the details of this conversion by annotating your entity class with -[@NamingStrategy]. - -#### Naming conventions - -The simplest strategy is to use one of the mapper's built-in conventions: - -```java -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.UPPER_SNAKE_CASE; - -@Entity -@NamingStrategy(convention = UPPER_SNAKE_CASE) -public class Product { - @PartitionKey private UUID productId; - ... -} -``` - -Conventions convert names according to pre-defined rules. 
For example, with the `UPPER_SNAKE_CASE` -convention used above, the mapper expects the following schema: - -``` -CREATE TABLE "PRODUCT"("PRODUCT_ID" int primary key ...) -``` - -For the list of all available conventions, look at the enum constants in [NamingConvention]. - -If you don't annotate your class with [@NamingStrategy], the mapper defaults to the -`SNAKE_CASE_INSENSITIVE` convention. - -#### User-provided name converter - -If none of the built-in conventions work for you, you can provide your own conversion logic by -implementing [NameConverter]: - -```java -public class MyNameConverter implements NameConverter { - @Override - public String toCassandraName(String javaName) { - ... // implement your logic here - } -} -``` - -Then pass your converter class to the annotation: - -```java -@Entity -@NamingStrategy(customConverterClass = MyNameConverter.class) -public class Product { - ... -} -``` - -The mapper will use reflection to build an instance of the converter; it needs to expose a public -no-arg constructor. - -Note that, unlike built-in conventions, the mapper processor cannot invoke your converter at compile -time and use the converted names directly in generated code. Instead, the generated code will invoke -the converter at runtime (that is, every time you run a query). If you want to squeeze the last bit -of performance from the mapper, we recommend sticking to conventions. - -#### User-provided names - -Finally, you can override the CQL name manually with the [@CqlName] annotation: - -```java -@PartitionKey -@CqlName("id") -private UUID productId; -``` - -It works both on entity properties, and on the entity class itself. - -This takes precedence over the entity-level naming strategy, so it's convenient if almost all of -your schema follows a convention, but you need exceptions for a few columns. - -### Property annotations - -Properties can be annotated to configure various aspects of the mapping. 
The annotation can be -either on the field, or on the getter (if both are specified, the mapper processor issues a -compile-time warning, and the field annotation will be ignored). - -#### Primary key columns - -If the entity maps to a table, properties that map to partition key columns must be annotated with -[@PartitionKey]: - -```java -// CREATE TABLE sales(countryCode text, areaCode text, sales int, -// PRIMARY KEY((countryCode, areaCode))); - -@PartitionKey(1) -private String countryCode; -@PartitionKey(2) -private String areaCode; -``` - -If the partition key is composite, the annotation's integer value indicates the position of each -property in the key. Note that any values can be used, but for clarity it's probably a good idea to -use consecutive integers starting at 0 or 1. - -Similarly, properties that map to clustering columns must be annotated with [@ClusteringColumn]: - -```java -// CREATE TABLE sensor_reading(id uuid, year int, month int, day int, value double, -// PRIMARY KEY(id, year, month, day)); -@PartitionKey -private UUID id; -@ClusteringColumn(1) -private int year; -@ClusteringColumn(2) -private int month; -@ClusteringColumn(3) -private int day; -``` - -This information is used by some of the DAO method annotations; for example, -[@Select](../daos/select/)'s default behavior is to generate a selection by primary key. - -#### Computed properties - -Annotating an entity property with [@Computed] indicates that when retrieving data with the mapper -this property should be set to the result of a computation on the Cassandra side, typically a -function call: - -```java -private int v; - -@Computed("writetime(v)") -private long writetime; -``` - -The CQL return type of the formula must match the type of the property, otherwise an exception -will be thrown. - -[@Computed] does not support case-sensitivity. 
If the expression contains case-sensitive column -or function names, you'll have to escape them: - -```java -@Computed("\"myFunction\"(\"myColumn\")") -private int f; -``` - -[@Computed] fields are only used for select-based queries, so they will not be considered for -[@Update] or [@Insert] operations. - -Also note that like all other properties, the expected name in a query result for a [@Computed] -property is based on the property name and the employed [@NamingStrategy](#naming-strategy). You may -override this behavior using [@CqlName](#user-provided-names). - -Mapping computed results to property names is accomplished using [aliases]. If you wish to use -entities with [@Computed] properties with [@GetEntity] or [@Query]-annotated dao methods, you -must also do the same: - -```java -@Entity -class MyEntity { - @PartitionKey private int k; - - private int v; - - @Computed("ttl(v)") - private int myTtl; - - @Computed("writetime(v)") - @CqlName("ts") - private long writetime; -} -``` - -would expect a [@Query] such as: - -```java -@Dao -class MyDao { - @Query("select k, v, ttl(v) as my_ttl, writetime(v) as ts from ${qualifiedTableId} where k=:id") - MyEntity findById(int id); -} -``` - -#### Transient properties - -In some cases, one may opt to exclude properties defined on an entity from being considered -by the mapper. 
In this case, simply annotate these properties with [@Transient]: - -```java -@Transient -private int notAColumn; -``` - -In addition, one may specify transient property names at the entity level by leveraging the -[@TransientProperties] annotation: - -```java -@TransientProperties({"notAColumn", "x"}) -@Entity -public class Product { - @PartitionKey private UUID id; - private String description; - // these columns are not included because their names are specified in @TransientProperties - private int notAColumn; - private int x; -} -``` - -Finally, any field including the `transient` keyword modifier will also be considered transient, -i.e.: - -```java -private transient int notAColumn; -``` - -#### Custom column name - -Override the CQL name manually with [@CqlName], see [User-provided names](#user-provided-names) -above. - -### Default keyspace - -You can specify a default keyspace to use when doing operations on a given entity: - -```java -@Entity(defaultKeyspace = "inventory") -public class Product { - //.... -} -``` - -This will be used when you build a DAO without an explicit keyspace parameter: - -```java -@Mapper -public interface InventoryMapper { - @DaoFactory - ProductDao productDao(); - - @DaoFactory - ProductDao productDao(@DaoKeyspace String keyspace); -} - -ProductDao productDao = mapper.productDao(); -productDao.insert(product); // inserts into inventory.product - -ProductDao productDaoTest = mapper.productDao("test"); -productDaoTest.insert(product); // inserts into test.product -``` - -The default keyspace is optional: if it is not specified, and you build a DAO without a keyspace, then -the session **must** have a default keyspace, otherwise an error will be thrown: - -```java -@Entity -public class Product { ... 
} - -CqlSession session = CqlSession.builder() - .withKeyspace("default_ks") - .build(); -InventoryMapper mapper = new InventoryMapperBuilder(session).build(); - -ProductDao productDao = mapper.productDao(); -productDao.insert(product); // inserts into default_ks.product -``` - -If you want the name to be case-sensitive, it must be enclosed in double-quotes, for example: - -```java -@Entity(defaultKeyspace = "\"defaultKs\"") -``` - -### Inheritance - -When mapping an entity class or a UDT class, the mapper will transparently scan superclasses and -parent interfaces for properties and annotations, thus enabling polymorphic mapping of one class -hierarchy into different CQL tables or UDTs. - -Each concrete class must be annotated with [@Entity] and abstract classes and interfaces must not -use this annotation. - -Here is an example of a polymorphic mapping: - -```java -@Entity -static class Point2D { - private int x; - private int y; - - @CqlName("\"X\"") - public int getX() { return x; } - - public void setX(int x) { this.x = x; } - - @CqlName("\"Y\"") - public int getY() { return y; } - - public void setY(int y) { this.y = y; } -} - -@Entity -static class Point3D extends Point2D { - private int z; - - @CqlName("\"Z\"") - public int getZ() { return z; } - - public void setZ(int z) { this.z = z; } -} - -abstract static class Shape { - @PartitionKey // annotated field on superclass; annotation will get inherited in all subclasses - protected UUID id; - - public abstract UUID getId(); - - public void setId(UUID id) { this.id = id; } -} - -@CqlName("rectangles") -@Entity -static class Rectangle extends Shape { - private Point2D bottomLeft; - private Point2D topRight; - - @CqlName("rect_id") - @Override - public UUID getId() { return id; } - - public Point2D getBottomLeft() { return bottomLeft; } - - public void setBottomLeft(Point2D bottomLeft) { this.bottomLeft = bottomLeft; } - - public Point2D getTopRight() { return topRight; } - - public void setTopRight(Point2D 
topRight) { this.topRight = topRight; } - - public double getWidth() { return Math.abs(topRight.getX() - bottomLeft.getX()); } - - public double getHeight() { return Math.abs(topRight.getY() - bottomLeft.getY()); } -} - -@CqlName("circles") -@Entity -static class Circle extends Shape { - @CqlName("center2d") - protected Point2D center; - - protected double radius; - - @Override - @CqlName("circle_id") - public UUID getId() { return id; } - - public double getRadius() { return this.radius; } - - public Circle setRadius(double radius) { - this.radius = radius; - return this; - } - - public Point2D getCenter() { return center; } - - public void setCenter(Point2D center) { this.center = center; } -} - -@CqlName("spheres") -@Entity -static class Sphere extends Circle { - - @CqlName("sphere_id") - @Override - public UUID getId() { return id; } - - // overrides field annotation in Circle, - // note that the property type is narrowed down to Point3D - @CqlName("center3d") - @Override - public Point3D getCenter() { return (Point3D) center; } - - @Override - public void setCenter(Point2D center) { - assert center instanceof Point3D; - this.center = center; - } - - // overridden builder-style setter - @Override - public Sphere setRadius(double radius) { - super.setRadius(radius); - return this; - } -} -``` - -The generated entity code should map to the following schema: - -``` -CREATE TYPE point2d ("X" int, "Y" int) -CREATE TYPE point3d ("X" int, "Y" int, "Z" int) -CREATE TABLE rectangles (rect_id uuid PRIMARY KEY, bottom_left frozen<point2d>, top_right frozen<point2d>) -CREATE TABLE circles (circle_id uuid PRIMARY KEY, center2d frozen<point2d>, radius double) -CREATE TABLE spheres (sphere_id uuid PRIMARY KEY, center3d frozen<point3d>, radius double) -``` - -Annotation priority is driven by proximity to the [@Entity] class. For example, in the code above -the use of `@CqlName("sphere_id")` on `Sphere.getId()` overrides the annotation -`@CqlName("circle_id")` on `Circle.getId()` for the `Sphere` entity. 
- -Annotations declared on classes are given priority over annotations declared by interfaces at -the same level. - -To control how the class hierarchy is scanned, annotate classes with [@HierarchyScanStrategy]. - -[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html -[@CqlName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/CqlName.html -[@Dao]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Dao.html -[@Entity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Entity.html -[NameConverter]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.html -[NamingConvention]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.html -[@NamingStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.html -[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html -[@Computed]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Computed.html -[@Select]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html -[@Insert]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Insert.html -[@Update]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Update.html -[@GetEntity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/GetEntity.html -[@Query]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Query.html -[aliases]: 
http://cassandra.apache.org/doc/latest/cql/dml.html?#aliases -[@Transient]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Transient.html -[@TransientProperties]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.html -[@HierarchyScanStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.html -[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html diff --git a/manual/mapper/mapper/README.md b/manual/mapper/mapper/README.md deleted file mode 100644 index 752424c9a3b..00000000000 --- a/manual/mapper/mapper/README.md +++ /dev/null @@ -1,256 +0,0 @@ - - -## Mapper interface - -### Quick overview - -Interface annotated with [@Mapper], entry point to mapper features. - -* a corresponding builder gets generated (default: `[YourInterfacesName]Builder`). -* defines [@DaoFactory] methods that provide DAO instances. They can be parameterized by keyspace - and/or table. - ------ - -The mapper interface is the top-level entry point to mapping features. It wraps a core driver -session, and acts as a factory of [DAO](../daos/) objects that will be used to execute requests. - -It must be annotated with [@Mapper]: - -```java -@Mapper -public interface InventoryMapper { - @DaoFactory - ProductDao productDao(); -} -``` - -### Mapper builder - -For each mapper interface, a builder is generated. By default, it resides in the same package, and -is named by appending a "Builder" suffix, for example `InventoryMapper => InventoryMapperBuilder`. - -You can also use the `builderName()` element to specify a different name. It must be -fully-qualified: - -```java -@Mapper(builderName = "com.acme.MyCustomBuilder") -public interface InventoryMapper { - ... 
-} -``` - -The builder allows you to create a mapper instance, by wrapping a core `CqlSession` (if you need -more details on how to create a session, refer to the [core driver documentation](../../core/)). - -```java -CqlSession session = CqlSession.builder().build(); -InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build(); -``` - -One nice trick you can use is to create a static factory method on your interface. This hides the -name of the generated class from the rest of your application: - -```java -@Mapper -public interface InventoryMapper { - - static MapperBuilder builder(CqlSession session) { - return new InventoryMapperBuilder(session); - } - ... -} - -InventoryMapper inventoryMapper = InventoryMapper.builder(session).build(); -``` - -Like the session, the mapper is a long-lived object: you should create it once at initialization -time, and reuse it for the entire lifetime of your application. It doesn't need to get closed. It is -thread-safe. - -### DAO factory methods - -The mapper's main goal is to provide DAO instances. Your interface should provide one or more -methods annotated with [@DaoFactory], that return a DAO interface: - -```java -@DaoFactory -ProductDao productDao(); -``` - -These methods can also receive a keyspace and/or table identifier as parameters (how those -parameters affect the returned DAO is explained in the next section). 
They must be annotated with -[@DaoKeyspace] and [@DaoTable] respectively, and be of type `String` or [CqlIdentifier]: - -```java -@DaoFactory -ProductDao productDao(@DaoKeyspace String keyspace, @DaoTable String table); - -@DaoFactory -ProductDao productDao(@DaoKeyspace String keyspace); - -@DaoFactory -ProductDao productDao(@DaoTable CqlIdentifier table); -``` - -You can also specify a default keyspace when building the mapper, it will be used for all methods -that don't have a `@DaoKeyspace` parameter: - -```java -InventoryMapper inventoryMapper = new InventoryMapperBuilder(session) - .withDefaultKeyspace("keyspace1") - .build(); -``` - -The mapper maintains an interface cache. Calling a factory method with the same arguments will yield -the same DAO instance: - -```java -ProductDao dao1 = inventoryMapper.productDao("keyspace1", "product"); -ProductDao dao2 = inventoryMapper.productDao("keyspace1", "product"); -assert dao1 == dao2; -``` - -### DAO parameterization - -#### Keyspace and table - -The mapper allows you to reuse the same DAO interface for different tables. For example, given the -following definitions: - -```java -@Dao -public interface ProductDao { - @Select - Product findById(UUID productId); -} - -@Mapper -public interface InventoryMapper { - @DaoFactory - ProductDao productDao(); - - @DaoFactory - ProductDao productDao(@DaoKeyspace String keyspace); - - @DaoFactory - ProductDao productDao(@DaoKeyspace String keyspace, @DaoTable String table); -} - -ProductDao dao1 = inventoryMapper.productDao(); -ProductDao dao2 = inventoryMapper.productDao("keyspace2"); -ProductDao dao3 = inventoryMapper.productDao("keyspace3", "table3"); -``` - -* `dao1.findById` executes the query `SELECT ... FROM product WHERE id = ?`. No table name was - specified for the DAO, so it uses the default name for the `Product` entity (which in this case is - the entity name converted with the default [naming strategy](../entities/#naming-strategy)). 
No - keyspace was specified either, so the table is unqualified, and this DAO will only work with a - session that was built with a default keyspace: - - ```java - CqlSession session = CqlSession.builder().withKeyspace("keyspace1").build(); - InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build(); - ProductDao dao1 = inventoryMapper.productDao(); - ``` - -* `dao2.findById` uses the DAO's keyspace, and the default table name: `SELECT ... FROM - keyspace2.product WHERE id = ?`. - -* `dao3.findById` uses the DAO's keyspace and table name: `SELECT ... FROM keyspace3.table3 WHERE id - = ?`. - -The DAO's keyspace and table can also be injected into custom query strings; see [Query -methods](../daos/query/). - -#### Execution profile - -Similarly, a DAO can be parameterized to use a particular [configuration -profile](../../core/configuration/#execution-profiles): - -```java -@Mapper -public interface InventoryMapper { - @DaoFactory - ProductDao productDao(@DaoProfile String profileName); - - @DaoFactory - ProductDao productDao(@DaoProfile DriverExecutionProfile profile); -} -``` - -The mapper will call `setExecutionProfileName` / `setExecutionProfile` on every generated statement. - -### Schema validation - -The mapper validates entity mappings against the database schema at runtime. This check is performed -every time you initialize a new DAO: - -```java -// Checks that entity 'Product' can be mapped to table or UDT 'keyspace1.product' -ProductDao dao1 = inventoryMapper.productDao("keyspace1", "product"); - -// Checks that entity 'Product' can be mapped to table or UDT 'keyspace2.product' -ProductDao dao2 = inventoryMapper.productDao("keyspace2", "product"); -``` - -For each entity referenced in the DAO, the mapper tries to find a schema element with the -corresponding name (according to the [naming strategy](../entities/#naming-strategy)). It tries -tables first, then falls back to UDTs if there is no match. 
You can speed up this process by -providing a hint: - -```java -import static com.datastax.oss.driver.api.mapper.annotations.SchemaHint.TargetElement.UDT; -import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; - -@Entity -@SchemaHint(targetElement = UDT) -public class Address { ... } -``` - -The following checks are then performed: - -* for each entity field, the database table or UDT must contain a column with the corresponding name - (according to the [naming strategy](../entities/#naming-strategy)). -* the types must be compatible, either according to the [default type - mappings](../../core/#cql-to-java-type-mapping), or via a [custom - codec](../../core/custom_codecs/) registered with the session. -* additionally, if the target element is a table, the primary key must be [properly - annotated](../entities/#primary-key-columns) in the entity. - -If any of those steps fails, an `IllegalArgumentException` is thrown. - -Schema validation adds a small startup overhead, so once your application is stable you may want to -disable it: - -```java -InventoryMapper inventoryMapper = new InventoryMapperBuilder(session) - .withSchemaValidationEnabled(false) - .build(); -``` - -You can also permanently disable validation of an individual entity by annotating it with -`@SchemaHint(targetElement = NONE)`. 
- -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html -[@DaoFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.html -[@DaoKeyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.html -[@DaoTable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoTable.html -[@Mapper]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Mapper.html diff --git a/manual/osgi/README.md b/manual/osgi/README.md deleted file mode 100644 index 92cd4625b68..00000000000 --- a/manual/osgi/README.md +++ /dev/null @@ -1,163 +0,0 @@ - - -# OSGi - -The driver is available as an [OSGi] bundle. More specifically, the following maven artifacts are -valid OSGi bundles: - -- `java-driver-core` -- `java-driver-query-builder` -- `java-driver-mapper-runtime` -- `java-driver-core-shaded` - -Note: some of the driver dependencies are not valid OSGi bundles. Most of them are optional, and the -driver can work properly without them (see the -[Integration>Driver dependencies](../core/integration/#driver-dependencies) section for more -details); in such cases, the corresponding packages are declared with optional resolution in -`Import-Package` directives. However, if you need to access such packages in an OSGi container you -MUST wrap the corresponding jar in a valid OSGi bundle and make it available for provisioning to the -OSGi runtime. - -## Using the shaded jar - -`java-driver-core-shaded` shares the same bundle name as `java-driver-core` -(`com.datastax.oss.driver.core`). It can be used as a drop-in replacement in cases where you have -an explicit version of dependency in your project different than that of the driver's. Refer to -[shaded jar](../core/shaded_jar/) for more information. 
- -## Using a custom `ClassLoader` - -In several places of the [driver configuration] it is possible to specify the class name of -something to be instantiated by the driver such as the reconnection policy. This is accomplished -using reflection, which uses a `ClassLoader`. By default, the driver uses its own bundle's -`ClassLoader` to instantiate classes by reflection. This is typically adequate as long as the driver -bundle has access to the bundle where the implementing class resides. - -However if the default `ClassLoader` cannot load the implementing class, you may encounter an error -like this: - - java.lang.ClassNotFoundException: com.datastax.oss.MyCustomReconnectionPolicy - -Similarly, it also happens that the default `ClassLoader` is able to load the implementing class but -is not able to ascertain whether that class implements the expected parent type. In these cases you -may encounter an error such as: - - java.lang.IllegalArgumentException: Expected class ExponentialReconnectionPolicy - (specified by advanced.reconnection-policy.class) to be a subtype of - com.datastax.oss.driver.api.core.connection.ReconnectionPolicy - -This is occurring because there is a disparity in the `ClassLoader`s used between the driver code -and the `ClassLoader` used to reflectively load the class (in this case, -`ExponentialReconnectionPolicy`). - -To overcome these issues, you may specify a `ClassLoader` instance when constructing a `Session` -by using [withClassLoader()]. 
- -Alternatively, if you have access to the `BundleContext` (for example, if you are creating the -session in an `Activator` class) you can also obtain the bundle's `ClassLoader` the following way: - -```java -BundleContext bundleContext = ...; -Bundle bundle = bundleContext.getBundle(); -BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); -ClassLoader classLoader = bundleWiring.getClassLoader(); -CqlSession session = CqlSession.builder() - .withClassLoader(classLoader) - .build(); -``` - -### Using a custom `ClassLoader` for application-bundled configuration resources - -In addition to specifying a `ClassLoader` when constructing a `Session`, you can also specify -a `ClassLoader` instance on certain `DriverConfigLoader` methods for cases when your OSGi -application bundle provides overrides to driver configuration defaults. This is typically done by -including an `application.conf` file in your application bundle. - -For example, you can use [DriverConfigLoader.fromDefaults(ClassLoader)] to use the driver's default -configuration mechanism while specifying a different class loader: - -```java -BundleContext bundleContext = ...; -Bundle bundle = bundleContext.getBundle(); -BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); -ClassLoader classLoader = bundleWiring.getClassLoader(); - -CqlSession session = CqlSession.builder() - .withClassLoader(classLoader) - .withConfigLoader(DriverConfigLoader.fromDefaults(classLoader)) - .build(); -``` - -The above configuration will look for resources named `application.conf` inside the application -bundle, using the right class loader for that. 
- -Similarly, if you want to use programmatic configuration in you application bundle, but still -want to be able to provide some configuration in an `application.conf` file, you can use -[DriverConfigLoader.programmaticBuilder(ClassLoader)]: - -```java -BundleContext bundleContext = ...; -Bundle bundle = bundleContext.getBundle(); -BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); -ClassLoader classLoader = bundleWiring.getClassLoader(); -DriverConfigLoader loader = - DriverConfigLoader.programmaticBuilder(classLoader) - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5)) - .startProfile("slow") - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .endProfile() - .build(); -CqlSession session = CqlSession.builder() - .withClassLoader(classLoader) - .withConfigLoader(loader) - .build(); -``` - -The above configuration will honor all programmatic settings, but will look for resources named -`application.conf` inside the application bundle, using the right class loader for that. - -## What does the "Error loading libc" DEBUG message mean? - -The driver is able to perform native system calls through [JNR] in some cases, for example to -achieve microsecond resolution when [generating timestamps](../core/query_timestamps/). - -Unfortunately, some of the JNR artifacts available from Maven are not valid OSGi bundles and cannot -be used in OSGi applications. - -[JAVA-1127] has been created to track this issue, and there is currently no simple workaround short -of embedding the dependency, which we've chosen not to do. - -Because native calls are not available, it is also normal to see the following log lines when -starting the driver: - - [main] DEBUG - Error loading libc - java.lang.NoClassDefFoundError: jnr/ffi/LibraryLoader - ... 
- [main] INFO - Could not access native clock (see debug logs for details), falling back to Java - system clock - - -[driver configuration]: ../core/configuration -[OSGi]:https://www.osgi.org -[JNR]: https://github.com/jnr/jnr-posix -[withClassLoader()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withClassLoader-java.lang.ClassLoader- -[JAVA-1127]:https://datastax-oss.atlassian.net/browse/JAVA-1127 -[DriverConfigLoader.fromDefaults(ClassLoader)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromDefaults-java.lang.ClassLoader- -[DriverConfigLoader.programmaticBuilder(ClassLoader)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#programmaticBuilder-java.lang.ClassLoader- diff --git a/manual/query_builder/.nav b/manual/query_builder/.nav deleted file mode 100644 index 5dd9a982006..00000000000 --- a/manual/query_builder/.nav +++ /dev/null @@ -1,11 +0,0 @@ -select -insert -update -batch -delete -truncate -relation -condition -term -idempotence -schema \ No newline at end of file diff --git a/manual/query_builder/README.md b/manual/query_builder/README.md deleted file mode 100644 index d1932b329e7..00000000000 --- a/manual/query_builder/README.md +++ /dev/null @@ -1,238 +0,0 @@ - - -## Query builder - -The query builder is a utility to **generate CQL queries programmatically**. For example, it could -be used to: - -* given a set of optional search parameters, build a search query dynamically depending on which - parameters are provided; -* given a Java class, generate the CRUD queries that map instances of that class to a Cassandra - table. 
- -To use it in your application, add the following dependency: - -```xml -<dependency> -  <groupId>org.apache.cassandra</groupId> -  <artifactId>java-driver-query-builder</artifactId> -  <version>${driver.version}</version> -</dependency> -``` - -Here is our canonical example rewritten with the query builder: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -try (CqlSession session = CqlSession.builder().build()) { - - Select query = selectFrom("system", "local").column("release_version"); // SELECT release_version FROM system.local - SimpleStatement statement = query.build(); - - ResultSet rs = session.execute(statement); - Row row = rs.one(); - System.out.println(row.getString("release_version")); -} -``` - -### General concepts - -#### Fluent API - -All the starting methods are centralized in the [QueryBuilder] and [SchemaBuilder] classes. To get -started, add one of the following imports: - -```java -// For DML queries, such as SELECT -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -// For DDL queries, such as CREATE TABLE -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; -``` - -Choose the method matching your desired statement, for example `selectFrom`. Then use your IDE's -completion and the javadocs to add query parts: - -```java -Select select = -    selectFrom("ks", "user") - .column("first_name") - .column("last_name") - .whereColumn("id").isEqualTo(bindMarker()); -// SELECT first_name,last_name FROM ks.user WHERE id=? -``` - -When your query is complete, you can either extract a raw query string, or turn it into a -[simple statement](../core/statements/simple) (or its builder): - -```java -String cql = select.asCql(); -SimpleStatement statement = select.build(); -SimpleStatementBuilder builder = select.builder(); -``` - -#### DataStax Enterprise - -The driver provides two additional entry points for DSE-specific queries: [DseQueryBuilder] and -[DseSchemaBuilder]. 
They extend their respective non-DSE counterparts, so anything that is available -on the default query builder can also be done with the DSE query builder. - -We recommend that you use those classes if you are targeting DataStax Enterprise; they will be -enriched in the future if DSE adds custom CQL syntax. - -Currently, the only difference is the support for the `DETERMINISTIC` and `MONOTONIC` keywords when -generating `CREATE FUNCTION` or `CREATE AGGREGATE` statements: - -```java -import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseFunction; - -createDseFunction("func1") - .withParameter("param1", DataTypes.INT) - .returnsNullOnNull() - .returnsType(DataTypes.INT) - .deterministic() - .monotonic(); -// CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC -``` - -#### Immutability - -All types in the fluent API are immutable. This means that every step creates a new object: - -```java -SelectFrom selectFrom = selectFrom("ks", "user"); - -Select select1 = selectFrom.column("first_name"); // SELECT first_name FROM ks.user -Select select2 = selectFrom.column("last_name"); // SELECT last_name FROM ks.user - -assert select1 != select2; -``` - -Immutability has great benefits: - -* **thread safety**: you can share built queries across threads, without any race condition or - badly published state. -* **zero sharing**: when you build multiple queries from a shared "base" (as in the example above), - all the queries are totally independent, changes to one query will never "pollute" another. - -On the downside, immutability means that the query builder creates lots of short-lived objects. -Modern garbage collectors are good at handling that, but still we recommend that you **avoid using -the query builder in your hot path**: - -* favor [bound statements](../core/statements/prepared) for queries that are used often. 
You can - still use the query builder and prepare the result: - -  ```java - // During application initialization: - Select selectUser = selectFrom("user").all().whereColumn("id").isEqualTo(bindMarker()); - // SELECT * FROM user WHERE id=? - PreparedStatement preparedSelectUser = session.prepare(selectUser.build()); - - // At runtime: - session.execute(preparedSelectUser.bind(userId)); - ``` -* for queries that never change, build them when your application initializes, and store them in a - field or constant for later. -* for queries that are built dynamically, consider using a cache. - -#### Identifiers - -All fluent API methods use [CqlIdentifier] for schema element names (keyspaces, tables, columns...). -But, for convenience, there are also `String` overloads that take the CQL form (see [Case -sensitivity](../case_sensitivity) for more explanations). - -For conciseness, we'll use the string-based versions for the examples in this manual. - -### Non-goals - -The query builder is **NOT**: - -#### A crutch to learn CQL - -While the fluent API guides you, it does not encode every rule of the CQL grammar. Also, it supports -a wide range of Cassandra versions, some of which may be more recent than your production target, or -not even released yet. It's still possible to generate invalid CQL syntax if you don't know what -you're doing. - -You should always start with a clear idea of the CQL query, and write the builder code that produces -it, not the other way around. - -#### A better way to write static queries - -The primary use case of the query builder is dynamic generation. 
You will get the most value out of -it when you do things like: - -```java -// The columns to select are only known at runtime: -for (String columnName : columnNames) { - select = select.column(columnName) -} - -// If a search parameter is present, add the corresponding WHERE clause: -if (name != null) { - select = select.whereColumn("name").isEqualTo(name); -} -``` - -If all of your queries could also be written as compile-time string constants, ask yourself what the -query builder is really buying you: - -```java -// Built version: -private static final Statement SELECT_USERS = - selectFrom("user").all().limit(10).build(); - -// String version: -private static final Statement SELECT_USERS = - SimpleStatement.newInstance("SELECT * FROM user LIMIT 10"); -``` - -The built version: - -* is slightly more expensive to build (admittedly, that is not really an issue for constants); -* is not more readable; -* is not necessarily less error-prone (see the previous section). - -It eventually boils down to personal taste, but for simple cases you should consider raw strings as -a better alternative. - -### Building queries - -For a complete tour of the API, browse the child pages in this manual: - -* statement types: - * [SELECT](select/) - * [INSERT](insert/) - * [UPDATE](update/) - * [DELETE](delete/) - * [TRUNCATE](truncate/) - * [Schema builder](schema/) (for DDL statements such as CREATE TABLE, etc.) 
-* common topics: - * [Relations](relation/) - * [Conditions](condition/) - * [Terms](term/) - * [Idempotence](idempotence/) - -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html -[DseQueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.html -[DseSchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.html diff --git a/manual/query_builder/condition/README.md b/manual/query_builder/condition/README.md deleted file mode 100644 index 1a6a37eb2ef..00000000000 --- a/manual/query_builder/condition/README.md +++ /dev/null @@ -1,154 +0,0 @@ - - -## Conditions - -A condition is a clause that appears after the IF keyword in a conditional [UPDATE](../update/) or -[DELETE](../delete/) statement. - -The easiest way to add a condition is with an `ifXxx` method in the fluent API: - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .ifColumn("v1").isEqualTo(literal(1)) - .ifColumn("v2").isEqualTo(literal(2)); -// DELETE FROM user WHERE k=? IF v1=1 AND v2=2 -``` - -You can also create it manually with one of the factory methods in [Condition], and then pass it to -`if_()`: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -Condition vCondition = Condition.column("v").isEqualTo(literal(1)); -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .if_(vCondition); -// DELETE FROM user WHERE k=? IF v=1 -``` - -If you call `if_()` multiple times, the clauses will be joined with the AND keyword. You can also -add multiple conditions in a single call. 
This is a bit more efficient since it creates less -temporary objects: - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .if_( - Condition.column("v1").isEqualTo(literal(1)), - Condition.column("v2").isEqualTo(literal(2))); -// DELETE FROM user WHERE k=? IF v1=1 AND v2=2 -``` - -Conditions are composed of a left operand, an operator, and a right-hand-side -[term](../term/). - -### Simple columns - -`ifColumn` operates on a single column. It supports basic arithmetic comparison operators: - -| Comparison operator | Method name | -|---------------------|--------------------------| -| `=` | `isEqualTo` | -| `<` | `isLessThan` | -| `<=` | `isLessThanOrEqualTo` | -| `>` | `isGreaterThan` | -| `>=` | `isGreaterThanOrEqualTo` | -| `!=` | `isNotEqualTo` | - -*Note: we support `!=` because it is present in the CQL grammar but, as of Cassandra 4, it is not -implemented yet.* - -In addition, `in()` can test for equality with various alternatives. You can either provide each -alternative as a term: - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .ifColumn("v").in(bindMarker(), bindMarker(), bindMarker()); -// DELETE FROM user WHERE k=? IF v IN (?,?,?) -``` - -Or bind the whole list of alternatives as a single variable: - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .ifColumn("v").in(bindMarker()); -// DELETE FROM user WHERE k=? IF v IN ? -``` - -### UDT fields - -`ifField` tests a field in a top-level UDT (nested UDTs are not allowed): - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .ifField("address", "zip").isEqualTo(literal(94040)); -// DELETE FROM user WHERE k=? IF address.zip=94040 -``` - -It supports the same set of operators as simple columns. 
- -### Collection elements - -`ifElement` tests an element in a top-level collection (nested collections are not allowed): - -```java -deleteFrom("product") - .whereColumn("sku").isEqualTo(bindMarker()) - .ifElement("features", literal("color")).in(literal("red"), literal("blue")); -// DELETE FROM product WHERE sku=? IF features['color'] IN ('red','blue') -``` - -It supports the same set of operators as simple columns. - -### Raw snippets - -You can also provide a condition as a raw CQL snippet, that will get appended to the query as-is, -without any syntax checking or escaping: - -```java -deleteFrom("product") - .whereColumn("sku").isEqualTo(bindMarker()) - .ifRaw("features['color'] IN ('red', 'blue') /*some random comment*/"); -// DELETE FROM product WHERE sku=? IF features['color'] IN ('red', 'blue') /*some random comment*/ -``` - -This should be used with caution, as it's possible to generate invalid CQL that will fail at -execution time; on the other hand, it can be used as a workaround to handle new CQL features that -are not yet covered by the query builder. - -### IF EXISTS - -Finally, you can specify an IF EXISTS clause: - -```java -deleteFrom("product").whereColumn("sku").isEqualTo(bindMarker()).ifExists(); -// DELETE FROM product WHERE sku=? IF EXISTS -``` - -It is mutually exclusive with column conditions: if you previously specified column conditions on -the statement, they will be ignored; conversely, adding a column condition cancels a previous IF -EXISTS clause. - -[Condition]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/condition/Condition.html diff --git a/manual/query_builder/delete/README.md b/manual/query_builder/delete/README.md deleted file mode 100644 index 8e97920ae9f..00000000000 --- a/manual/query_builder/delete/README.md +++ /dev/null @@ -1,164 +0,0 @@ - - -## DELETE - -To start a DELETE query, use one of the `deleteFrom` methods in [QueryBuilder]. 
There are several -variants depending on whether your table name is qualified, and whether you use -[identifiers](../../case_sensitivity/) or raw strings: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -DeleteSelection delete = deleteFrom("user"); -``` - -Note that, at this stage, the query can't be built yet. You need at least one -[relation](#relations). - -### Selectors - -A selector is something that appears after the `DELETE` keyword, and will be removed from the -affected row(s). - -Selectors are optional; if you don't provide any, the whole row will be deleted. - -The easiest way to add a selector is with a fluent API method: - -```java -deleteFrom("user").column("v1").column("v2"); -// DELETE v1,v2 FROM user... -``` - -You can also create it manually with one of the factory methods in [Selector], and then pass it to -`selector()`: - -```java -deleteFrom("user").selector(Selector.getColumn("v")) -// DELETE v FROM user ... -``` - -If you have multiple selectors, you can also use `selectors()` to add them all in a single call. -This is a bit more efficient since it creates less temporary objects: - -```java -deleteFrom("user").selectors(getColumn("v1"), getColumn("v2")); -// DELETE v1,v2 FROM user... -``` - -Only 3 types of selectors can be used in DELETE statements: - -* simple columns (as illustrated in the previous examples); -* fields in non-nested UDT columns: - - ```java - deleteFrom("user").field("address", "street"); - // DELETE address.street FROM user ... - ``` - -* elements in non-nested collection columns: - - ```java - deleteFrom("product").element("features", literal("color")); - // DELETE features['color'] FROM product ... - ``` - -You can also pass a raw CQL snippet, that will get appended to the query as-is, without any syntax -checking or escaping: - -```java -deleteFrom("user").raw("v /*some random comment*/") -// DELETE v /*some random comment*/ FROM user ... 
-``` - -This should be used with caution, as it's possible to generate invalid CQL that will fail at -execution time; on the other hand, it can be used as a workaround to handle new CQL features that -are not yet covered by the query builder. - -### Timestamp - -The USING TIMESTAMP clause specifies the timestamp at which the mutation will be applied. You can -pass either a literal value: - -```java -deleteFrom("user").column("v").usingTimestamp(1234) -// DELETE v FROM user USING TIMESTAMP 1234 -``` - -Or a bind marker: - -```java -deleteFrom("user").column("v").usingTimestamp(bindMarker()) -// DELETE v FROM user USING TIMESTAMP ? -``` - -If you call the method multiple times, the last value will be used. - -### Relations - -Relations get added with the fluent `whereXxx()` methods: - -```java -deleteFrom("user").whereColumn("k").isEqualTo(bindMarker()); -// DELETE FROM user WHERE k=? -``` - -Or you can build and add them manually: - -```java -deleteFrom("user").where( - Relation.column("k").isEqualTo(bindMarker())); -// DELETE FROM user WHERE k=? -``` - -Once there is at least one relation, the statement can be built: - -```java -SimpleStatement statement = deleteFrom("user").whereColumn("k").isEqualTo(bindMarker()).build(); -``` - -Relations are a common feature used by many types of statements, so they have a -[dedicated page](../relation) in this manual. - -### Conditions - -Conditions get added with the fluent `ifXxx()` methods: - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .ifColumn("v").isEqualTo(literal(1)); -// DELETE FROM user WHERE k=? IF v=1 -``` - -Or you can build and add them manually: - -```java -deleteFrom("user") - .whereColumn("k").isEqualTo(bindMarker()) - .if_( - Condition.column("v").isEqualTo(literal(1))); -// DELETE FROM user WHERE k=? IF v=1 -``` - -Conditions are a common feature used by UPDATE and DELETE, so they have a -[dedicated page](../condition) in this manual. 
- -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Selector]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/select/Selector.html diff --git a/manual/query_builder/idempotence/README.md b/manual/query_builder/idempotence/README.md deleted file mode 100644 index 2f97151d277..00000000000 --- a/manual/query_builder/idempotence/README.md +++ /dev/null @@ -1,247 +0,0 @@ - - -## Idempotence in the query builder - -When you generate a statement (or a statement builder) from the query builder, it automatically -infers the [isIdempotent](../../core/idempotence/) flag: - -```java -SimpleStatement statement = - selectFrom("user").all() - .whereColumn("id").isEqualTo(literal(1)) - .build(); -// SELECT * FROM user WHERE id=1 -assert statement.isIdempotent(); -``` - -This can't always be determined accurately; when in doubt, the builder is pessimistic and marks the -statement as not idempotent. If you know otherwise, you can fix it manually: - -```java -Delete delete = - deleteFrom("product") - .element("features", literal("color")) - .whereColumn("sku").isEqualTo(bindMarker()); -assert !delete.build().isIdempotent(); // see below for why -SimpleStatement statement = delete.builder() - .withIdempotence(true) - .build(); -``` - -The remaining sections describe the rules that are applied to compute the flag. - -### SELECT statements - -SELECT statements don't modify the contents of the database. They're always considered idempotent, -regardless of the other rules below. 
- -### Unsafe terms - -If you use the result of a user-defined function in an INSERT or UPDATE statement, there is no way -of knowing if that function is idempotent: - -```java -SimpleStatement statement = insertInto("foo").value("k", function("generate_id")).build(); -// INSERT INTO foo (k) VALUES (generate_id()) -assert !statement.isIdempotent(); -``` - -This extends to arithmetic operations using such terms: - -```java -SimpleStatement statement = - insertInto("foo").value("k", add(function("generate_id"), literal(1))).build(); -// INSERT INTO foo (k) VALUES (generate_id()+1) -assert !statement.isIdempotent(); -``` - -Raw terms could be anything, so they are also considered unsafe by default: - -```java -SimpleStatement statement = - insertInto("foo").value("k", raw("generate_id()+1")).build(); -// INSERT INTO foo (k) VALUES (generate_id()+1) -assert !statement.isIdempotent(); -``` - -### Unsafe WHERE clauses - -If a WHERE clause in an UPDATE or DELETE statement uses a comparison with an unsafe term, it could -potentially apply to different rows for each execution: - -```java -SimpleStatement statement = - update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k").isEqualTo(function("non_idempotent_func")) - .build(); -// UPDATE foo SET v=? WHERE k=non_idempotent_func() -assert !statement.isIdempotent(); -``` - -### Unsafe updates - -Counter updates are never idempotent: - -```java -SimpleStatement statement = - update("foo") - .increment("c") - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// UPDATE foo SET c+=1 WHERE k=? -assert !statement.isIdempotent(); -``` - -Nor is appending or prepending an element to a list: - -```java -SimpleStatement statement = - update("foo") - .appendListElement("l", literal(1)) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// UPDATE foo SET l=l+[1] WHERE k=? 
-assert !statement.isIdempotent(); -``` - -The generic `append` and `prepend` methods apply to any kind of collection, so we have to consider -them unsafe by default too: - -```java -SimpleStatement statement = - update("foo") - .prepend("l", literal(Arrays.asList(1, 2, 3))) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// UPDATE foo SET l=[1,2,3]+l WHERE k=? -assert !statement.isIdempotent(); -``` - -The generic `remove` method is however safe since collection removals are idempotent: - -```java -SimpleStatement statement = - update("foo") - .remove("l", literal(Arrays.asList(1, 2, 3))) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// UPDATE foo SET l=l-[1,2,3] WHERE k=? -assert statement.isIdempotent(); -``` - -When appending, prepending or removing a single element to/from a collection, it is possible to use -the dedicated methods listed below; their idempotence depends on the collection type (list, set or -map), the operation (append, prepend or removal) and the idempotence of the element being -added/removed: - -1. `appendListElement` : not idempotent -2. `prependListElement` : not idempotent -3. `removeListElement` : idempotent if element is idempotent -4. `appendSetElement` : idempotent if element is idempotent -5. `prependSetElement` : idempotent if element is idempotent -6. `removeSetElement` : idempotent if element is idempotent -7. `appendMapElement` : idempotent if both key and value are idempotent -8. `prependMapElement` : idempotent if both key and value are idempotent -9. `removeMapElement` : idempotent if both key and value are idempotent - -In practice, most invocations of the above methods will be idempotent because most collection -elements are. For example, the following statement is idempotent since `literal(1)` is also -idempotent: - -```java -SimpleStatement statement = - update("foo") - .removeListElement("l", literal(1)) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// UPDATE foo SET l=l-[1] WHERE k=? 
-assert statement.isIdempotent(); -``` - -However, in rare cases the resulting statement won't be marked idempotent, e.g. if you use a -function to select a collection element: - -```java -SimpleStatement statement = - update("foo") - .removeListElement("l", function("myfunc")) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// UPDATE foo SET l=l-[myfunc()] WHERE k=? -assert !statement.isIdempotent(); -``` - -### Unsafe deletions - -Deleting from a list is not idempotent: - -```java -SimpleStatement statement = - deleteFrom("foo") - .element("l", literal(0)) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -// DELETE l[0] FROM foo WHERE k=? -assert !statement.isIdempotent(); -``` - -### Conditional statements - -All conditional statements are considered non-idempotent: - -* INSERT with IF NOT EXISTS; -* UPDATE and DELETE with IF EXISTS or IF conditions on columns. - -This might seem counter-intuitive, as these queries can sometimes be safe to execute multiple times. -For example, consider the following query: - -```java -update("foo") - .setColumn("v", literal(4)) - .whereColumn("k").isEqualTo(literal(1)) - .ifColumn("v").isEqualTo(literal(1)); -// UPDATE foo SET v=4 WHERE k=1 IF v=1 -``` - -If we execute it twice, the IF condition will fail the second time, so the second execution will do -nothing and `v` will still have the value 4. - -However, the problem appears when we consider multiple clients executing the query with retries: - -1. `v` has the value 1; -2. client 1 executes the query above, performing a CAS (compare and set) from 1 to 4; -3. client 1's connection drops, but the query completes successfully. `v` now has the value 4; -4. client 2 executes a CAS from 4 to 2; -5. client 2's transaction succeeds. `v` now has the value 2; -6. since client 1 lost its connection, it considers the query as failed, and transparently retries - the CAS from 1 to 4. But since the column now has value 2, it receives a "not applied" response. 
- -One important aspect of lightweight transactions is [linearizability]: given a set of concurrent -operations on a column from different clients, there must be a way to reorder them to yield a -sequential history that is correct. From our clients' point of view, there were two operations: - -* client 1 executed a CAS from 1 to 4, that was not applied; -* client 2 executed a CAS from 4 to 2, that was applied. - -But overall the column changed from 1 to 2. There is no ordering of the two operations that can -explain that change. We broke linearizability by doing a transparent retry at step 6. - -[linearizability]: https://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability diff --git a/manual/query_builder/insert/README.md b/manual/query_builder/insert/README.md deleted file mode 100644 index 6bac896d9b8..00000000000 --- a/manual/query_builder/insert/README.md +++ /dev/null @@ -1,136 +0,0 @@ - - -## INSERT - -To start an INSERT query, use one of the `insertInto` methods in [QueryBuilder]. There are -several variants depending on whether your table name is qualified, and whether you use -[identifiers](../../case_sensitivity/) or raw strings: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -InsertInto insert = insertInto("user"); -``` - -Note that, at this stage, the query can't be built yet. You need to set at least one value. - -### Setting values - -#### Regular insert - -A regular insert (as opposed to a JSON insert, covered in the next section) specifies values for a -set of columns. In the Query Builder DSL, this is expressed with the `value` method: - -```java -insertInto("user") - .value("id", bindMarker()) - .value("first_name", literal("John")) - .value("last_name", literal("Doe")); -// INSERT INTO user (id,first_name,last_name) VALUES (?,'John','Doe') -``` - -The column names can only be simple identifiers. The values are [terms](../term). 
- -#### JSON insert - -To start a JSON insert, use the `json` method instead. It takes the payload as a raw string, that -will get inlined as a CQL literal: - -```java -insertInto("user").json("{\"id\":1, \"first_name\":\"John\", \"last_name\":\"Doe\"}"); -// INSERT INTO user JSON '{"id":1, "first_name":"John", "last_name":"Doe"}' -``` - -In a real application, you'll probably obtain the string from a JSON library such as Jackson. - -You can also bind it as a value: - -```java -insertInto("user").json(bindMarker()); -// INSERT INTO user JSON ? -``` - -JSON inserts have extra options to indicate how missing fields should be handled: - -```java -insertInto("user").json("{\"id\":1}").defaultUnset(); -// INSERT INTO user JSON '{"id":1}' DEFAULT UNSET - -insertInto("user").json("{\"id\":1}").defaultNull(); -// INSERT INTO user JSON '{"id":1}' DEFAULT NULL -``` - -### Conditions - -For INSERT queries, there is only one possible condition: IF NOT EXISTS. It applies to both regular -and JSON inserts: - -```java -insertInto("user").json(bindMarker()).ifNotExists(); -// INSERT INTO user JSON ? IF NOT EXISTS -``` - -### Timestamp - -The USING TIMESTAMP clause specifies the timestamp at which the mutation will be applied. You can -pass either a literal value: - -```java -insertInto("user").json(bindMarker()).usingTimestamp(1234) -// INSERT INTO user JSON ? USING TIMESTAMP 1234 -``` - -Or a bind marker: - -```java -insertInto("user").json(bindMarker()).usingTimestamp(bindMarker()) -// INSERT INTO user JSON ? USING TIMESTAMP ? -``` - -If you call the method multiple times, the last value will be used. - -### Time To Live (TTL) - -You can generate a USING TTL clause that will cause column values to be deleted (marked with a -tombstone) after the specified time (in seconds) has expired. This can be done with a literal: - -```java -insertInto("user").value("a", bindMarker()).usingTtl(60) -// INSERT INTO user (a) VALUES (?) 
USING TTL 60 -``` - -Or a bind marker: - -```java -insertInto("user").value("a", bindMarker()).usingTtl(bindMarker()) -// INSERT INTO user (a) VALUES (?) USING TTL ? -``` - -If you call the method multiple times, the last value will be used. - -The TTL value applies only to the inserted data, not the entire column. Any subsequent updates to -the column resets the TTL. - -Setting the value to 0 will result in removing the TTL for the inserted data in Cassandra when the query -is executed. This is distinctly different than setting the value to null. Passing a null value to -this method will only remove the USING TTL clause from the query, which will not alter the TTL (if -one is set) in Cassandra. - -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html diff --git a/manual/query_builder/relation/README.md b/manual/query_builder/relation/README.md deleted file mode 100644 index eb1c728888e..00000000000 --- a/manual/query_builder/relation/README.md +++ /dev/null @@ -1,224 +0,0 @@ - - -## Relations - -A relation is a clause that appears after the WHERE keyword, and restricts the rows that the -statement operates on. - -Relations are used by the following statements: - -* [SELECT](../select/) -* [UPDATE](../update/) -* [DELETE](../delete/) -* [CREATE MATERIALIZED VIEW](../schema/materialized_view/) - -The easiest way to add a relation is with a `whereXxx` method in the fluent API: - -```java -selectFrom("sensor_data").all() - .whereColumn("id").isEqualTo(bindMarker()) - .whereColumn("date").isGreaterThan(bindMarker()); -// SELECT * FROM sensor_data WHERE id=? AND date>? -``` - -You can also create it manually with one of the factory methods in [Relation], and then pass it to -`where()`: - -```java -selectFrom("user").all().where( - Relation.column("id").isEqualTo(bindMarker())); -// SELECT * FROM user WHERE id=? 
-``` - -If you call `where()` multiple times, the clauses will be joined with the AND keyword. You can also -add multiple relations in a single call. This is a bit more efficient since it creates less -temporary objects: - -```java -selectFrom("sensor_data").all() - .where( - Relation.column("id").isEqualTo(bindMarker()), - Relation.column("date").isGreaterThan(bindMarker())); -// SELECT * FROM sensor_data WHERE id=? AND date>? -``` - -Relations are generally composed of a left operand, an operator, and an optional right-hand-side -[term](../term/). The type of relation determines which operators are available. - -### Simple columns - -`whereColumn` operates on a single column. It supports basic arithmetic comparison operators: - -| Comparison operator | Method name | -|---------------------|--------------------------| -| `=` | `isEqualTo` | -| `<` | `isLessThan` | -| `<=` | `isLessThanOrEqualTo` | -| `>` | `isGreaterThan` | -| `>=` | `isGreaterThanOrEqualTo` | -| `!=` | `isNotEqualTo` | - -*Note: we support `!=` because it is present in the CQL grammar but, as of Cassandra 4, it is not -implemented yet.* - -See above for comparison operator examples. - -If you're using SASI indices, you can also use `like()` for wildcard comparisons: - -```java -selectFrom("user").all().whereColumn("last_name").like(literal("M%")); -// SELECT * FROM user WHERE last_name LIKE 'M%' -``` - -`in()` is like `isEqualTo()`, but with various alternatives. You can either provide each alternative as a -term: - -```java -selectFrom("user").all().whereColumn("id").in(literal(1), literal(2), literal(3)); -// SELECT * FROM user WHERE id IN (1,2,3) - -selectFrom("user").all().whereColumn("id").in(bindMarker(), bindMarker(), bindMarker()); -// SELECT * FROM user WHERE id IN (?,?,?) -``` - -Or bind the whole list of alternatives as a single variable: - -```java -selectFrom("user").all().whereColumn("id").in(bindMarker()); -// SELECT * FROM user WHERE id IN ? 
-``` - -For collection columns, you can check for the presence of an element with `contains()` and -`containsKey()`: - -```java -selectFrom("sensor_data") - .all() - .whereColumn("id").isEqualTo(bindMarker()) - .whereColumn("date").isEqualTo(bindMarker()) - .whereColumn("readings").containsKey(literal("temperature")) - .allowFiltering(); -// SELECT * FROM sensor_data WHERE id=? AND date=? AND readings CONTAINS KEY 'temperature' ALLOW FILTERING -``` - -Finally, `isNotNull()` generates an `IS NOT NULL` check. *Note: we support `IS NOT NULL` because it -is present in the CQL grammar but, as of Cassandra 4, it is not implemented yet.* - -### Column components - -`whereMapValue` operates on a value inside a map: - -```java -selectFrom("sensor_data") - .all() - .whereColumn("id").isEqualTo(bindMarker()) - .whereColumn("date").isEqualTo(bindMarker()) - .whereMapValue("readings", literal("temperature")).isGreaterThan(literal(65)) - .allowFiltering(); -// SELECT * FROM sensor_data WHERE id=? AND date=? AND readings['temperature']>65 ALLOW FILTERING -``` - -Column components support the six basic arithmetic comparison operators. - -### Tokens - -`whereToken` hashes one or more columns into a token. It is generally used to perform range queries: - -```java -selectFrom("user") - .all() - .whereToken("id").isGreaterThan(bindMarker()) - .whereToken("id").isLessThanOrEqualTo(bindMarker()); -// SELECT * FROM user WHERE token(id)>? AND token(id)<=? -``` - -It supports the six basic arithmetic comparison operators. - -### Multi-column relations - -`whereColumns` compares a set of columns to tuple terms of the same arity. It supports the six basic -arithmetic comparison operators (using lexicographical order): - -```java -selectFrom("sensor_data") - .all() - .whereColumn("id").isEqualTo(bindMarker()) - .whereColumns("date", "hour").isGreaterThan(tuple(bindMarker(), bindMarker())); -// SELECT * FROM sensor_data WHERE id=? AND (date,hour)>(?,?) 
-``` - -In addition, tuples support the `in()` operator. Like with regular columns, bind markers can operate -at different levels: - -```java -// Bind the whole list of alternatives (two-element tuples) as a single value: -selectFrom("test") - .all() - .whereColumn("k").isEqualTo(literal(1)) - .whereColumns("c1", "c2").in(bindMarker()); -// SELECT * FROM test WHERE k=1 AND (c1,c2) IN ? - -// Bind each alternative as a value: -selectFrom("test") - .all() - .whereColumn("k").isEqualTo(literal(1)) - .whereColumns("c1", "c2").in(bindMarker(), bindMarker(), bindMarker()); -// SELECT * FROM test WHERE k=1 AND (c1,c2) IN (?,?,?) - -// Bind each element in the alternatives as a value: -selectFrom("test") - .all() - .whereColumn("k").isEqualTo(literal(1)) - .whereColumns("c1", "c2").in( - tuple(bindMarker(), bindMarker()), - tuple(bindMarker(), bindMarker()), - tuple(bindMarker(), bindMarker())); -// SELECT * FROM test WHERE k=1 AND (c1,c2) IN ((?,?),(?,?),(?,?)) -``` - -### Custom index expressions - -`whereCustomIndex` evaluates a custom index. The argument is a free-form term (what is a legal value -depends on your index implementation): - -```java -selectFrom("foo") - .all() - .whereColumn("k").isEqualTo(literal(1)) - .whereCustomIndex("my_custom_index", literal("a text expression")); -// SELECT * FROM foo WHERE k=1 AND expr(my_custom_index,'a text expression') -``` - -### Raw snippets - -Finally, it is possible to provide a raw CQL snippet with `whereRaw()`; it will get appended to the -query as-is, without any syntax checking or escaping: - -```java -selectFrom("foo").all().whereRaw("k = 1 /*some custom comment*/ AND c<2"); -// SELECT * FROM foo WHERE k = 1 /*some custom comment*/ AND c<2 -``` - -This should be used with caution, as it's possible to generate invalid CQL that will fail at -execution time; on the other hand, it can be used as a workaround to handle new CQL features that -are not yet covered by the query builder. 
- -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Relation]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/relation/Relation.html diff --git a/manual/query_builder/schema/.nav b/manual/query_builder/schema/.nav deleted file mode 100644 index 650e895a197..00000000000 --- a/manual/query_builder/schema/.nav +++ /dev/null @@ -1,7 +0,0 @@ -keyspace -table -index -materialized_view -type -function -aggregate \ No newline at end of file diff --git a/manual/query_builder/schema/README.md b/manual/query_builder/schema/README.md deleted file mode 100644 index 0472c8e8c6f..00000000000 --- a/manual/query_builder/schema/README.md +++ /dev/null @@ -1,66 +0,0 @@ - - -# Schema builder - -The schema builder is an additional API provided by [java-driver-query-builder](../) that enables -one to *generate CQL DDL queries programmatically**. For example it could be used to: - -* based on application configuration, generate schema queries instead of building CQL strings by - hand. -* given a Java class that represents a table, view, or user defined type, generate representative - schema DDL `CREATE` queries. - -Here is an example that demonstrates creating a keyspace and a table using [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -try (CqlSession session = CqlSession.builder().build()) { - CreateKeyspace createKs = createKeyspace("cycling").withSimpleStrategy(1); - session.execute(createKs.build()); - - CreateTable createTable = - createTable("cycling", "cyclist_name") - .withPartitionKey("id", DataTypes.UUID) - .withColumn("lastname", DataTypes.TEXT) - .withColumn("firstname", DataTypes.TEXT); - - session.execute(createTable.build()); -} -``` - -The [general concepts](../#general-concepts) and [non goals](../#non-goals) defined for the query -builder also apply for the schema builder. 
- -### Building DDL Queries - -The schema builder offers functionality for creating, altering and dropping elements of a CQL -schema. For a complete tour of the API, browse the child pages in the manual for each schema -element type: - -* [keyspace](keyspace/) -* [table](table/) -* [index](index/) -* [materialized view](materialized_view/) -* [type](type/) -* [function](function/) -* [aggregate](aggregate/) - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/aggregate/README.md b/manual/query_builder/schema/aggregate/README.md deleted file mode 100644 index a54f8703d69..00000000000 --- a/manual/query_builder/schema/aggregate/README.md +++ /dev/null @@ -1,98 +0,0 @@ - - -## Aggregate - -Aggregates enable users to apply User-defined functions (UDF) to rows in a data set and combine -their values into a final result, for example average or standard deviation. [SchemaBuilder] -offers API methods for creating and dropping aggregates. - -### Creating an aggregate (CREATE AGGREGATE) - -To start a `CREATE AGGREGATE` query, use `createAggregate` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -CreateAggregateStart create = createAggregate("average"); -``` - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the aggregate should -only be created if it doesn't already exist, i.e.: - -```java -CreateAggregateStart create = createAggregate("cycling", "average").ifNotExists(); -``` - -You may also specify that you would like to replace an existing aggregate by the same signature if -it exists. 
In this case, use `orReplace`: - -```java -CreateAggregateStart create = createAggregate("cycling", "average").orReplace(); -``` - -One may also specify the parameters of an aggregate using `withParameter`: - -```java -CreateAggregateStart create = createAggregate("cycling", "average") - .withParameter(DataTypes.INT); -``` - -To complete an aggregate, one must then provide the following: - -* The state function (`withSFunc`) to execute on each row -* The type of the value returned by the state function (`withSType`) - -In addition, while optional, it is typical that the following is also provided: - -* The final function to be executed after the state function is evaluated against all rows - (`withFinalFunc`) -* The initial condition (`withInitCond`) which defines the initial value(s) to be passed in to the - first parameter of the state function. - -For example, the following defines a complete `CREATE AGGREGATE` statement: - -```java -createAggregate("cycling", "average") - .withParameter(DataTypes.INT) - .withSFunc("avgstate") - .withSType(DataTypes.tupleOf(DataTypes.INT, DataTypes.BIGINT)) - .withFinalFunc("avgfinal") - .withInitCond(tuple(literal(0), literal(0))); - -// CREATE AGGREGATE cycling.average (int) SFUNC avgstate STYPE tuple FINALFUNC avgfinal INITCOND (0,0) -``` - -### Dropping an aggregate (DROP AGGREGATE) - -To create a `DROP AGGREGATE` query, use `dropAggregate`: - -```java -dropAggregate("cycling", "average"); -// DROP AGGREGATE cycling.average -``` - -You may also specify `ifExists`: - -```java -dropAggregate("average").ifExists(); -// DROP AGGREGATE IF EXISTS average -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/function/README.md b/manual/query_builder/schema/function/README.md deleted file mode 100644 index 001327626b1..00000000000 --- a/manual/query_builder/schema/function/README.md +++ /dev/null @@ -1,114 +0,0 @@ - 
- -## Function - -User-defined functions (UDF) enable users to create user code written in JSR-223 compliant scripting -languages that can be evaluated in CQL queries. [SchemaBuilder] offers API methods for creating -and dropping UDFs. - -### Creating a Function (CREATE FUNCTION) - -To start a `CREATE FUNCTION` query, use `createFunction` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -CreateFunctionStart create = createFunction("log"); -``` - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the UDF should only -be created if it doesn't already exist, i.e.: - -```java -CreateFunctionStart create = createFunction("cycling", "log").ifNotExists(); -``` - -You may also specify that you would like to replace an existing function by the same signature if it -exists. In this case, use `orReplace`: - -```java -CreateFunctionStart create = createFunction("cycling", "log").orReplace(); -``` - -One may also specify the parameters of a function using `withParameter`: - -``` -createFunction("cycling", "left") - .withParameter("colName", DataTypes.TEXT) - .withParameter("num", DataTypes.DOUBLE) -``` - -There are a number of steps that must be executed to complete a function: - -* Specify whether the function is called on null input (`calledOnNull`) or if it should simply - return null (`returnsNullOnNull`). 
-* Specify the return type of the function using `returnsType` -* Specify language of the function body using `withJavaLanguage`, `withJavaScriptLanguage`, or - `withLanguage` -* Specify the function body with `as` or `asQuoted` - -For example, the following defines a complete `CREATE FUNCTION` statement: - -```java -createFunction("cycling", "log") - .withParameter("input", DataTypes.DOUBLE) - .calledOnNull() - .returnsType(DataTypes.DOUBLE) - .withJavaLanguage() - .asQuoted("return Double.valueOf(Math.log(input.doubleValue()));"); - -// CREATE FUNCTION cycling.log (columnname text,num int) CALLED ON NULL INPUT RETURNS double LANGUAGE java -// AS 'return Double.valueOf(Math.log(input.doubleValue()));' -``` - -Note that when providing a function body, the `as` method does not implicitly quote your function -body. If you would like to have the API handle this for you, use `asQuoted`. This will surround -your function body in single quotes if the body itself does not contain a single quote, otherwise it -will surround your function body in two dollar signs (`$$`) mimicking a postgres-style string -literal, i.e.: - -```java -createFunction("sayhi") - .withParameter("input", DataTypes.TEXT) - .returnsNullOnNull() - .returnsType(DataTypes.TEXT) - .withJavaScriptLanguage() - .asQuoted("'hi ' + input;"); -// CREATE FUNCTION sayhi (input text) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE javascript AS $$ 'hi ' + input; $$ -``` - - -### Dropping a Function (DROP FUNCTION) - -To create a `DROP FUNCTION` query, use `dropFunction`: - -```java -dropFunction("cycling", "log"); -// DROP FUNCTION cycling.log -``` - -You may also specify `ifExists`: - -```java -dropFunction("log").ifExists(); -// DROP FUNCTION IF EXISTS log -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/index/README.md b/manual/query_builder/schema/index/README.md deleted file mode 
100644 index c0c9448dfab..00000000000 --- a/manual/query_builder/schema/index/README.md +++ /dev/null @@ -1,121 +0,0 @@ - - -# Index - -An index provides a means of expanding the query capabilities of a table. [SchemaBuilder] offers -API methods for creating and dropping indices. Unlike other schema members, there is no mechanism -to alter an index. - -### Creating an Index (CREATE INDEX) - -To start a `CREATE INDEX` query, use `createIndex` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -// an index name is not required -CreateIndexStart create = createIndex(); - -create = createIndex("my_idx"); -``` - -Unlike other keyspace elements, there is no option to provide the keyspace name, instead it is -implied from the indexed table's keyspace. - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the index should -only be created if it doesn't already exist, i.e.: - -```java -CreateIndexStart create = createIndex("my_idx").ifNotExists(); -``` - -Note one small difference with `IF NOT EXISTS` with indices is that the criteria also applies to -whether or not the table and column specification has an index already, not just the name of the -index. - -At this stage, the query cannot be completed yet. You need to provide at least: - -* The table the index applies to using `onTable` -* The column the index applies to using an `andColumn*` implementation. - -For example: - -```java -createIndex().onTable("tbl").andColumnKeys("addresses"); -// CREATE INDEX ON tbl (KEYS(addresses)) -``` - -#### Custom Indices - -Cassandra supports indices with a custom implementation, specified by an input class name. 
The -class implementation may be specified using `custom(className)`, for example: - -```java -createIndex() - .custom("org.apache.cassandra.index.MyCustomIndex") - .onTable("tbl") - .andColumn("x"); -// CREATE CUSTOM INDEX ON tbl (x) USING 'org.apache.cassandra.index.MyCustomIndex' -``` - -One popular custom index implementation is SASI (SSTable Attached Secondary Index). To use SASI, -use `usingSASI` and optionally `withSASIOptions`: - -```java -createIndex() - .usingSASI() - .onTable("tbl") - .andColumn("x") - .withSASIOptions(ImmutableMap.of("mode", "CONTAINS", "tokenization_locale", "en")); -// CREATE CUSTOM INDEX ON tbl (x) USING 'org.apache.cassandra.index.sasi.SASIIndex' WITH OPTIONS={'mode':'CONTAINS','tokenization_locale':'en'} -``` - -#### Column Index Types - -When indexing columns, one may simply use `andColumn`. However, when indexing collection columns -there are several additional options available: - -* `andColumnKeys`: Creates an index on a map column's keys. -* `andColumnValues`: Creates an index on a map column's values. -* `andColumnEntries`: Creates an index on a map column's entries. -* `andColumnFull`: Creates an index of a frozen collection's full value. - -#### Index options - -After specifying the columns for the index, you may use `withOption` to provide free-form options on -the index. These are really only applicable to custom index implementations. 
- -### Dropping an Index (DROP INDEX) - -To create a `DROP INDEX` query, use `dropIndex`: - -```java -dropIndex("ks", "my_idx"); -// DROP INDEX ks.my_idx -``` - -You may also specify `ifExists`: - -```java -dropIndex("my_idx").ifExists(); -// DROP INDEX IF EXISTS my_idx -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/keyspace/README.md b/manual/query_builder/schema/keyspace/README.md deleted file mode 100644 index 572e8af1658..00000000000 --- a/manual/query_builder/schema/keyspace/README.md +++ /dev/null @@ -1,107 +0,0 @@ - - -## Keyspace - -A keyspace is a top-level namespace that defines a name, replication strategy and configurable -options. [SchemaBuilder] offers API methods for creating, altering and dropping keyspaces. - -### Creating a Keyspace (CREATE KEYSPACE) - -To start a `CREATE KEYSPACE` query, use `createKeyspace` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -CreateKeyspaceStart create = createKeyspace("cycling"); -``` - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the keyspace should -only be created if it doesn't already exist, i.e.: - -```java -CreateKeyspaceStart create = createKeyspace("cycling").ifNotExists(); -``` - -Note that, at this stage, the query cannot be completed yet. You need to provide at least a -replication strategy. The two most widely used ones are SimpleStrategy and NetworkTopologyStrategy. 
- -To provide a replication strategy, use one of the following API methods on `CreateKeyspaceStart`: - -* `withSimpleStrategy(int replicationFactor)` -* `withNetworkTopologyStrategy(Map replications)` -* `withReplicationOptions(Map replicationOptions)` - -For example, the following builds a completed `CreateKeyspace` using `NetworkTopologyStrategy` with -a replication factor of 2 in `east` and 3 in `west`: - -```java -CreateKeyspace create = createKeyspace("cycling") - .withNetworkTopologyStrategy(ImmutableMap.of("east", 2, "west", 3)); -// CREATE KEYSPACE cycling WITH replication={'class':'NetworkTopologyStrategy','east':2,'west':3} -``` - -Optionally, once a replication factor is provided, one may provide additional configuration when -creating a keyspace: - -* `withDurableWrites(boolean durableWrites)` -* `withOption(String name, Object value)` - -### Altering a Keyspace (ALTER KEYSPACE) - -To start an `ALTER KEYSPACE` query, use `alterKeyspace`: - -```java -AlterKeyspaceStart alterKeyspace = alterKeyspace("cycling"); -``` - -From here, you can modify the keyspace's replication and other settings: - -* `withSimpleStrategy(int replicationFactor)` -* `withNetworkTopologyStrategy(Map replications)` -* `withReplicationOptions(Map replicationOptions)` -* `withDurableWrites(boolean durableWrites)` -* `withOption(String name, Object value)` - -At least one of these operations must be used to return a completed `AlterKeyspace`, i.e.: - -```java -alterKeyspace("cycling").withDurableWrites(true); -// ALTER KEYSPACE cycling WITH durable_writes=true -``` - -### Dropping a keyspace (DROP KEYSPACE) - -To create a `DROP KEYSPACE` query, use `dropKeyspace`: - -```java -dropKeyspace("cycling"); -// DROP KEYSPACE cycling -``` - -You may also specify `ifExists`: - -```java -dropKeyspace("cycling").ifExists(); -// DROP KEYSPACE IF EXISTS cycling -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html - - 
diff --git a/manual/query_builder/schema/materialized_view/README.md b/manual/query_builder/schema/materialized_view/README.md deleted file mode 100644 index c4f495f95aa..00000000000 --- a/manual/query_builder/schema/materialized_view/README.md +++ /dev/null @@ -1,108 +0,0 @@ - - -## Materialized View - -Materialized Views are an experimental feature introduced in Apache Cassandra 3.0 that provide a -mechanism for server-side denormalization from a base table into a view that is updated when the -base table is updated. [SchemaBuilder] offers API methods for creating, altering and dropping -materialized views. - -### Creating a Materialized View (CREATE MATERIALIZED VIEW) - -To start a `CREATE MATERIALIZED VIEW` query, use `createMaterializedView` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -CreateMaterializedViewStart create = createMaterializedView("cycling", "cyclist_by_age"); -``` - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the view should only -be created if it doesn't already exist, i.e.: - -```java -CreateMaterializedViewStart create = createMaterializedView("cycling", "cyclist_by_age").ifNotExists(); -``` - -There are a number of steps that must be executed to complete a materialized view: - -* Specify the base table using `asSelectFrom` -* Specify the columns to include in the view via `column` or `columns` -* Specify the where clause using [relations](../../relation) -* Specify the partition key columns using `withPartitionKey` and `withClusteringColumn` - -For example, the following defines a complete `CREATE MATERIALIZED VIEW` statement: - -```java -createMaterializedView("cycling", "cyclist_by_age") - .asSelectFrom("cycling", "cyclist") - .columns("age", "name", "country") - .whereColumn("age") - .isNotNull() - .whereColumn("cid") - .isNotNull() - .withPartitionKey("age") - .withClusteringColumn("cid"); -// CREATE MATERIALIZED VIEW cycling.cyclist_by_age 
AS -// SELECT age,name,country FROM cycling.cyclist WHERE age IS NOT NULL AND cid IS NOT NULL PRIMARY KEY(age,cid) -``` - -Please note that not all WHERE clause relations may be compatible with materialized views. - -Like a [table](../table), one may additionally provide configuration such as clustering order, -compaction options and so on. Refer to [RelationStructure] for documentation on additional -configuration that may be provided for a view. - -### Altering a Materialized View (ALTER MATERIALIZED VIEW) - -To start an `ALTER MATERIALIZED VIEW` query, use `alterMaterializedView`: - -```java -alterMaterializedView("cycling", "cyclist_by_age"); -``` - -Unlike a table, you may not alter, drop or rename columns on a materialized view. Instead, one may -only alter the options defined in [RelationStructure]. For example: - -```java -alterMaterializedView("cycling", "cyclist_by_age") - .withGcGraceSeconds(0) - .withCaching(true, RowsPerPartition.NONE); -// ALTER MATERIALIZED VIEW cycling.cyclist_by_age WITH gc_grace_seconds=0 AND caching={'keys':'ALL','rows_per_partition':'NONE'} -``` - -### Dropping a View (DROP MATERIALIZED VIEW) - -To create a `DROP MATERIALIZED VIEW` query, use `dropMaterializedView`: - -```java -dropMaterializedView("cycling", "cyclist_by_age"); -// DROP MATERIALIZED VIEW cycling.cyclist_by_age -``` - -You may also specify `ifExists`: - -```java -dropMaterializedView("cyclist_by_age").ifExists(); -// DROP MATERIALIZED VIEW IF EXISTS cyclist_by_age -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html -[RelationStructure]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.html diff --git a/manual/query_builder/schema/table/README.md b/manual/query_builder/schema/table/README.md deleted file mode 100644 index 090f8a1f67b..00000000000 --- a/manual/query_builder/schema/table/README.md +++ /dev/null @@ -1,131 +0,0 @@ - - -## 
Table - -Data in Apache Cassandra is stored in tables. [SchemaBuilder] offers API methods for creating, -altering, and dropping tables. - -### Creating a Table (CREATE TABLE) - -To start a `CREATE TABLE` query, use `createTable` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -CreateTableStart create = createTable("cycling", "cyclist_name"); -``` - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the table should -only be created if it doesn't already exist, i.e.: - -```java -CreateTableStart create = createTable("cycling", "cyclist_name").ifNotExists(); -``` - -Note that, at this stage, the query cannot be completed yet. You need to provide at least one -partition key column using `withPartitionKey()`, i.e.: - -```java -CreateTable create = createTable("cycling", "cyclist_name").withPartitionKey("id", DataTypes.UUID); -// CREATE TABLE cycling.cyclist_name (id UUID PRIMARY KEY) -``` - -A table with only one column is not so typical however. At this point you may provide partition, -clustering, regular and static columns using any of the following API methods: - -* `withPartitionKey(name, dataType)` -* `withClusteringColumn(name, dataType)` -* `withColumn(name, dataType)` -* `withStaticColumn(name, dataType)` - -Primary key precedence is driven by the order of `withPartitionKey` and `withClusteringKey` -invocations, for example: - - -```java -CreateTable create = createTable("cycling", "cyclist_by_year_and_name") - .withPartitionKey("race_year", DataTypes.INT) - .withPartitionKey("race_name", DataTypes.TEXT) - .withClusteringColumn("rank", DataTypes.INT) - .withColumn("cyclist_name", DataTypes.TEXT); -// CREATE TABLE cycling.cyclist_by_year_and_name (race_year int,race_name text,rank int,cyclist_name text,PRIMARY KEY((race_year,race_name),rank)) -``` - -After providing the column specification, clustering order and many table options may be provided. 
-Refer to [CreateTableWithOptions] for the variety of configuration options available. - -The following configures compaction and compression options and includes a clustering order. - -```java -CreateTableWithOptions create = createTable("cycling", "cyclist_by_year_and_name") - .withPartitionKey("race_year", DataTypes.INT) - .withPartitionKey("race_name", DataTypes.TEXT) - .withClusteringColumn("rank", DataTypes.INT) - .withColumn("cyclist_name", DataTypes.TEXT) - .withCompaction(leveledCompactionStrategy()) - .withSnappyCompression() - .withClusteringOrder("rank", ClusteringOrder.DESC); -// CREATE TABLE cycling.cyclist_by_year_and_name (race_year int,race_name text,rank int,cyclist_name text,PRIMARY KEY((race_year,race_name),rank)) -// WITH CLUSTERING ORDER BY (rank DESC) -// AND compaction={'class':'LeveledCompactionStrategy'} -// AND compression={'class':'SnappyCompressor'} -``` - -### Altering a Table (ALTER TABLE) - -To start an `ALTER TABLE` query, use `alterTable`: - -```java -alterTable("cycling", "cyclist_name"); -``` - -From here, you can modify the table in the following ways: - -* `dropCompactStorage()`: Drops `COMPACT STORAGE` from a table, removing thrift compatibility mode - and migrates to a CQL-compatible format. -* `addColumn(columnName, dataType)`: Adds a new column to the table. -* `alterColumn(columnName, dataType)`: Changes the type of an existing column. This is not - recommended. -* `dropColumn(columnName)`: Removes an existing column from the table. -* `renameColumn(from, to)`: Renames a column. -* API methods from [AlterTableWithOptions] - -Invoking any of these methods returns a complete query and you may make successive calls to the same -API methods, with exception to alter column, which may only be invoked once. 
- -### Dropping a Table (DROP TABLE) - -To create a `DROP TABLE` query, use `dropTable`: - -```java -dropTable("cycling", "cyclist_name"); -// DROP TABLE cycling.cyclist_name -``` - -You may also specify `ifExists`: - -```java -dropTable("cyclist_name").ifExists(); -// DROP TABLE IF EXISTS cyclist_name -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html -[CreateTableWithOptions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.html -[AlterTableWithOptions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.html diff --git a/manual/query_builder/schema/type/README.md b/manual/query_builder/schema/type/README.md deleted file mode 100644 index c289ad776a8..00000000000 --- a/manual/query_builder/schema/type/README.md +++ /dev/null @@ -1,110 +0,0 @@ - - -## Type - -User-defined types are special types that can associate multiple named fields to a single column. -[SchemaBuilder] offers API methods for creating, altering, and dropping types. - -### Creating a Type (CREATE TYPE) - -To start a `CREATE TYPE` query, use `createType` in [SchemaBuilder]: - -```java -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; - -CreateTypeStart create = createType("mykeyspace", "address"); -``` - -Like all other `CREATE` queries, one may supply `ifNotExists()` to require that the type should only -be created if it doesn't already exist, i.e.: - -```java -CreateTypeStart create = createType("address").ifNotExists(); -``` - -Note that, at this stage, the query cannot be completed yet. 
You need to provide at least one field -using `withField()`, i.e.: - -```java -CreateType create = createType("mykeyspace", "address").withField("street", DataTypes.TEXT); -// CREATE TYPE mykeyspace.address (street text) -``` - -A type with only one field is not entirely useful. You may continue to make successive calls to -`withField` to specify additional fields, i.e.: - -```java -CreateType create = createType("mykeyspace", "address") - .withField("street", DataTypes.TEXT) - .withField("city", DataTypes.TEXT) - .withField("zip_code", DataTypes.INT) - .withField("phones", DataTypes.setOf(DataTypes.TEXT)); -// CREATE TYPE mykeyspace.address (street text,city text,zip_code int,phones set<text>) -``` - -### Using a created Type in Schema Builder API - -After creating a UDT, one may wonder how to use it in other schema statements. To do so, utilize -`udt(name,frozen)` from [SchemaBuilder], i.e.: - -```java -CreateTable users = createTable("mykeyspace", "users") - .withPartitionKey("id", DataTypes.UUID) - .withColumn("name", udt("fullname", true)) - .withColumn("direct_reports", DataTypes.setOf(udt("fullname", true))) - .withColumn("addresses", DataTypes.mapOf(DataTypes.TEXT, udt("address", true))); -// CREATE TABLE mykeyspace.users (id uuid PRIMARY KEY,name frozen<fullname>,direct_reports set<frozen<fullname>>,addresses map<text,frozen<address>>) -``` - -### Altering a Type (ALTER TYPE) - -To start an `ALTER TYPE` query, use `alterType`: - -```java -alterType("mykeyspace", "address"); -``` - -From here, you can modify the type in the following ways: - -* `addField(fieldName, dataType)`: Adds a new field to the type. -* `alterField(fieldName, dataType)`: Changes the type of an existing field. This is not - recommended. -* `renameField(from, to)`: Renames a field. - -Invoking any of these methods returns a complete query. You may make successive calls to -`renameField`, but not the other methods. 
- -### Dropping a Type (DROP TYPE) - -To create a `DROP TYPE` query, use `dropType`: - -```java -dropType("mykeyspace", "address"); -// DROP TYPE mykeyspace.address -``` - -You may also specify `ifExists`: - -```java -dropType("address").ifExists(); -// DROP TYPE IF EXISTS address -``` - -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/select/README.md b/manual/query_builder/select/README.md deleted file mode 100644 index 0425423a402..00000000000 --- a/manual/query_builder/select/README.md +++ /dev/null @@ -1,437 +0,0 @@ - - -## SELECT - -Start your SELECT with the `selectFrom` method in [QueryBuilder]. There are several variants -depending on whether your table name is qualified, and whether you use -[identifiers](../../case_sensitivity/) or raw strings: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -SelectFrom selectUser = selectFrom("user"); -``` - -Note that, at this stage, the query can't be built yet. You need at least one selector. - -### Selectors - -A selector is something that appears after the `SELECT` keyword, and will become a column in the -result set. Its simplest form is a column identifier, but it can be a more complex expression. - -The easiest way to add a selector is with one of the fluent shortcuts: - -```java -selectFrom("user") - .column("first_name") - .column("last_name"); -// SELECT first_name,last_name FROM user -``` - -You can also create it manually with one of the factory methods in [Selector], and then pass it to -`selector()`: - -```java -selectFrom("user").selector( - Selector.column("first_name")); -// SELECT first_name FROM user -``` - -If you have multiple selectors, you can also use `selectors()` to add them all in a single call. 
-This is a bit more efficient since it creates less temporary objects: - -```java -selectFrom("user").selectors( - Selector.column("first_name"), - Selector.column("last_name")); -// SELECT first_name,last_name FROM user -``` - -Use an alias to give a selector a different name in the result set: - -```java -selectFrom("user").column("first_name").as("first"); -// SELECT first_name AS first FROM user - -selectFrom("user").selector( - Selector.column("first_name").as("first")); -// SELECT first_name AS first FROM user -``` - -The query builder provides many kinds of selectors. Some of them only work with newer Cassandra -versions, always check what your target platform supports. - -#### Star selector and count - -`all` is the classic "star" selector that returns all columns. It cannot be aliased, and must be the -only selector: - -```java -selectFrom("user").all(); -// SELECT * FROM user - -selectFrom("user").all().as("everything"); -// throws IllegalStateException: Can't alias the * selector -``` - -If you add it to a query that already had other selectors, they will get removed: - -```java -selectFrom("user").column("first_name").all(); -// SELECT * FROM user -``` - -If you add other selectors to a query that already had the star selector, the star selector gets -removed: - -```java -selectFrom("user").all().column("first_name"); -// SELECT first_name FROM user -``` - -If you add multiple selectors at once, and one of them is the star selector, an exception is thrown: - -```java -selectFrom("user").selectors( - Selector.column("first_name"), - Selector.all(), - Selector.column("last_name")); -// throws IllegalArgumentException: Can't pass the * selector to selectors() -``` - -`countAll` counts the number of rows: - -```java -selectFrom("user").countAll(); -// SELECT count(*) FROM user -``` - -#### Columns - -We've already shown how `column` works: - -```java -selectFrom("user") - .column("first_name") - .column("last_name"); -// SELECT first_name,last_name FROM 
user -``` - -When all your selectors are simple columns, there is a convenience shortcut to add them in one call: - -```java -selectFrom("user").columns("first_name", "last_name"); -// SELECT first_name,last_name FROM user -``` - -#### Arithmetic operations - -Selectors can be combined with arithmetic operations. - -| CQL Operator | Selector name | -|--------------|---------------| -| `a+b` | `add` | -| `a-b` | `subtract` | -| `-a` | `negate` | -| `a*b` | `multiply` | -| `a/b` | `divide` | -| `a%b` | `remainder` | - -```java -selectFrom("rooms") - .multiply(Selector.column("length"), Selector.column("width")) - .as("surface"); -// SELECT length*width AS surface FROM rooms -``` - -Operations can be nested, and will get parenthesized according to the usual precedence rules: - -```java -selectFrom("foo") - .multiply( - Selector.negate(Selector.column("a")), - Selector.add(Selector.column("b"), Selector.column("c"))); -// SELECT -a*(b+c) FROM foo -``` - -Note: as shown in the examples above, arithmetic operations can get verbose very quickly. If you -have common expressions that get reused throughout your application code, consider writing your own -shortcuts: - -```java -public static Selector multiplyColumns(String left, String right) { - return Selector.multiply(Selector.column(left), Selector.column(right)); -} - -selectFrom("rooms") - .selector(multiplyColumns("length", "width")) - .as("surface"); -// SELECT length*width AS surface FROM rooms -``` - -#### Casts - -Casting is closely related to arithmetic operations; it allows you to coerce a selector to a -different data type. 
For example, if `height` and `weight` are two `int` columns, the following -expression uses integer division and returns an `int`: - -```java -selectFrom("user") - .divide( - Selector.multiply(Selector.column("weight"), literal(10_000)), - Selector.multiply(Selector.column("height"), Selector.column("height"))) - .as("bmi"); -// SELECT weight*10000/(height*height) AS bmi FROM user -``` - -What if you want a floating-point result instead? You have to introduce a cast: - -```java -selectFrom("user") - .divide( - Selector.multiply( - Selector.cast(Selector.column("weight"), DataTypes.DOUBLE), - literal(10_000)), - Selector.multiply(Selector.column("height"), Selector.column("height"))) - .as("bmi"); -// SELECT CAST(weight AS double)*10000/(height*height) AS bmi FROM user -``` - -Type hints are similar to casts, with a subtle difference: a cast applies to an expression with an -already well-established type, whereas a hint is used with a literal, where the type can be -ambiguous. - -```java -selectFrom("foo").divide( - // A literal 1 can be any numeric type (int, bigint, double, etc.) - // It defaults to int, which is wrong here if we want a floating-point result. - Selector.typeHint(literal(1), DataTypes.DOUBLE), - Selector.column("a")); -// SELECT (double)1/a FROM foo -``` - -#### Sub-elements - -These selectors extract an element from a complex column, for example: - -* a field from a user-defined type: - - ```java - selectFrom("user").field("address", "street"); - // SELECT address.street FROM user - ``` - -* an element, or range of elements, in a set or a map: - - ```java - selectFrom("product").element("features", literal("color")); - // SELECT features['color'] FROM product - - selectFrom("movie").range("ratings", literal(3), literal(4)); - // SELECT ratings[3..4] FROM movie - - selectFrom("movie").range("ratings", literal(3), null); - // SELECT ratings[3..] 
FROM movie - - selectFrom("movie").range("ratings", null, literal(3)); - // SELECT ratings[..3] FROM movie - ``` - -#### Collections of selectors - -Groups of selectors can be extracted as a single collection, such as: - -* a list or set. All inner selectors must return the same CQL type: - - ```java - selectFrom("user").listOf( - Selector.column("first_name"), - Selector.column("last_name")); - // SELECT [first_name,last_name] FROM user - - selectFrom("user").setOf( - Selector.column("first_name"), - Selector.column("last_name")); - // SELECT {first_name,last_name} FROM user - ``` - -* a map. All key and value selectors must have consistent types. In most cases, Cassandra will - require a type hint for the outer map, so the query builder can generate that for you if you - provide the key and value types: - - ```java - Map mapSelector = new HashMap<>(); - mapSelector.put(literal("first"), Selector.column("first_name")); - mapSelector.put(literal("last"), Selector.column("last_name")); - - selectFrom("user").mapOf(mapSelector, DataTypes.TEXT, DataTypes.TEXT); - // SELECT (map){'first':first_name,'last':last_name} FROM user - ``` - -* a tuple. 
This time the types can be heterogeneous: - - ```java - selectFrom("user").tupleOf( - Selector.column("first_name"), - Selector.column("birth_date")); - // SELECT (first_name,birth_date) FROM user - ``` - -#### Functions - -Function calls take a function name (optionally qualified with a keyspace), and a list of selectors -that will be passed as arguments: - -```java -selectFrom("user").function("utils", "bmi", Selector.column("weight"), Selector.column("height")); -// SELECT utils.bmi(weight,height) FROM user -``` - -The built-in functions `ttl` and `writetime` have convenience shortcuts: - -```java -selectFrom("user").writeTime("first_name").ttl("last_name"); -// SELECT writetime(first_name),ttl(last_name) FROM user -``` - -#### Literals - -Occasionally, you'll need to inline a CQL literal in your query; this is not very useful as a -top-level selector, but could happen as part of an arithmetic operation: - -```java -selectFrom("foo").quotient(literal(1), Selector.column("a")); -// SELECT 1/a FROM foo -``` - -See the [terms](../term/#literals) section for more details on literals. - -#### Raw snippets - -Lastly, a selector can be expressed as a raw CQL snippet, that will get appended to the query as-is, -without any syntax checking or escaping: - -```java -selectFrom("user").raw("first_name, last_name /*some random comment*/"); -// SELECT first_name, last_name /*some random comment*/ FROM user -``` - -This should be used with caution, as it's possible to generate invalid CQL that will fail at -execution time; on the other hand, it can be used as a workaround to handle new CQL features that -are not yet covered by the query builder. 
- -### Relations - -Relations get added with the `whereXxx()` methods: - -```java -selectFrom("user").all().whereColumn("id").isEqualTo(literal(1)); -// SELECT * FROM user WHERE id=1 -``` - -You can also create and add them manually: - -```java -selectFrom("user").all().where( - Relation.column("id").isEqualTo(literal(1))); -// SELECT * FROM user WHERE id=1 -``` - -Like selectors, they also have fluent shortcuts to build and add in a single call: - - -Relations are a common feature used by many types of statements, so they have a -[dedicated page](../relation) in this manual. - -### Other clauses - -The remaining SELECT clauses have a straightforward syntax. Refer to the javadocs for the fine -print. - -Groupings: - -```java -selectFrom("sensor_data") - .function("max", Selector.column("reading")) - .whereColumn("id").isEqualTo(bindMarker()) - .groupBy("date"); -// SELECT max(reading) FROM sensor_data WHERE id=? GROUP BY date -``` - -Orderings: - -```java -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; - -selectFrom("sensor_data") - .column("reading") - .whereColumn("id").isEqualTo(bindMarker()) - .orderBy("date", ClusteringOrder.DESC); -// SELECT reading FROM sensor_data WHERE id=? 
ORDER BY date DESC -``` - -Vector Search: - -```java - -import com.datastax.oss.driver.api.core.data.CqlVector; - -selectFrom("foo") - .all() - .where(Relation.column("k").isEqualTo(literal(1))) - .orderByAnnOf("c1", CqlVector.newInstance(0.1, 0.2, 0.3)); -// SELECT * FROM foo WHERE k=1 ORDER BY c1 ANN OF [0.1, 0.2, 0.3] - -selectFrom("cycling", "comments_vs") - .column("comment") - .function( - "similarity_cosine", - Selector.column("comment_vector"), - literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) - .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) - .limit(1); -// SELECT comment,similarity_cosine(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1 -``` - -Limits: - -```java -selectFrom("sensor_data") - .column("reading") - .whereColumn("id").isEqualTo(bindMarker()) - .limit(10); -// SELECT reading FROM sensor_data WHERE id=? LIMIT 10 - -selectFrom("sensor_data") - .column("reading") - .whereColumn("id").isEqualTo(bindMarker()) - .perPartitionLimit(bindMarker("l")); -// SELECT reading FROM sensor_data WHERE id IN ? PER PARTITION LIMIT :l -``` - -Filtering: - -```java -selectFrom("user").all().allowFiltering(); -// SELECT * FROM user ALLOW FILTERING -``` - -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Selector]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/select/Selector.html diff --git a/manual/query_builder/term/README.md b/manual/query_builder/term/README.md deleted file mode 100644 index 460ed8dcb10..00000000000 --- a/manual/query_builder/term/README.md +++ /dev/null @@ -1,128 +0,0 @@ - - -## Terms - -A term is an expression that does not involve the value of a column. 
It is used: - -* as an argument to some selectors, for example the indices of [sub-element](../select/#sub-element) - selectors; -* as the right operand of [relations](../relation). - -To create a term, call one of the factory methods in [QueryBuilder]: - -### Literals - -`literal()` takes a Java object and inlines it as a CQL literal: - -```java -selectFrom("user").all().whereColumn("id").isEqualTo(literal(1)); -// SELECT * FROM user WHERE id=1 -``` - -The argument is converted according to the driver's -[default type mappings](../../core/#cql-to-java-type-mapping). If there is no default mapping, you -will get a `CodecNotFoundException`. - -If you use [custom codecs](../../core/custom_codecs), you might need to inline a custom Java type. -You can pass a [CodecRegistry] as the second argument (most likely, this will be the registry of -your session): - -```java -MyCustomId myCustomId = ...; -CodecRegistry registry = session.getContext().getCodecRegistry(); -selectFrom("user").all().whereColumn("id").isEqualTo(literal(myCustomId, registry)); -``` - -Alternatively, you can pass a codec directly: - -```java -TypeCodec codec = ...; -selectFrom("user").all().whereColumn("id").isEqualTo(literal(myCustomId, codec)); -``` - -### Function calls - -`function()` invokes a built-in or user-defined function. It takes a function name (optionally -qualified with a keyspace), and a list of terms that will be passed as arguments: - -```java -selectFrom("sensor_data") - .all() - .whereColumn("id").isEqualTo(bindMarker()) - .whereColumn("date").isEqualTo(function("system", "currentDate")); -// SELECT * FROM sensor_data WHERE id=? AND date=system.currentdate() -``` - -### Arithmetic operations - -Terms can be combined with arithmetic operations. 
- -| CQL Operator | Selector name | -|--------------|---------------| -| `a+b` | `add` | -| `a-b` | `subtract` | -| `-a` | `negate` | -| `a*b` | `multiply` | -| `a/b` | `divide` | -| `a%b` | `remainder` | - -```java -selectFrom("sensor_data") - .all() - .whereColumn("id").isEqualTo(bindMarker()) - .whereColumn("unix_timestamp").isGreaterThan( - subtract(function("toUnixTimestamp", function("now")), - literal(3600))); -// SELECT * FROM sensor_data WHERE id=? AND unix_timestamp>tounixtimestamp(now())-3600 -``` - -Operations can be nested, and will get parenthesized according to the usual precedence rules. - -### Type hints - -`typeHint` forces a term to a particular CQL type. For instance, it could be used to ensure that an -expression uses floating-point division: - -```java -selectFrom("test") - .all() - .whereColumn("k").isEqualTo(literal(1)) - .whereColumn("c").isGreaterThan(divide( - typeHint(literal(1), DataTypes.DOUBLE), - literal(3))); -// SELECT * FROM test WHERE k=1 AND c>(double)1/3 -``` - -### Raw CQL snippets - -Finally, it is possible to provide a raw CQL snippet with `raw()`; it will get appended to the query -as-is, without any syntax checking or escaping: - -```java -selectFrom("sensor_data").all().whereColumn("id").isEqualTo(raw(" 1 /*some random comment*/")); -// SELECT * FROM sensor_data WHERE id= 1 /*some random comment*/ -``` - -This should be used with caution, as it's possible to generate invalid CQL that will fail at -execution time; on the other hand, it can be used as a workaround to handle new CQL features that -are not yet covered by the query builder. 
- -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html diff --git a/manual/query_builder/truncate/README.md b/manual/query_builder/truncate/README.md deleted file mode 100644 index c8cd6945123..00000000000 --- a/manual/query_builder/truncate/README.md +++ /dev/null @@ -1,39 +0,0 @@ - - -## TRUNCATE - -To create a TRUNCATE query, use one of the `truncate` methods in [QueryBuilder]. There are several -variants depending on whether your table name is qualified, and whether you use -[identifiers](../../case_sensitivity/) or raw strings: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -Truncate truncate = truncate("ks", "mytable"); -// TRUNCATE ks.mytable - -Truncate truncate2 = truncate(CqlIdentifier.fromCql("mytable")); -// TRUNCATE mytable -``` - -Note that, at this stage, the query is ready to build. After creating a TRUNCATE query it does not -take any values. - -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html diff --git a/manual/query_builder/update/README.md b/manual/query_builder/update/README.md deleted file mode 100644 index 15502f52bb7..00000000000 --- a/manual/query_builder/update/README.md +++ /dev/null @@ -1,274 +0,0 @@ - - -## UPDATE - -To start an UPDATE query, use one of the `update` methods in [QueryBuilder]. There are several -variants depending on whether your table name is qualified, and whether you use -[identifiers](../../case_sensitivity/) or raw strings: - -```java -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; - -UpdateStart update = update("user"); -``` - -Note that, at this stage, the query can't be built yet. You need at least one -[assignment](#assignments) and one [relation](#relations). 
- -### Timestamp - -The USING TIMESTAMP clause comes right after the table, and specifies the timestamp at which the -mutation will be applied. You can pass either a literal value: - -```java -update("user").usingTimestamp(1234); -// UPDATE user USING TIMESTAMP 1234... -``` - -Or a bind marker: - -```java -update("user").usingTimestamp(bindMarker()); -// UPDATE user USING TIMESTAMP ?... -``` - -If you call the method multiple times, the last value will be used. - -### Time To Live (TTL) - -You can generate a USING TTL clause that will cause column values to be deleted (marked with a -tombstone) after the specified time (in seconds) has expired. This can be done with a literal: - -```java -update("user").usingTtl(60).setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker()); -// UPDATE user USING TTL 60 SET v=? WHERE k=? -``` - -Or a bind marker: - -```java -update("user").usingTtl(bindMarker()).setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker()); -// UPDATE user USING TTL ? SET v=? WHERE k=? -``` - -You can clear a previously set TTL by setting the value to 0: - -```java -update("user").usingTtl(0).setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker()); -// UPDATE user USING TTL 0 SET v=? WHERE k=? -``` - -Setting the value to 0 will result in removing the TTL from the column in Cassandra when the query -is executed. This is distinctly different than setting the value to null. Passing a null value to -this method will only remove the USING TTL clause from the query, which will not alter the TTL (if -one is set) in Cassandra. - -### Assignments - -An assignment is an operation that appears after the SET keyword. You need at least one for a valid -update query. - -The easiest way to add an assignment is with one of the fluent methods: - -```java -update("user").setColumn("v", bindMarker()); -// UPDATE user SET v=?... 
-``` - -You can also create it manually with one of the factory methods in [Assignment], and then pass it to -`set()`: - -```java -update("user").set( - Assignment.setColumn("v", bindMarker())); -// UPDATE user SET v=?... -``` - -If you have multiple assignments, you can add them all in a single call. This is a bit more -efficient since it creates less temporary objects: - -```java -update("user").set( - Assignment.setColumn("v1", bindMarker()), - Assignment.setColumn("v2", bindMarker())) -// UPDATE user SET v1=?,v2=?... -``` - -#### Simple columns - -As shown already, `setColumn` changes the value of a column. It can take a bind marker or a literal -(which must have the same CQL type as the column): - -```java -update("user").setColumn("last_name", literal("Doe")); -// UPDATE user SET last_name='Doe'... -``` - -#### UDT fields - -`setField` modifies a field inside of a UDT column: - -```java -update("user").setField("address", "street", bindMarker()); -// UPDATE user SET address.street=?... -``` - -#### Counters - -Counter columns can be incremented by a given amount: - -```java -update("foo").increment("c", bindMarker()); -// UPDATE foo SET c+=?... - -update("foo").increment("c", literal(4)); -// UPDATE foo SET c+=4... -``` - -There is a shortcut to increment by 1: - -```java -update("foo").increment("c"); -// UPDATE foo SET c+=1... -``` - -All those methods have a `decrement` counterpart: - -```java -update("foo").decrement("c"); -// UPDATE foo SET c-=1... -``` - -#### Collections - -`mapValue` changes a value in a map. The key is expressed as a term (here a literal value) : - -```java -update("product").setMapValue("features", literal("color"), bindMarker()) -// UPDATE product SET features['color']=?... -``` - -`append` operates on any CQL collection type (list, set or map). If you pass a literal, it must also -be a collection, with the same CQL type of elements: - -```java -update("foo").append("l", bindMarker()); -// UPDATE foo SET l+=?... 
- -List value = Arrays.asList(1, 2, 3); -update("foo").append("l", literal(value)); -// UPDATE foo SET l+=[1,2,3]... - -Set value = new HashSet<>(Arrays.asList(1, 2, 3)); -update("foo").append("s", literal(value)) -// UPDATE foo SET s+={1,2,3}... - -Map value = new HashMap<>(); -value.put(1, "bar"); -value.put(2, "baz"); -update("foo").append("m", literal(value)); -// UPDATE foo SET m+={1:'bar',2:'baz'}... -``` - -If you only have one element to append, there are shortcuts to avoid creating a collection in your -code: - -```java -update("foo").appendListElement("l", literal(1)); -// UPDATE foo SET l+=[1]... - -update("foo").appendSetElement("s", literal(1)); -// UPDATE foo SET s+={1}... - -update("foo").appendMapEntry("m", literal(1), literal("bar")); -// UPDATE foo SET m+={1:'bar'}... -``` - -All those methods have a `prepend` counterpart: - -```java -update("foo").prepend("l", bindMarker()); -// UPDATE foo SET l=?+l... -``` - -As well as a `remove` counterpart: - -```java -update("foo").remove("l", bindMarker()); -// UPDATE foo SET l-=?... -``` - -### Relations - -Once you have at least one assignment, relations can be added with the fluent `whereXxx()` methods: - -```java -update("foo").setColumn("v", bindMarker()).whereColumn("k").isEqualTo(bindMarker()); -// UPDATE foo SET v=? WHERE k=? -``` - -Or you can build and add them manually: - -```java -update("foo").setColumn("v", bindMarker()).where( - Relation.column("k").isEqualTo(bindMarker())); -// UPDATE foo SET v=? WHERE k=? -``` - -Once there is at least one assignment and one relation, the statement can be built: - -```java -SimpleStatement statement = update("foo") - .setColumn("k", bindMarker()) - .whereColumn("k").isEqualTo(bindMarker()) - .build(); -``` - -Relations are a common feature used by many types of statements, so they have a -[dedicated page](../relation) in this manual. 
- -### Conditions - -Conditions get added with the fluent `ifXxx()` methods: - -```java -update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k").isEqualTo(bindMarker()) - .ifColumn("v").isEqualTo(bindMarker()); -// UPDATE foo SET v=? WHERE k=? IF v=? -``` - -Or you can build and add them manually: - -```java -update("foo") - .setColumn("v", bindMarker()) - .whereColumn("k").isEqualTo(bindMarker()) - .if_( - Condition.column("v").isEqualTo(bindMarker())); -// UPDATE foo SET v=? WHERE k=? IF v=? -``` - -Conditions are a common feature used by UPDATE and DELETE, so they have a -[dedicated page](../condition) in this manual. - -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Assignment]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/update/Assignment.html diff --git a/mapper-processor/CONTRIBUTING.md b/mapper-processor/CONTRIBUTING.md deleted file mode 100644 index c6d324106c4..00000000000 --- a/mapper-processor/CONTRIBUTING.md +++ /dev/null @@ -1,52 +0,0 @@ - - -# Mapper contributing guidelines - -Everything in the [main contribution guidelines](../CONTRIBUTING.md) also applies to the mapper. -This file adds specific guidelines for the mapper. - -## Testing - -There are two ways to test the mapper: - -### Unit tests - -These tests reside here in the `mapper-processor` module. They run `javac` in-process with the -mapper's annotation processor configured. Each test creates its own input source code using the -JavaPoet DSL. - -For an example, refer to any subclass of `MapperProcessorTest` (`DaoFactoryMethodGeneratorTest` is -one of them). - -We don't fully validate the generated code, because it would be too much overhead to maintain those -checks anytime something changes. Therefore those tests should not try to cover semantic aspects of -the mapper, but instead focus on: - -* errors and warnings, e.g. 
which method signatures are valid for a particular annotation, and what - happens otherwise; -* presence and naming of the generated files for a particular input source. - -### Integration tests - -These tests reside in the usual `integration-tests` module. They cover the whole cycle: the module -contains annotated sources and runs the mapper processor as part of its build; the tests use the -generated code to interact with a CCM cluster. - -For an example, see `GetEntityIT`. diff --git a/mapper-processor/pom.xml b/mapper-processor/pom.xml deleted file mode 100644 index 04d8c98c4f0..00000000000 --- a/mapper-processor/pom.xml +++ /dev/null @@ -1,206 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-mapper-processor - Apache Cassandra Java Driver - object mapper processor - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-mapper-runtime - - - org.apache.cassandra - java-driver-guava-shaded - - - com.squareup - javapoet - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - com.google.testing.compile - compile-testing - test - - - junit - junit - test - - - org.assertj - assertj-core - test - - - com.tngtech.java - junit-dataprovider - test - - - org.mockito - mockito-core - test - - - ch.qos.logback - logback-classic - test - - - org.apache.cassandra - java-driver-core - test - test-jar - - - - - - src/main/resources - - - ${project.basedir}/.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - src/test/resources - - project.properties - - true - - - src/test/resources - - project.properties - - false - - - - - maven-compiler-plugin - - none - - - - maven-jar-plugin - - - - com.datastax.oss.driver.mapper.processor - - - - - - - javadoc-jar - package - - jar - - - javadoc - - ** - - - - - test-jar - - test-jar - - - - logback-test.xml - - - - - - - org.revapi - revapi-maven-plugin - - true - - - - maven-dependency-plugin - - - generate-dependency-list - - list - - generate-resources - - runtime - true - com.datastax.cassandra,com.datastax.dse - ${project.build.outputDirectory}/com/datastax/dse/driver/internal/mapper/processor/deps.txt - - - - - - - diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/CodeGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/CodeGenerator.java deleted file mode 100644 index b862c7716f3..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/CodeGenerator.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor; - -/** A component that generates one or more source files. */ -public interface CodeGenerator { - void generate(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/CodeGeneratorFactory.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/CodeGeneratorFactory.java deleted file mode 100644 index 10fa89107da..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/CodeGeneratorFactory.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoImplementationSharedCode; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoReturnTypeParser; -import com.datastax.oss.driver.internal.mapper.processor.mapper.MapperImplementationSharedCode; -import java.util.Map; -import java.util.Optional; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; - -public interface CodeGeneratorFactory { - - /** The "helper" class associated to an {@link Entity}-annotated class. */ - CodeGenerator newEntity(TypeElement classElement); - - /** - * All the types derived from a {@link Mapper}-annotated interface. - * - *

    By default, this calls {@link #newMapperImplementation(TypeElement)} and {@link - * #newMapperBuilder(TypeElement)}. - */ - CodeGenerator newMapper(TypeElement interfaceElement); - - /** - * The implementation of a {@link Mapper}-annotated interface. - * - *

    The default code factory calls {@link #newMapperImplementationMethod} for each non-static, - * non-default method, but this is not a hard requirement. - */ - CodeGenerator newMapperImplementation(TypeElement interfaceElement); - - /** - * A method in the implementation of a {@link Mapper}-annotated interface. - * - * @return empty if the processor can't determine what to generate. This will translate as a - * compile-time error. - * @see #newMapperImplementation(TypeElement) - */ - Optional newMapperImplementationMethod( - ExecutableElement methodElement, - TypeElement processedType, - MapperImplementationSharedCode enclosingClass); - - /** The builder associated to a {@link Mapper}-annotated interface. */ - CodeGenerator newMapperBuilder(TypeElement interfaceElement); - - /** - * The implementation of a {@link Dao}-annotated interface. - * - *

    The default code factory calls {@link #newDaoImplementationMethod} for each non-static, - * non-default method, but this is not a hard requirement. - */ - CodeGenerator newDaoImplementation(TypeElement interfaceElement); - - /** - * A method in the implementation of a {@link Dao}-annotated interface. - * - * @return empty if the processor can't determine what to generate. This will translate as a - * compile-time error. - * @see #newDaoImplementation(TypeElement) - */ - Optional newDaoImplementationMethod( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass); - - DaoReturnTypeParser getDaoReturnTypeParser(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DecoratedMessager.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DecoratedMessager.java deleted file mode 100644 index 57e0fd9ae04..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DecoratedMessager.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Objects; -import java.util.Set; -import javax.annotation.processing.Messager; -import javax.lang.model.element.Element; -import javax.tools.Diagnostic; - -/** Wraps {@link Messager} to provide convenience methods. */ -public class DecoratedMessager { - - private final Messager messager; - private final Set emittedMessages = new HashSet<>(); - - public DecoratedMessager(Messager messager) { - this.messager = messager; - } - - /** Emits a global warning that doesn't target a particular element. */ - public void warn(String template, Object... arguments) { - messager.printMessage(Diagnostic.Kind.WARNING, String.format(template, arguments)); - } - - /** Emits a warning for a specific element. */ - public void warn(Element element, String template, Object... arguments) { - message(Diagnostic.Kind.WARNING, element, template, arguments); - } - - /** Emits an error for a specific element. */ - public void error(Element element, String template, Object... 
arguments) { - message(Diagnostic.Kind.ERROR, element, template, arguments); - } - - private void message( - Diagnostic.Kind level, Element element, String template, Object[] arguments) { - if (emittedMessages.add(new MessageId(level, element, template, arguments))) { - messager.printMessage( - level, formatLocation(element) + String.format(template, arguments), element); - } - } - - private static String formatLocation(Element element) { - switch (element.getKind()) { - case CLASS: - case INTERFACE: - return String.format("[%s] ", element.getSimpleName()); - case FIELD: - case METHOD: - case CONSTRUCTOR: - return String.format( - "[%s.%s] ", element.getEnclosingElement().getSimpleName(), element.getSimpleName()); - case PARAMETER: - Element method = element.getEnclosingElement(); - Element type = method.getEnclosingElement(); - return String.format( - "[%s.%s, parameter %s] ", - type.getSimpleName(), method.getSimpleName(), element.getSimpleName()); - default: - // We don't emit messages for other types of elements in the mapper processor. 
Handle - // gracefully nevertheless: - return String.format("[%s] ", element); - } - } - - private static class MessageId { - - private final Diagnostic.Kind level; - private final Element element; - private final String template; - private final Object[] arguments; - - private MessageId(Diagnostic.Kind level, Element element, String template, Object[] arguments) { - this.level = level; - this.element = element; - this.template = template; - this.arguments = arguments; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof MessageId) { - MessageId that = (MessageId) other; - return this.level == that.level - && Objects.equals(this.element, that.element) - && Objects.equals(this.template, that.template) - && Arrays.deepEquals(this.arguments, that.arguments); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(level, element, template, Arrays.hashCode(arguments)); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DefaultCodeGeneratorFactory.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DefaultCodeGeneratorFactory.java deleted file mode 100644 index e980fcbab15..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DefaultCodeGeneratorFactory.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Increment; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoDeleteMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoGetEntityMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoImplementationGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoImplementationSharedCode; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoIncrementMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoInsertMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoQueryMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoQueryProviderMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoReturnTypeParser; -import 
com.datastax.oss.driver.internal.mapper.processor.dao.DaoSelectMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoSetEntityMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoUpdateMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeParser; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityHelperGenerator; -import com.datastax.oss.driver.internal.mapper.processor.mapper.MapperBuilderGenerator; -import com.datastax.oss.driver.internal.mapper.processor.mapper.MapperDaoFactoryMethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.mapper.MapperGenerator; -import com.datastax.oss.driver.internal.mapper.processor.mapper.MapperImplementationGenerator; -import com.datastax.oss.driver.internal.mapper.processor.mapper.MapperImplementationSharedCode; -import java.util.Map; -import java.util.Optional; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; - -public class DefaultCodeGeneratorFactory implements CodeGeneratorFactory { - - protected final ProcessorContext context; - private final DaoReturnTypeParser daoReturnTypeParser; - - public DefaultCodeGeneratorFactory(ProcessorContext context) { - this.context = context; - this.daoReturnTypeParser = new DefaultDaoReturnTypeParser(context); - } - - @Override - public CodeGenerator newEntity(TypeElement classElement) { - return new EntityHelperGenerator(classElement, context); - } - - @Override - public CodeGenerator newMapper(TypeElement interfaceElement) { - return new MapperGenerator(interfaceElement, context); - } - - @Override - public CodeGenerator newMapperImplementation(TypeElement interfaceElement) { - return new MapperImplementationGenerator(interfaceElement, context); - } - - @Override - public Optional newMapperImplementationMethod( - ExecutableElement methodElement, - TypeElement 
processedType, - MapperImplementationSharedCode enclosingClass) { - if (methodElement.getAnnotation(DaoFactory.class) != null) { - return Optional.of( - new MapperDaoFactoryMethodGenerator(methodElement, enclosingClass, context)); - } else { - return Optional.empty(); - } - } - - @Override - public CodeGenerator newMapperBuilder(TypeElement interfaceElement) { - return new MapperBuilderGenerator(interfaceElement, context); - } - - @Override - public CodeGenerator newDaoImplementation(TypeElement interfaceElement) { - return new DaoImplementationGenerator(interfaceElement, context); - } - - @Override - public Optional newDaoImplementationMethod( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass) { - if (methodElement.getAnnotation(SetEntity.class) != null) { - return Optional.of( - new DaoSetEntityMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(Insert.class) != null) { - return Optional.of( - new DaoInsertMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(GetEntity.class) != null) { - return Optional.of( - new DaoGetEntityMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(Select.class) != null) { - return Optional.of( - new DaoSelectMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(Delete.class) != null) { - return Optional.of( - new DaoDeleteMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(Query.class) != null) { - return Optional.of( - new DaoQueryMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if 
(methodElement.getAnnotation(Update.class) != null) { - return Optional.of( - new DaoUpdateMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(QueryProvider.class) != null) { - return Optional.of( - new DaoQueryProviderMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else if (methodElement.getAnnotation(Increment.class) != null) { - return Optional.of( - new DaoIncrementMethodGenerator( - methodElement, typeParameters, processedType, enclosingClass, context)); - } else { - return Optional.empty(); - } - } - - @Override - public DaoReturnTypeParser getDaoReturnTypeParser() { - return daoReturnTypeParser; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DefaultProcessorContext.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DefaultProcessorContext.java deleted file mode 100644 index b996af466c2..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/DefaultProcessorContext.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.internal.core.util.concurrent.LazyReference; -import com.datastax.oss.driver.internal.mapper.processor.dao.LoggingGenerator; -import com.datastax.oss.driver.internal.mapper.processor.entity.DefaultEntityFactory; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityFactory; -import com.datastax.oss.driver.internal.mapper.processor.util.Classes; -import javax.annotation.processing.Filer; -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; - -/** This follows the same principles as {@link DefaultDriverContext}. */ -public class DefaultProcessorContext implements ProcessorContext { - - private final LazyReference codeGeneratorFactoryRef = - new LazyReference<>(this::buildCodeGeneratorFactory); - - private final LazyReference entityFactoryRef = - new LazyReference<>(this::buildEntityFactory); - - private final DecoratedMessager messager; - private final Types typeUtils; - private final Elements elementUtils; - private final Classes classUtils; - private final JavaPoetFiler filer; - private final LoggingGenerator loggingGenerator; - private final boolean customResultsEnabled; - - public DefaultProcessorContext( - DecoratedMessager messager, - Types typeUtils, - Elements elementUtils, - Filer filer, - String indent, - boolean logsEnabled, - boolean customResultsEnabled) { - this.messager = messager; - this.typeUtils = typeUtils; - this.elementUtils = elementUtils; - this.classUtils = new Classes(typeUtils, elementUtils); - this.filer = new JavaPoetFiler(filer, indent); - this.loggingGenerator = new LoggingGenerator(logsEnabled); - this.customResultsEnabled = customResultsEnabled; - } - - protected CodeGeneratorFactory buildCodeGeneratorFactory() { - return new DefaultCodeGeneratorFactory(this); - } - - protected EntityFactory buildEntityFactory() { - return 
new DefaultEntityFactory(this); - } - - @Override - public DecoratedMessager getMessager() { - return messager; - } - - @Override - public Types getTypeUtils() { - return typeUtils; - } - - @Override - public Elements getElementUtils() { - return elementUtils; - } - - @Override - public Classes getClassUtils() { - return classUtils; - } - - @Override - public JavaPoetFiler getFiler() { - return filer; - } - - @Override - public CodeGeneratorFactory getCodeGeneratorFactory() { - return codeGeneratorFactoryRef.get(); - } - - @Override - public EntityFactory getEntityFactory() { - return entityFactoryRef.get(); - } - - @Override - public LoggingGenerator getLoggingGenerator() { - return loggingGenerator; - } - - @Override - public boolean areCustomResultsEnabled() { - return customResultsEnabled; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/GeneratedNames.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/GeneratedNames.java deleted file mode 100644 index 67c4b72ebf2..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/GeneratedNames.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.squareup.javapoet.ClassName; -import javax.lang.model.element.TypeElement; - -/** Centralizes the naming conventions for types or members generated by the processor. */ -public class GeneratedNames { - - /** - * A suffix that gets appended to the name of every generated class (to make it obvious that it - * should not be referenced from client code, e.g. in IDE completion). - */ - private static final String SUFFIX = "__MapperGenerated"; - - /** The entity helpers' private constants holding generic type definitions. */ - public static final String GENERIC_TYPE_CONSTANT = "GENERIC_TYPE"; - - /** The helper class generated for an {@link Entity}-annotated class. */ - public static ClassName entityHelper(TypeElement entityClass) { - return entityHelper(ClassName.get(entityClass)); - } - - /** Variant for {@link #entityHelper(TypeElement)} when only the type name is known. */ - public static ClassName entityHelper(ClassName entityClassName) { - return peerClass(entityClassName, "Helper" + SUFFIX); - } - - /** The builder for a {@link Mapper}-annotated interface. */ - public static ClassName mapperBuilder(TypeElement mapperInterface) { - String custom = mapperInterface.getAnnotation(Mapper.class).builderName(); - if (custom.isEmpty()) { - // Note that the builder is referenced in client code, so generate a "normal" name (no - // underscores). - return peerClass(mapperInterface, "Builder"); - } else { - int i = custom.lastIndexOf('.'); - return ClassName.get(custom.substring(0, i), custom.substring(i + 1)); - } - } - - /** The implementation of a {@link Mapper}-annotated interface. 
*/ - public static ClassName mapperImplementation(TypeElement mapperInterface) { - return peerClass(mapperInterface, "Impl" + SUFFIX); - } - - /** The implementation of a {@link Dao}-annotated interface. */ - public static ClassName daoImplementation(TypeElement daoInterface) { - return peerClass(daoInterface, "Impl" + SUFFIX); - } - - // Generates a non-nested peer class. If the base class is nested, the names of all enclosing - // classes are prepended to ensure uniqueness. For example: - // com.datastax.Foo.Bar.Baz => com.datastax.Foo_Bar_BazImpl - private static ClassName peerClass(ClassName base, String suffix) { - ClassName topLevel = base; - StringBuilder prefix = new StringBuilder(); - while (topLevel.enclosingClassName() != null) { - topLevel = topLevel.enclosingClassName(); - prefix.insert(0, '_').insert(0, topLevel.simpleName()); - } - return topLevel.peerClass(prefix.toString() + base.simpleName() + suffix); - } - - private static ClassName peerClass(TypeElement element, String suffix) { - return peerClass(ClassName.get(element), suffix); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/JavaPoetFiler.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/JavaPoetFiler.java deleted file mode 100644 index 9f8dadd9744..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/JavaPoetFiler.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.squareup.javapoet.JavaFile; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.io.Writer; -import javax.annotation.processing.Filer; -import javax.tools.JavaFileObject; - -/** Thin wrapper around {@link Filer} for content generated with JavaPoet. */ -public class JavaPoetFiler { - - private final Filer filer; - private final String indent; - - public JavaPoetFiler(Filer filer, String indent) { - this.filer = filer; - this.indent = indent; - } - - public void write(String fileName, JavaFile.Builder contents) { - try { - JavaFileObject file = filer.createSourceFile(fileName); - try (Writer writer = file.openWriter()) { - contents.indent(indent).build().writeTo(writer); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/MapperProcessor.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/MapperProcessor.java deleted file mode 100644 index 619a07ea886..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/MapperProcessor.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.shaded.guava.common.base.Strings; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import java.lang.annotation.Annotation; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import javax.annotation.processing.AbstractProcessor; -import javax.annotation.processing.Filer; -import javax.annotation.processing.ProcessingEnvironment; -import javax.annotation.processing.RoundEnvironment; -import javax.lang.model.SourceVersion; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.TypeElement; -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; - -public class MapperProcessor extends AbstractProcessor { - private static final boolean DEFAULT_MAPPER_LOGS_ENABLED = true; - private static final boolean DEFAULT_CUSTOM_RESULTS_ENABLED = true; - - private static final String INDENT_AMOUNT_OPTION = "com.datastax.oss.driver.mapper.indent"; - private static final String INDENT_WITH_TABS_OPTION = - 
"com.datastax.oss.driver.mapper.indentWithTabs"; - private static final String MAPPER_LOGS_ENABLED_OPTION = - "com.datastax.oss.driver.mapper.logs.enabled"; - private static final String CUSTOM_RESULTS_ENABLED_OPTION = - "com.datastax.oss.driver.mapper.customResults.enabled"; - - private DecoratedMessager messager; - private Types typeUtils; - private Elements elementUtils; - private Filer filer; - private String indent; - private boolean logsEnabled; - private boolean customResultsEnabled; - - @Override - public synchronized void init(ProcessingEnvironment processingEnvironment) { - super.init(processingEnvironment); - messager = new DecoratedMessager(processingEnvironment.getMessager()); - typeUtils = processingEnvironment.getTypeUtils(); - elementUtils = processingEnvironment.getElementUtils(); - filer = processingEnvironment.getFiler(); - Map options = processingEnvironment.getOptions(); - indent = computeIndent(options); - logsEnabled = - getBooleanOption(options, MAPPER_LOGS_ENABLED_OPTION, DEFAULT_MAPPER_LOGS_ENABLED); - customResultsEnabled = - getBooleanOption(options, CUSTOM_RESULTS_ENABLED_OPTION, DEFAULT_CUSTOM_RESULTS_ENABLED); - } - - private boolean getBooleanOption( - Map options, String optionName, boolean defaultValue) { - String value = options.get(optionName); - return (value == null) ? 
defaultValue : Boolean.parseBoolean(value); - } - - @Override - public boolean process( - Set annotations, RoundEnvironment roundEnvironment) { - ProcessorContext context = - buildContext( - messager, typeUtils, elementUtils, filer, indent, logsEnabled, customResultsEnabled); - - CodeGeneratorFactory generatorFactory = context.getCodeGeneratorFactory(); - processAnnotatedTypes( - roundEnvironment, Entity.class, ElementKind.CLASS, generatorFactory::newEntity); - processAnnotatedTypes( - roundEnvironment, Dao.class, ElementKind.INTERFACE, generatorFactory::newDaoImplementation); - processAnnotatedTypes( - roundEnvironment, Mapper.class, ElementKind.INTERFACE, generatorFactory::newMapper); - return true; - } - - protected ProcessorContext buildContext( - DecoratedMessager messager, - Types typeUtils, - Elements elementUtils, - Filer filer, - String indent, - boolean logsEnabled, - boolean customResultsEnabled) { - return new DefaultProcessorContext( - messager, typeUtils, elementUtils, filer, indent, logsEnabled, customResultsEnabled); - } - - protected void processAnnotatedTypes( - RoundEnvironment roundEnvironment, - Class annotationClass, - ElementKind expectedKind, - Function generatorFactory) { - for (Element element : roundEnvironment.getElementsAnnotatedWith(annotationClass)) { - ElementKind actualKind = element.getKind(); - boolean isExpectedElement = - actualKind == expectedKind - // Hack to support Java 14 records without having to compile against JDK 14 (also - // possible because we only expect CLASS for entities). 
- || (expectedKind == ElementKind.CLASS && actualKind.name().equals("RECORD")); - if (isExpectedElement) { - // Safe cast given that we checked the kind above - TypeElement typeElement = (TypeElement) element; - try { - generatorFactory.apply(typeElement).generate(); - } catch (Exception e) { - messager.error( - element, - "Unexpected error while writing generated code: %s", - Throwables.getStackTraceAsString(e)); - } - } else { - messager.error( - element, - "Only %s elements can be annotated with %s (got %s)", - expectedKind, - annotationClass.getSimpleName(), - actualKind); - } - } - } - - @Override - public Set getSupportedAnnotationTypes() { - return ImmutableSet.of(Entity.class.getName(), Mapper.class.getName(), Dao.class.getName()); - } - - @Override - public Set getSupportedOptions() { - return ImmutableSet.of( - INDENT_AMOUNT_OPTION, - INDENT_WITH_TABS_OPTION, - MAPPER_LOGS_ENABLED_OPTION, - CUSTOM_RESULTS_ENABLED_OPTION); - } - - @Override - public SourceVersion getSupportedSourceVersion() { - return SourceVersion.latest(); - } - - private String computeIndent(Map options) { - boolean tabs = options.containsKey(INDENT_WITH_TABS_OPTION); - String amountSpec = options.get(INDENT_AMOUNT_OPTION); - if (amountSpec != null) { - try { - int amount = Integer.parseInt(amountSpec); - return Strings.repeat(tabs ? "\t" : " ", amount); - } catch (NumberFormatException e) { - messager.warn( - "Could not parse %s: expected a number, got '%s'. Defaulting to %s.", - INDENT_AMOUNT_OPTION, amountSpec, tabs ? "1 tab" : "2 spaces"); - } - } - return tabs ? 
"\t" : " "; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/MethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/MethodGenerator.java deleted file mode 100644 index 4d35b3baca9..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/MethodGenerator.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; - -/** A component that generates a single method. */ -public interface MethodGenerator { - - /** - * @return empty if an error occurred during generation. In that case, the caller doesn't have - * anything to do: it's the generator's responsibility to report the error via {@link - * DecoratedMessager}. - */ - Optional generate(); - - /** - * Whether the generated method requires the Reactive Streams API. - * - *

    If true, the generated DAO class will inherit from {@link - * com.datastax.dse.driver.internal.mapper.reactive.ReactiveDaoBase}, otherwise it will inherit - * from {@link com.datastax.oss.driver.internal.mapper.DaoBase}. - * - *

    Only generated methods returning {@link - * com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet} and {@link - * com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet} should return true here. - */ - default boolean requiresReactive() { - return false; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/ProcessorContext.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/ProcessorContext.java deleted file mode 100644 index 89452173d9c..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/ProcessorContext.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.datastax.oss.driver.internal.mapper.processor.dao.LoggingGenerator; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityFactory; -import com.datastax.oss.driver.internal.mapper.processor.util.Classes; -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; - -/** Holds common components that are shared throughout an annotation processing round. 
*/ -public interface ProcessorContext { - - DecoratedMessager getMessager(); - - Types getTypeUtils(); - - Elements getElementUtils(); - - Classes getClassUtils(); - - JavaPoetFiler getFiler(); - - CodeGeneratorFactory getCodeGeneratorFactory(); - - EntityFactory getEntityFactory(); - - LoggingGenerator getLoggingGenerator(); - - boolean areCustomResultsEnabled(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/SingleFileCodeGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/SingleFileCodeGenerator.java deleted file mode 100644 index badeb71af33..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/SingleFileCodeGenerator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor; - -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.JavaFile; - -/** A code generator that produces exactly one source file. */ -public abstract class SingleFileCodeGenerator implements CodeGenerator { - - /** A generic warning that can get added to the javadoc of any generated type. 
*/ - public static final String JAVADOC_GENERATED_WARNING = - "Generated by the DataStax driver mapper, do not edit directly.\n"; - - public static final String JAVADOC_PARAGRAPH_SEPARATOR = "\n\n

    "; - - protected final ProcessorContext context; - - protected SingleFileCodeGenerator(ProcessorContext context) { - this.context = context; - } - - @Override - public void generate() { - ClassName typeName = getPrincipalTypeName(); - String fileName = typeName.simpleName(); - if (!typeName.packageName().isEmpty()) { - fileName = typeName.packageName() + '.' + fileName; - } - context.getFiler().write(fileName, getContents()); - } - - /** Name of the principal type being declared in this file. */ - protected abstract ClassName getPrincipalTypeName(); - - protected abstract JavaFile.Builder getContents(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoDeleteMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoDeleteMethodGenerator.java deleted file mode 100644 index cebf65337a5..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoDeleteMethodGenerator.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.BOOLEAN; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.BOUND_STATEMENT; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.CUSTOM; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_ASYNC_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_BOOLEAN; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_VOID; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.REACTIVE_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.VOID; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityDefinition; -import com.datastax.oss.driver.internal.mapper.processor.entity.PropertyDefinition; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; 
-import java.util.Set; -import java.util.stream.Collectors; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - -public class DaoDeleteMethodGenerator extends DaoMethodGenerator { - - public DaoDeleteMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - } - - protected Set getSupportedReturnTypes() { - return ImmutableSet.of( - VOID, - FUTURE_OF_VOID, - BOOLEAN, - FUTURE_OF_BOOLEAN, - RESULT_SET, - BOUND_STATEMENT, - FUTURE_OF_ASYNC_RESULT_SET, - REACTIVE_RESULT_SET, - CUSTOM); - } - - @Override - public boolean requiresReactive() { - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Delete.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - Delete annotation = methodElement.getAnnotation(Delete.class); - assert annotation != null; - if (annotation.ifExists() && !annotation.customIfClause().isEmpty()) { - context - .getMessager() - .error( - methodElement, - "Invalid annotation parameters: %s cannot have both ifExists and customIfClause", - Delete.class.getSimpleName()); - return Optional.empty(); - } - - // Validate the arguments: either an entity instance, or the PK components (in the latter case, - // the entity class has to be provided via the annotation). - // In either case, a Function can be added in last - // position. 
- List parameters = methodElement.getParameters(); - VariableElement boundStatementFunction = findBoundStatementFunction(methodElement); - if (boundStatementFunction != null) { - parameters = parameters.subList(0, parameters.size() - 1); - } - - TypeElement entityElement; - EntityDefinition entityDefinition; - boolean hasEntityParameter; - if (parameters.isEmpty()) { - context - .getMessager() - .error( - methodElement, - "Wrong number of parameters: %s methods with no custom clause " - + "must take either an entity instance, or the primary key components", - Delete.class.getSimpleName()); - return Optional.empty(); - } - String customWhereClause = annotation.customWhereClause(); - String customIfClause = annotation.customIfClause(); - VariableElement firstParameter = parameters.get(0); - entityElement = EntityUtils.asEntityElement(firstParameter, typeParameters); - hasEntityParameter = (entityElement != null); - - // the number of primary key parameters provided, if -1 this implies a custom - // where clause where number of parameters that are primary key are irrelevant. 
- final int primaryKeyParameterCount; - if (hasEntityParameter) { - if (!customWhereClause.isEmpty()) { - context - .getMessager() - .error( - methodElement, - "Invalid parameter list: %s methods that have a custom where clause " - + "must not take an Entity (%s) as a parameter", - Delete.class.getSimpleName(), - entityElement.getSimpleName()); - } - entityDefinition = context.getEntityFactory().getDefinition(entityElement); - primaryKeyParameterCount = entityDefinition.getPrimaryKey().size(); - } else { - entityElement = getEntityClassFromAnnotation(Delete.class); - if (entityElement == null) { - context - .getMessager() - .error( - methodElement, - "Missing entity class: %s methods that do not operate on an entity " - + "instance must have an 'entityClass' argument", - Delete.class.getSimpleName()); - return Optional.empty(); - } else { - entityDefinition = context.getEntityFactory().getDefinition(entityElement); - } - - if (customWhereClause.isEmpty()) { - /* if a custom if clause is provided, the whole primary key must also be provided. - * we only do this check if there is no custom where clause as the order of - * keys may differ in that case.*/ - List primaryKeyParameters = parameters; - if (!customIfClause.isEmpty()) { - if (primaryKeyParameters.size() < entityDefinition.getPrimaryKey().size()) { - List primaryKeyTypes = - entityDefinition.getPrimaryKey().stream() - .map(d -> d.getType().asTypeName()) - .collect(Collectors.toList()); - context - .getMessager() - .error( - methodElement, - "Invalid parameter list: %s methods that have a custom if clause" - + "must specify the entire primary key (expected primary keys of %s: %s)", - Delete.class.getSimpleName(), - entityElement.getSimpleName(), - primaryKeyTypes); - return Optional.empty(); - } else { - // restrict parameters to primary key length. 
- primaryKeyParameters = - primaryKeyParameters.subList(0, entityDefinition.getPrimaryKey().size()); - } - } - - primaryKeyParameterCount = primaryKeyParameters.size(); - if (!EntityUtils.areParametersValid( - entityElement, - entityDefinition, - primaryKeyParameters, - Delete.class, - context, - methodElement, - processedType, - "do not operate on an entity instance and lack a custom where clause")) { - return Optional.empty(); - } - } else { - primaryKeyParameterCount = -1; - } - } - - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Delete.class.getSimpleName()); - if (returnType == null) { - return Optional.empty(); - } - - // Generate the method - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - String statementName = - enclosingClass.addPreparedStatement( - methodElement, - (methodBuilder, requestName) -> - generatePrepareRequest( - methodBuilder, requestName, helperFieldName, primaryKeyParameterCount)); - - CodeBlock.Builder createStatementBlock = CodeBlock.builder(); - - createStatementBlock.addStatement( - "$T boundStatementBuilder = $L.boundStatementBuilder()", - BoundStatementBuilder.class, - statementName); - populateBuilderWithStatementAttributes(createStatementBlock, methodElement); - populateBuilderWithFunction(createStatementBlock, boundStatementFunction); - - int nextParameterIndex = 0; - if (hasEntityParameter) { - warnIfCqlNamePresent(Collections.singletonList(firstParameter)); - // Bind entity's PK properties - for (PropertyDefinition property : entityDefinition.getPrimaryKey()) { - GeneratedCodePatterns.setValue( - property.getCqlName(), - property.getType(), - CodeBlock.of("$L.$L()", firstParameter.getSimpleName(), property.getGetterName()), - "boundStatementBuilder", - createStatementBlock, - enclosingClass); - } - nextParameterIndex = 1; - } else if (customWhereClause.isEmpty()) { - // The PK components are passed as arguments to the 
method (we've already checked that the - // types match). - List primaryKeyNames = - entityDefinition.getPrimaryKey().stream() - .map(PropertyDefinition::getCqlName) - .collect(Collectors.toList()) - .subList(0, primaryKeyParameterCount); - List bindMarkers = parameters.subList(0, primaryKeyParameterCount); - warnIfCqlNamePresent(bindMarkers); - GeneratedCodePatterns.bindParameters( - bindMarkers, primaryKeyNames, createStatementBlock, enclosingClass, context, false); - nextParameterIndex = primaryKeyNames.size(); - } - - // Bind any remaining parameters, assuming they are values for a custom WHERE or IF clause - if (nextParameterIndex < parameters.size()) { - if (customIfClause.isEmpty() && customWhereClause.isEmpty()) { - context - .getMessager() - .error( - methodElement, - "Wrong number of parameters: %s methods can only have additional " - + "parameters if they specify a custom WHERE or IF clause", - Delete.class.getSimpleName()); - } - List bindMarkers = - parameters.subList(nextParameterIndex, parameters.size()); - if (validateCqlNamesPresent(bindMarkers)) { - GeneratedCodePatterns.bindParameters( - bindMarkers, createStatementBlock, enclosingClass, context, false); - } else { - return Optional.empty(); - } - } - - createStatementBlock.addStatement( - "$T boundStatement = boundStatementBuilder.build()", BoundStatement.class); - - return crudMethod(createStatementBlock, returnType, helperFieldName); - } - - private void generatePrepareRequest( - MethodSpec.Builder methodBuilder, - String requestName, - String helperFieldName, - int parameterSize) { - - Delete delete = methodElement.getAnnotation(Delete.class); - boolean ifExists = delete.ifExists(); - String customWhereClause = delete.customWhereClause(); - String customIfClause = delete.customIfClause(); - - methodBuilder.addCode("$[$T $L = $L", SimpleStatement.class, requestName, helperFieldName); - - if (!customWhereClause.isEmpty()) { - methodBuilder.addCode(".deleteStart().whereRaw($S)", 
customWhereClause); - } else { - methodBuilder.addCode(".deleteByPrimaryKeyParts($L)", parameterSize); - } - - if (ifExists) { - methodBuilder.addCode(".ifExists()"); - } else if (!customIfClause.isEmpty()) { - methodBuilder.addCode(".ifRaw($S)", customIfClause); - } - - methodBuilder.addCode(".build();$]\n"); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoGetEntityMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoGetEntityMethodGenerator.java deleted file mode 100644 index 8fa3538f0e8..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoGetEntityMethodGenerator.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; -import javax.lang.model.element.Element; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -public class DaoGetEntityMethodGenerator extends DaoMethodGenerator { - - /** The type of processing to do on the argument in addition to invoking the entity helper. */ - private enum Transformation { - /** Single source element to single entity. */ - NONE, - /** First row of iterable to single entity. */ - ONE, - /** Iterable of rows to iterable of entity. */ - MAP, - /** Iterable of rows to stream of entity. 
*/ - STREAM, - } - - private final boolean lenient; - - public DaoGetEntityMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - lenient = methodElement.getAnnotation(GetEntity.class).lenient(); - } - - @Override - public Optional generate() { - - // Validate the parameter: there must be exactly one, of type GettableByName or a ResultSet: - if (methodElement.getParameters().size() != 1) { - context - .getMessager() - .error( - methodElement, - "Wrong number of parameters: %s methods must have exactly one", - GetEntity.class.getSimpleName()); - return Optional.empty(); - } - VariableElement parameterElement = methodElement.getParameters().get(0); - String parameterName = parameterElement.getSimpleName().toString(); - TypeMirror parameterType = parameterElement.asType(); - boolean parameterIsGettable = context.getClassUtils().implementsGettableByName(parameterType); - boolean parameterIsResultSet = context.getClassUtils().isSame(parameterType, ResultSet.class); - boolean parameterIsAsyncResultSet = - context.getClassUtils().isSame(parameterType, AsyncResultSet.class); - if (!parameterIsGettable && !parameterIsResultSet && !parameterIsAsyncResultSet) { - context - .getMessager() - .error( - methodElement, - "Invalid parameter type: %s methods must take a %s, %s or %s", - GetEntity.class.getSimpleName(), - GettableByName.class.getSimpleName(), - ResultSet.class.getSimpleName(), - AsyncResultSet.class.getSimpleName()); - return Optional.empty(); - } - - // Validate the return type. 
Make sure it matches the parameter type - Transformation transformation = null; - TypeMirror returnType = methodElement.getReturnType(); - TypeElement entityElement = EntityUtils.asEntityElement(returnType, typeParameters); - if (entityElement != null) { - transformation = parameterIsGettable ? Transformation.NONE : Transformation.ONE; - } else if (returnType.getKind() == TypeKind.DECLARED) { - Element element = ((DeclaredType) returnType).asElement(); - if (context.getClassUtils().isSame(element, PagingIterable.class)) { - if (!parameterIsResultSet) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods returning %s must have an argument of type %s", - GetEntity.class.getSimpleName(), - PagingIterable.class.getSimpleName(), - ResultSet.class.getSimpleName()); - return Optional.empty(); - } - entityElement = EntityUtils.typeArgumentAsEntityElement(returnType, typeParameters); - transformation = Transformation.MAP; - } else if (context.getClassUtils().isSame(element, Stream.class)) { - if (!parameterIsResultSet) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods returning %s must have an argument of type %s", - GetEntity.class.getSimpleName(), - Stream.class.getSimpleName(), - ResultSet.class.getSimpleName()); - return Optional.empty(); - } - entityElement = EntityUtils.typeArgumentAsEntityElement(returnType, typeParameters); - transformation = Transformation.STREAM; - } else if (context.getClassUtils().isSame(element, MappedAsyncPagingIterable.class)) { - if (!parameterIsAsyncResultSet) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods returning %s must have an argument of type %s", - GetEntity.class.getSimpleName(), - MappedAsyncPagingIterable.class.getSimpleName(), - AsyncResultSet.class.getSimpleName()); - return Optional.empty(); - } - entityElement = EntityUtils.typeArgumentAsEntityElement(returnType, typeParameters); - transformation = 
Transformation.MAP; - } - } - if (entityElement == null) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: " - + "%s methods must return a %s-annotated class, or a %s, a %s or %s thereof", - GetEntity.class.getSimpleName(), - Entity.class.getSimpleName(), - PagingIterable.class.getSimpleName(), - Stream.class.getSimpleName(), - MappedAsyncPagingIterable.class.getSimpleName()); - return Optional.empty(); - } - - // Generate the implementation: - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - - MethodSpec.Builder overridingMethodBuilder = - GeneratedCodePatterns.override(methodElement, typeParameters); - switch (transformation) { - case NONE: - overridingMethodBuilder.addStatement( - "return $L.get($L, $L)", helperFieldName, parameterName, lenient); - break; - case ONE: - overridingMethodBuilder - .addStatement("$T row = $L.one()", Row.class, parameterName) - .addStatement( - "return (row == null) ? null : $L.get(row, $L)", helperFieldName, lenient); - break; - case MAP: - overridingMethodBuilder.addStatement( - "return $L.map(row -> $L.get(row, $L))", parameterName, helperFieldName, lenient); - break; - case STREAM: - overridingMethodBuilder.addStatement( - "return $T.stream($L.map(row -> $L.get(row, $L)).spliterator(), false)", - StreamSupport.class, - parameterName, - helperFieldName, - lenient); - break; - } - return Optional.of(overridingMethodBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationGenerator.java deleted file mode 100644 index 7c10a4f9106..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationGenerator.java +++ /dev/null @@ -1,575 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.api.mapper.MapperBuilder.SCHEMA_VALIDATION_ENABLED_SETTING; - -import com.datastax.dse.driver.internal.mapper.reactive.ReactiveDaoBase; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.mapper.DaoBase; -import com.datastax.oss.driver.internal.mapper.processor.GeneratedNames; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.SingleFileCodeGenerator; -import com.datastax.oss.driver.internal.mapper.processor.util.Capitalizer; -import 
com.datastax.oss.driver.internal.mapper.processor.util.HierarchyScanner; -import com.datastax.oss.driver.internal.mapper.processor.util.NameIndex; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GenericTypeConstantGenerator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.JavaFile; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.lang.annotation.Annotation; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.BiConsumer; -import java.util.stream.Collectors; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.type.TypeVariable; - -public class DaoImplementationGenerator extends SingleFileCodeGenerator - implements DaoImplementationSharedCode { - - private static final TypeName PREPARED_STATEMENT_STAGE = - ParameterizedTypeName.get(CompletionStage.class, PreparedStatement.class); - - private final TypeElement 
interfaceElement; - private final ClassName implementationName; - private final NameIndex nameIndex = new NameIndex(); - private final GenericTypeConstantGenerator genericTypeConstantGenerator = - new GenericTypeConstantGenerator(nameIndex); - private final Map entityHelperFields = new LinkedHashMap<>(); - private final List preparedStatements = new ArrayList<>(); - private final List queryProviders = new ArrayList<>(); - private final NullSavingStrategyValidation nullSavingStrategyValidation; - - // tracks interface type variable mappings as child interfaces discover them. - private final Map> typeMappingsForInterface = - Maps.newHashMap(); - - private final Set interfaces; - private final Map, Annotation> annotations; - - private static final Set> ANNOTATIONS_TO_SCAN = - ImmutableSet.of(DefaultNullSavingStrategy.class); - - public DaoImplementationGenerator(TypeElement interfaceElement, ProcessorContext context) { - super(context); - this.interfaceElement = interfaceElement; - this.interfaces = HierarchyScanner.resolveTypeHierarchy(interfaceElement, context); - this.annotations = scanAnnotations(); - implementationName = GeneratedNames.daoImplementation(interfaceElement); - nullSavingStrategyValidation = new NullSavingStrategyValidation(context); - } - - private Map, Annotation> scanAnnotations() { - Map, Annotation> annotations = Maps.newHashMap(); - for (TypeMirror mirror : interfaces) { - Element element = context.getTypeUtils().asElement(mirror); - for (Class annotationClass : ANNOTATIONS_TO_SCAN) { - Annotation annotation = element.getAnnotation(annotationClass); - if (annotation != null) { - // don't replace annotations from lower levels. 
- annotations.putIfAbsent(annotationClass, annotation); - } - } - } - return ImmutableMap.copyOf(annotations); - } - - private Optional getAnnotation(Class annotationClass) { - return Optional.ofNullable(annotationClass.cast(annotations.get(annotationClass))); - } - - @Override - public NameIndex getNameIndex() { - return nameIndex; - } - - @Override - public String addGenericTypeConstant(TypeName type) { - return genericTypeConstantGenerator.add(type); - } - - @Override - public String addEntityHelperField(ClassName entityClassName) { - ClassName helperClass = GeneratedNames.entityHelper(entityClassName); - return entityHelperFields.computeIfAbsent( - helperClass, - k -> { - String baseName = Capitalizer.decapitalize(entityClassName.simpleName()) + "Helper"; - return nameIndex.uniqueField(baseName); - }); - } - - @Override - public String addPreparedStatement( - ExecutableElement methodElement, - BiConsumer simpleStatementGenerator) { - // Prepared statements are not shared between methods, so always generate a new name - String fieldName = - nameIndex.uniqueField(methodElement.getSimpleName().toString() + "Statement"); - preparedStatements.add( - new GeneratedPreparedStatement(methodElement, fieldName, simpleStatementGenerator)); - return fieldName; - } - - @Override - public String addQueryProvider( - ExecutableElement methodElement, - TypeMirror providerClass, - List entityHelperTypes) { - // Invokers are not shared between methods, so always generate a new name - String fieldName = nameIndex.uniqueField(methodElement.getSimpleName().toString() + "Invoker"); - List entityHelperNames = new ArrayList<>(); - for (ClassName type : entityHelperTypes) { - entityHelperNames.add(addEntityHelperField(type)); - } - queryProviders.add(new GeneratedQueryProvider(fieldName, providerClass, entityHelperNames)); - return fieldName; - } - - @Override - public Optional getNullSavingStrategy() { - return 
getAnnotation(DefaultNullSavingStrategy.class).map(DefaultNullSavingStrategy::value); - } - - @Override - protected ClassName getPrincipalTypeName() { - return implementationName; - } - - /* - * Parses the given interface mirror and returns a mapping of type variable names to their - * resolved concrete declared type element. - * - * Also updates typeMappingsForInterface for parent interfaces of this interface with - * the type variable to concrete type mappings declared on this interface. This is needed to - * resolve declared types parameter values between the interface hierarchy. - * - * For example, given the following hierarchy: - * - * interface BaseDao - * interface NamedDeviceDao extends BaseDao - * interface TrackedDeviceDao extends NamedDeviceDao - * - * In getContents(), the following mirrors would be parsed for type parameters when - * generating code for TrackedDeviceDao: - * - * * parseTypeParameters(TrackedDeviceDao): returns empty map - * * parseTypeParameters(NamedDeviceDao): returns Y -> TrackedDevice, - * typeMappingsForInterface(BaseDao) updated with Y -> TrackedDevice - * * parseTypeParameters(BaseDao): returns T -> TrackedDevice - * - */ - private Map parseTypeParameters(TypeMirror mirror) { - // Map interface type variable names to their concrete class. 
- Map typeParameters = Maps.newHashMap(); - Map childTypeParameters = - typeMappingsForInterface.getOrDefault(mirror, Collections.emptyMap()); - - if (mirror instanceof DeclaredType) { - TypeElement element = (TypeElement) context.getTypeUtils().asElement(mirror); - DeclaredType declaredType = (DeclaredType) mirror; - // For each type argument on this interface, resolve the declared (concrete) type - for (int i = 0; i < declaredType.getTypeArguments().size(); i++) { - Name name = element.getTypeParameters().get(i).getSimpleName(); - TypeMirror typeArgument = declaredType.getTypeArguments().get(i); - if (typeArgument instanceof DeclaredType) { - /* If its a DeclaredType, we have the concrete type. - * - * For example, given: - * * interface: NamedDeviceDao extends BaseDao - * * mirror: NamedDeviceDao - * - * Type parameter name would be 'Y', type argument would be declared type 'TrackedDevice', - * enabling mapping Y -> TrackedDevice. - */ - Element concreteType = ((DeclaredType) typeArgument).asElement(); - typeParameters.put(name, (TypeElement) concreteType); - } else if (typeArgument instanceof TypeVariable) { - /* If its a TypeVariable, we resolve the concrete type from type parameters declared in - * a child interface and alias it to the type on this class. - * - * For example, given: - * * interface: BaseDao - * * mirror: BaseDao - * - * Type parameter name would be 'T', type argument would be declared type 'Y', - * enabling mapping T -> Y. - */ - Name typeVariableName = ((TypeVariable) typeArgument).asElement().getSimpleName(); - /* Resolve the concrete type from previous child interfaces we parsed types for. - * - * For example, given a child with: - * * interface: NamedDeviceDao extends BaseDao - * * mirror: NamedDeviceDao - * - * We would have childTypeParameters mapping Y -> TrackedDevice enabling the resolution - * of T -> TrackedDevice from T -> Y -> TrackedDevice. 
- */ - if (childTypeParameters.containsKey(typeVariableName)) { - typeParameters.put(name, childTypeParameters.get(typeVariableName)); - } else { - context - .getMessager() - .error( - element, - "Could not resolve type parameter %s " - + "on %s from child interfaces. This error usually means an interface " - + "was inappropriately annotated with @%s. Interfaces should only be annotated " - + "with @%s if all generic type variables are declared.", - name, - mirror, - Dao.class.getSimpleName(), - Dao.class.getSimpleName()); - } - } - } - - /* For each parent interface of this type, check that parent's type arguments for - * type variables. For each of these variables keep track of the discovered concrete - * type on the current interface so it has access to it. See the comments in the code - * above for an explanation of how these mappings are used. - */ - for (TypeMirror parentInterface : element.getInterfaces()) { - if (parentInterface instanceof DeclaredType) { - Map typeMappingsForParent = - typeMappingsForInterface.computeIfAbsent(parentInterface, k -> Maps.newHashMap()); - DeclaredType parentInterfaceType = (DeclaredType) parentInterface; - for (TypeMirror parentTypeArgument : parentInterfaceType.getTypeArguments()) { - if (parentTypeArgument instanceof TypeVariable) { - TypeVariable parentTypeVariable = (TypeVariable) parentTypeArgument; - Name parentTypeName = parentTypeVariable.asElement().getSimpleName(); - TypeElement typeElement = typeParameters.get(parentTypeName); - if (typeElement != null) { - typeMappingsForParent.put(parentTypeName, typeElement); - } - } - } - } - } - } - - return typeParameters; - } - - @Override - protected JavaFile.Builder getContents() { - - TypeSpec.Builder classBuilder = - TypeSpec.classBuilder(implementationName) - .addJavadoc(JAVADOC_GENERATED_WARNING) - .addAnnotation( - AnnotationSpec.builder(SuppressWarnings.class) - .addMember("value", "\"all\"") - .build()) - .addModifiers(Modifier.PUBLIC) - 
.addSuperinterface(ClassName.get(interfaceElement)); - - boolean reactive = false; - for (TypeMirror mirror : interfaces) { - TypeElement parentInterfaceElement = (TypeElement) context.getTypeUtils().asElement(mirror); - Map typeParameters = parseTypeParameters(mirror); - - for (Element child : parentInterfaceElement.getEnclosedElements()) { - if (child.getKind() == ElementKind.METHOD) { - ExecutableElement methodElement = (ExecutableElement) child; - Set modifiers = methodElement.getModifiers(); - if (!modifiers.contains(Modifier.STATIC) && !modifiers.contains(Modifier.DEFAULT)) { - Optional maybeGenerator = - context - .getCodeGeneratorFactory() - .newDaoImplementationMethod( - methodElement, typeParameters, interfaceElement, this); - if (!maybeGenerator.isPresent()) { - context - .getMessager() - .error( - methodElement, - "Unrecognized method signature: no implementation will be generated"); - } else { - maybeGenerator.flatMap(MethodGenerator::generate).ifPresent(classBuilder::addMethod); - reactive |= maybeGenerator.get().requiresReactive(); - } - } - } - } - } - - classBuilder = classBuilder.superclass(getDaoParentClass(reactive)); - - genericTypeConstantGenerator.generate(classBuilder); - - MethodSpec.Builder initAsyncBuilder = getInitAsyncContents(); - - MethodSpec.Builder initBuilder = getInitContents(); - - MethodSpec.Builder constructorBuilder = - MethodSpec.constructorBuilder() - .addModifiers(Modifier.PRIVATE) - .addParameter(MapperContext.class, "context") - .addStatement("super(context)"); - - context.getLoggingGenerator().addLoggerField(classBuilder, implementationName); - - // For each entity helper that was requested by a method generator, create a field for it and - // add a constructor parameter for it (the instance gets created in initAsync). 
- for (Map.Entry entry : entityHelperFields.entrySet()) { - ClassName fieldTypeName = entry.getKey(); - String fieldName = entry.getValue(); - - classBuilder.addField( - FieldSpec.builder(fieldTypeName, fieldName, Modifier.PRIVATE, Modifier.FINAL).build()); - constructorBuilder - .addParameter(fieldTypeName, fieldName) - .addStatement("this.$1L = $1L", fieldName); - } - - // Same for prepared statements: - for (GeneratedPreparedStatement preparedStatement : preparedStatements) { - classBuilder.addField( - FieldSpec.builder( - PreparedStatement.class, - preparedStatement.fieldName, - Modifier.PRIVATE, - Modifier.FINAL) - .build()); - constructorBuilder - .addParameter(PreparedStatement.class, preparedStatement.fieldName) - .addStatement("this.$1L = $1L", preparedStatement.fieldName); - } - - // Same for method invokers: - for (GeneratedQueryProvider queryProvider : queryProviders) { - TypeName providerClassName = TypeName.get(queryProvider.providerClass); - classBuilder.addField( - providerClassName, queryProvider.fieldName, Modifier.PRIVATE, Modifier.FINAL); - constructorBuilder - .addParameter(providerClassName, queryProvider.fieldName) - .addStatement("this.$1L = $1L", queryProvider.fieldName); - } - - classBuilder.addMethod(initAsyncBuilder.build()); - classBuilder.addMethod(initBuilder.build()); - classBuilder.addMethod(constructorBuilder.build()); - - return JavaFile.builder(implementationName.packageName(), classBuilder.build()); - } - - @NonNull - protected Class getDaoParentClass(boolean requiresReactive) { - if (requiresReactive) { - return ReactiveDaoBase.class; - } else { - return DaoBase.class; - } - } - - /** - * Generates the DAO's initAsync() builder: this is the entry point, that the main mapper will use - * to build instances. - * - *

    In this method we want to instantiate any entity helper or prepared statement that will be - * needed by methods of the DAO. Then we call the DAO's private constructor, passing that - * information. - */ - private MethodSpec.Builder getInitAsyncContents() { - MethodSpec.Builder initAsyncBuilder = - MethodSpec.methodBuilder("initAsync") - .returns( - ParameterizedTypeName.get( - ClassName.get(CompletableFuture.class), ClassName.get(interfaceElement))) - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .addParameter(MapperContext.class, "context"); - - LoggingGenerator loggingGenerator = context.getLoggingGenerator(); - loggingGenerator.debug( - initAsyncBuilder, - "[{}] Initializing new instance for keyspace = {} and table = {}", - CodeBlock.of("context.getSession().getName()"), - CodeBlock.of("context.getKeyspaceId()"), - CodeBlock.of("context.getTableId()")); - - generateProtocolVersionCheck(initAsyncBuilder); - - initAsyncBuilder.beginControlFlow("try"); - - // Start a constructor call: we build it dynamically because the number of parameters depends on - // the entity helpers and prepared statements below. 
- CodeBlock.Builder newDaoStatement = CodeBlock.builder(); - newDaoStatement.add("new $1T(context$>$>", implementationName); - - initAsyncBuilder.addComment("Initialize all entity helpers"); - // For each entity helper that was requested by a method generator: - for (Map.Entry entry : entityHelperFields.entrySet()) { - ClassName fieldTypeName = entry.getKey(); - String fieldName = entry.getValue(); - // - create an instance - initAsyncBuilder.addStatement("$1T $2L = new $1T(context)", fieldTypeName, fieldName); - // - validate entity schema - generateValidationCheck(initAsyncBuilder, fieldName); - // - add it as a parameter to the constructor call - newDaoStatement.add(",\n$L", fieldName); - } - - initAsyncBuilder.addStatement( - "$T<$T> prepareStages = new $T<>()", List.class, PREPARED_STATEMENT_STAGE, ArrayList.class); - // For each prepared statement that was requested by a method generator: - for (GeneratedPreparedStatement preparedStatement : preparedStatements) { - initAsyncBuilder.addComment( - "Prepare the statement for `$L`:", preparedStatement.methodElement.toString()); - // - generate the simple statement - String simpleStatementName = preparedStatement.fieldName + "_simple"; - preparedStatement.simpleStatementGenerator.accept(initAsyncBuilder, simpleStatementName); - // - prepare it asynchronously, store all CompletionStages in a list - loggingGenerator.debug( - initAsyncBuilder, - String.format( - "[{}] Preparing query `{}` for method %s", - preparedStatement.methodElement.toString()), - CodeBlock.of("context.getSession().getName()"), - CodeBlock.of("$L.getQuery()", simpleStatementName)); - initAsyncBuilder - .addStatement( - "$T $L = prepare($L, context)", - PREPARED_STATEMENT_STAGE, - preparedStatement.fieldName, - simpleStatementName) - .addStatement("prepareStages.add($L)", preparedStatement.fieldName); - // - add the stage's result to the constructor call (which will be executed once all stages - // are complete) - newDaoStatement.add( - 
",\n$T.getCompleted($L)", CompletableFutures.class, preparedStatement.fieldName); - } - - initAsyncBuilder.addComment("Initialize all method invokers"); - // For each query provider that was requested by a method generator: - for (GeneratedQueryProvider queryProvider : queryProviders) { - // - create an instance - initAsyncBuilder.addCode( - "$[$1T $2L = new $1T(context", queryProvider.providerClass, queryProvider.fieldName); - for (String helperName : queryProvider.entityHelperNames) { - initAsyncBuilder.addCode(", $L", helperName); - } - initAsyncBuilder.addCode(");$]\n"); - - // - add it as a parameter to the constructor call - newDaoStatement.add(",\n$L", queryProvider.fieldName); - } - - newDaoStatement.add(")"); - - initAsyncBuilder - .addComment("Build the DAO when all statements are prepared") - .addCode("$[return $T.allSuccessful(prepareStages)", CompletableFutures.class) - .addCode("\n.thenApply(v -> ($T) ", interfaceElement) - .addCode(newDaoStatement.build()) - .addCode(")\n$<$<.toCompletableFuture();$]\n") - .nextControlFlow("catch ($T t)", Throwable.class) - .addStatement("return $T.failedFuture(t)", CompletableFutures.class) - .endControlFlow(); - return initAsyncBuilder; - } - - private void generateValidationCheck(MethodSpec.Builder initAsyncBuilder, String fieldName) { - initAsyncBuilder.beginControlFlow( - "if (($1T)context.getCustomState().get($2S))", - Boolean.class, - SCHEMA_VALIDATION_ENABLED_SETTING); - initAsyncBuilder.addStatement("$1L.validateEntityFields()", fieldName); - initAsyncBuilder.endControlFlow(); - } - - private void generateProtocolVersionCheck(MethodSpec.Builder builder) { - List methodElements = - preparedStatements.stream().map(v -> v.methodElement).collect(Collectors.toList()); - DefaultNullSavingStrategy interfaceAnnotation = - getAnnotation(DefaultNullSavingStrategy.class).orElse(null); - if (nullSavingStrategyValidation.hasDoNotSetOnAnyLevel(methodElements, interfaceAnnotation)) { - 
builder.addStatement("throwIfProtocolVersionV3(context)"); - } - } - - /** Generates the DAO's init() method: it's a simple synchronous wrapper of initAsync(). */ - private MethodSpec.Builder getInitContents() { - return MethodSpec.methodBuilder("init") - .returns(ClassName.get(interfaceElement)) - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .addParameter(MapperContext.class, "context") - .addStatement("$T.checkNotDriverThread()", BlockingOperation.class) - .addStatement("return $T.getUninterruptibly(initAsync(context))", CompletableFutures.class); - } - - private static class GeneratedPreparedStatement { - final ExecutableElement methodElement; - final String fieldName; - final BiConsumer simpleStatementGenerator; - - GeneratedPreparedStatement( - ExecutableElement methodElement, - String fieldName, - BiConsumer simpleStatementGenerator) { - this.methodElement = methodElement; - this.fieldName = fieldName; - this.simpleStatementGenerator = simpleStatementGenerator; - } - } - - private static class GeneratedQueryProvider { - final String fieldName; - final TypeMirror providerClass; - final List entityHelperNames; - - GeneratedQueryProvider( - String fieldName, TypeMirror providerClass, List entityHelperNames) { - this.fieldName = fieldName; - this.providerClass = providerClass; - this.entityHelperNames = entityHelperNames; - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationSharedCode.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationSharedCode.java deleted file mode 100644 index 788c3aac127..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationSharedCode.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.BindableHandlingSharedCode; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import java.util.List; -import java.util.Optional; -import java.util.function.BiConsumer; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.type.TypeMirror; - -/** - * Exposes callbacks that allow individual method generators for a {@link Dao}-annotated class to - * request the generation of class-level fields that they will use. - */ -public interface DaoImplementationSharedCode extends BindableHandlingSharedCode { - - /** - * Requests the generation of a prepared statement in this DAO. 
It will be initialized in {@code - * initAsync}, and then passed to the constructor which will store it in a private field. - * - * @param methodElement the method that will be using this statement. - * @param simpleStatementGenerator a callback that generates code to create a {@link - * SimpleStatement} local variable that will be used to create the statement. The first - * parameter is the method to add to, and the second the name of the local variable. - * @return the name of the generated field that will hold the statement. - */ - String addPreparedStatement( - ExecutableElement methodElement, - BiConsumer simpleStatementGenerator); - - /** - * Requests the instantiation of a user-provided query provider class in this DAO. It will be - * initialized in {@code initAsync}, and then passed to the constructor which will store it in a - * private field. - * - * @param methodElement the method that will be using this provider. - * @param providerClass the provider class. - * @param entityHelperTypes the types of the entity helpers that should be injected through the - * class's constructor (in addition to the {@link MapperContext}). - * @return the name of the generated field that will hold the provider. - * @see QueryProvider - */ - String addQueryProvider( - ExecutableElement methodElement, TypeMirror providerClass, List entityHelperTypes); - - /** - * @return {@link DefaultNullSavingStrategy#value()} ()} if annotation is present on a given - * {@link Dao}. 
If this annotation is not present on a Dao level it Returns {@code - * Optional.Empty} - */ - Optional getNullSavingStrategy(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoIncrementMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoIncrementMethodGenerator.java deleted file mode 100644 index ee85372f744..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoIncrementMethodGenerator.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_VOID; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.REACTIVE_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.VOID; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Increment; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityDefinition; -import com.datastax.oss.driver.internal.mapper.processor.entity.PropertyDefinition; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -public class DaoIncrementMethodGenerator 
extends DaoMethodGenerator { - - public DaoIncrementMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - } - - protected Set getSupportedReturnTypes() { - return ImmutableSet.of(VOID, FUTURE_OF_VOID, REACTIVE_RESULT_SET); - } - - @Override - public boolean requiresReactive() { - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Increment.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - TypeElement entityElement = getEntityClassFromAnnotation(Increment.class); - EntityDefinition entityDefinition; - if (entityElement == null) { - context - .getMessager() - .error( - methodElement, - "Missing entity class: %s methods must always have an 'entityClass' argument", - Increment.class.getSimpleName()); - return Optional.empty(); - } else { - entityDefinition = context.getEntityFactory().getDefinition(entityElement); - } - - // Validate the parameters: - // - all the PK components of the entity, in order. - // - one or more increment parameters that must match non-PK columns. - // - a Function can be added in last position. 
- List parameters = methodElement.getParameters(); - VariableElement boundStatementFunction = findBoundStatementFunction(methodElement); - if (boundStatementFunction != null) { - parameters = parameters.subList(0, parameters.size() - 1); - } - - List primaryKeyParameters = parameters; - // Must have at least enough parameters for the full PK - if (primaryKeyParameters.size() < entityDefinition.getPrimaryKey().size()) { - List primaryKeyTypes = - entityDefinition.getPrimaryKey().stream() - .map(d -> d.getType().asTypeName()) - .collect(Collectors.toList()); - context - .getMessager() - .error( - methodElement, - "Invalid parameter list: %s methods must specify the entire primary key " - + "(expected primary keys of %s: %s)", - Increment.class.getSimpleName(), - entityElement.getSimpleName(), - primaryKeyTypes); - return Optional.empty(); - } else { - primaryKeyParameters = - primaryKeyParameters.subList(0, entityDefinition.getPrimaryKey().size()); - warnIfCqlNamePresent(primaryKeyParameters); - } - // PK parameter types must match - if (!EntityUtils.areParametersValid( - entityElement, - entityDefinition, - primaryKeyParameters, - Increment.class, - context, - methodElement, - processedType, - "" /* no condition, @Increment must always have the full PK */)) { - return Optional.empty(); - } - - // The remaining parameters are the increments to the counter columns - List incrementParameters = - parameters.subList(primaryKeyParameters.size(), parameters.size()); - if (!validateCqlNamesPresent(incrementParameters)) { - return Optional.empty(); - } - for (VariableElement parameter : incrementParameters) { - TypeMirror type = parameter.asType(); - if (type.getKind() != TypeKind.LONG && !context.getClassUtils().isSame(type, Long.class)) { - context - .getMessager() - .error( - methodElement, - "Invalid argument type: increment parameters of %s methods can only be " - + "primitive longs or java.lang.Long. 
Offending parameter: '%s' (%s)", - Increment.class.getSimpleName(), - parameter.getSimpleName(), - type); - return Optional.empty(); - } - } - - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Increment.class.getSimpleName()); - if (returnType == null) { - return Optional.empty(); - } - - // Generate the method: - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - String statementName = - enclosingClass.addPreparedStatement( - methodElement, - (methodBuilder, requestName) -> - generatePrepareRequest( - methodBuilder, - requestName, - entityDefinition, - helperFieldName, - incrementParameters)); - - CodeBlock.Builder updateStatementBlock = CodeBlock.builder(); - - updateStatementBlock.addStatement( - "$T boundStatementBuilder = $L.boundStatementBuilder()", - BoundStatementBuilder.class, - statementName); - - populateBuilderWithStatementAttributes(updateStatementBlock, methodElement); - populateBuilderWithFunction(updateStatementBlock, boundStatementFunction); - - // Bind the counter increments. The bind parameter names are always the raw parameter names, see - // generatePrepareRequest. - List bindMarkerNames = - incrementParameters.stream() - .map(p -> CodeBlock.of("$S", p.getSimpleName())) - .collect(Collectors.toList()); - // Force the null saving strategy. This will fail if the user targets Cassandra 2.2, but - // SET_TO_NULL would not work with counters anyway. 
- updateStatementBlock.addStatement( - "final $1T nullSavingStrategy = $1T.$2L", - NullSavingStrategy.class, - NullSavingStrategy.DO_NOT_SET); - GeneratedCodePatterns.bindParameters( - incrementParameters, bindMarkerNames, updateStatementBlock, enclosingClass, context, true); - - // Bind the PK columns - List primaryKeyNames = - entityDefinition.getPrimaryKey().stream() - .map(PropertyDefinition::getCqlName) - .collect(Collectors.toList()); - GeneratedCodePatterns.bindParameters( - primaryKeyParameters, - primaryKeyNames, - updateStatementBlock, - enclosingClass, - context, - false); - - updateStatementBlock.addStatement( - "$T boundStatement = boundStatementBuilder.build()", BoundStatement.class); - - return crudMethod(updateStatementBlock, returnType, helperFieldName); - } - - private void generatePrepareRequest( - MethodSpec.Builder methodBuilder, - String requestName, - EntityDefinition entityDefinition, - String helperFieldName, - List incrementParameters) { - - if (incrementParameters.isEmpty()) { - context - .getMessager() - .error( - methodElement, - "%s method must take at least one parameter representing an increment to a " - + "counter column", - Increment.class.getSimpleName()); - return; - } - - methodBuilder - .addStatement("$L.throwIfKeyspaceMissing()", helperFieldName) - .addCode( - "$[$1T $2L = (($3L.getKeyspaceId() == null)\n" - + "? $4T.update($3L.getTableId())\n" - + ": $4T.update($3L.getKeyspaceId(), $3L.getTableId()))", - SimpleStatement.class, - requestName, - helperFieldName, - QueryBuilder.class); - - // Add an increment clause for every non-PK parameter. 
Example: for a parameter `long oneStar` - // => `.append("one_star", QueryBuilder.bindMarker("oneStar"))` - for (VariableElement parameter : incrementParameters) { - CodeBlock cqlName = null; - CqlName annotation = parameter.getAnnotation(CqlName.class); - if (annotation != null) { - // If a CQL name is provided, use that - cqlName = CodeBlock.of("$S", annotation.value()); - } else { - // Otherwise, try to match the parameter to an entity property based on the names, for - // example parameter `oneStar` matches `ProductRating.getOneStar()`. - for (PropertyDefinition property : entityDefinition.getRegularColumns()) { - if (property.getJavaName().equals(parameter.getSimpleName().toString())) { - cqlName = property.getCqlName(); - break; - } - } - if (cqlName == null) { - List javaNames = - StreamSupport.stream(entityDefinition.getRegularColumns().spliterator(), false) - .map(PropertyDefinition::getJavaName) - .collect(Collectors.toList()); - context - .getMessager() - .error( - parameter, - "Could not match '%s' with any counter column in %s (expected one of: %s). " - + "You can also specify a CQL name directly with @%s.", - parameter.getSimpleName(), - entityDefinition.getClassName().simpleName(), - javaNames, - CqlName.class.getSimpleName()); - // Don't return abruptly, execute the rest of the method to finish the Java statement - // cleanly (otherwise JavaPoet throws an error). The generated statement will be - // incorrect, but that doesn't matter since we've already thrown a compile error. - break; - } - } - - // Always use the parameter name. This is what the binding code will expect (see - // GeneratedCodePatterns.bindParameters call in generate()) - String bindMarkerName = parameter.getSimpleName().toString(); - - // We use `append` to generate "c=c+?". QueryBuilder also has `increment` that produces - // "c+=?", but that doesn't work with Cassandra 2.1. 
- methodBuilder.addCode( - "\n.append($1L, $2T.bindMarker($3S))", cqlName, QueryBuilder.class, bindMarkerName); - } - - for (PropertyDefinition property : entityDefinition.getPrimaryKey()) { - methodBuilder.addCode( - "\n.where($1T.column($2L).isEqualTo($3T.bindMarker($2L)))", - Relation.class, - property.getCqlName(), - QueryBuilder.class); - } - - methodBuilder.addCode("\n.build()$];\n"); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoInsertMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoInsertMethodGenerator.java deleted file mode 100644 index a598ce669e9..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoInsertMethodGenerator.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.BOOLEAN; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.BOUND_STATEMENT; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.CUSTOM; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_ASYNC_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_BOOLEAN; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_OPTIONAL_ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_VOID; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.OPTIONAL_ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.REACTIVE_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.VOID; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import 
com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - -public class DaoInsertMethodGenerator extends DaoMethodGenerator { - - private final NullSavingStrategyValidation nullSavingStrategyValidation; - - public DaoInsertMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - nullSavingStrategyValidation = new NullSavingStrategyValidation(context); - } - - protected Set getSupportedReturnTypes() { - return ImmutableSet.of( - VOID, - FUTURE_OF_VOID, - ENTITY, - FUTURE_OF_ENTITY, - OPTIONAL_ENTITY, - FUTURE_OF_OPTIONAL_ENTITY, - BOOLEAN, - FUTURE_OF_BOOLEAN, - RESULT_SET, - BOUND_STATEMENT, - FUTURE_OF_ASYNC_RESULT_SET, - REACTIVE_RESULT_SET, - CUSTOM); - } - - @Override - public boolean requiresReactive() { - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Insert.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - // Validate the parameters: - // - the first one must be the entity. - // - the others are completely free-form (they'll be used as additional bind variables) - // A Function can be added in last position. 
- List parameters = methodElement.getParameters(); - VariableElement boundStatementFunction = findBoundStatementFunction(methodElement); - if (boundStatementFunction != null) { - parameters = parameters.subList(0, parameters.size() - 1); - } - TypeElement entityElement = - parameters.isEmpty() - ? null - : EntityUtils.asEntityElement(parameters.get(0), typeParameters); - if (entityElement == null) { - context - .getMessager() - .error( - methodElement, - "%s methods must take the entity to insert as the first parameter", - Insert.class.getSimpleName()); - return Optional.empty(); - } - - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Insert.class.getSimpleName()); - if (returnType == null) { - return Optional.empty(); - } - if (returnType.getEntityElement() != null - && !returnType.getEntityElement().equals(entityElement)) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods must return the same entity as their argument ", - Insert.class.getSimpleName()); - return Optional.empty(); - } - - // Generate the method: - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - String statementName = - enclosingClass.addPreparedStatement( - methodElement, - (methodBuilder, requestName) -> - generatePrepareRequest(methodBuilder, requestName, helperFieldName)); - - CodeBlock.Builder createStatementBlock = CodeBlock.builder(); - - createStatementBlock.addStatement( - "$T boundStatementBuilder = $L.boundStatementBuilder()", - BoundStatementBuilder.class, - statementName); - - populateBuilderWithStatementAttributes(createStatementBlock, methodElement); - populateBuilderWithFunction(createStatementBlock, boundStatementFunction); - - warnIfCqlNamePresent(parameters.subList(0, 1)); - String entityParameterName = parameters.get(0).getSimpleName().toString(); - - NullSavingStrategy nullSavingStrategy = - 
nullSavingStrategyValidation.getNullSavingStrategy( - Insert.class, Insert::nullSavingStrategy, methodElement, enclosingClass); - - createStatementBlock.addStatement( - "$1L.set($2L, boundStatementBuilder, $3T.$4L, false)", - helperFieldName, - entityParameterName, - NullSavingStrategy.class, - nullSavingStrategy); - - // Handle all remaining parameters as additional bound values - if (parameters.size() > 1) { - List bindMarkers = parameters.subList(1, parameters.size()); - if (validateCqlNamesPresent(bindMarkers)) { - GeneratedCodePatterns.bindParameters( - bindMarkers, createStatementBlock, enclosingClass, context, false); - } else { - return Optional.empty(); - } - } - - createStatementBlock.addStatement( - "$T boundStatement = boundStatementBuilder.build()", BoundStatement.class); - - return crudMethod(createStatementBlock, returnType, helperFieldName); - } - - private void generatePrepareRequest( - MethodSpec.Builder methodBuilder, String requestName, String helperFieldName) { - methodBuilder.addCode( - "$[$T $L = $L.insert()", SimpleStatement.class, requestName, helperFieldName); - Insert annotation = methodElement.getAnnotation(Insert.class); - if (annotation.ifNotExists()) { - methodBuilder.addCode(".ifNotExists()"); - } - - maybeAddTtl(annotation.ttl(), methodBuilder); - maybeAddTimestamp(annotation.timestamp(), methodBuilder); - - methodBuilder.addCode(".build()$];\n"); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoMethodGenerator.java deleted file mode 100644 index 77ce2bf9161..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoMethodGenerator.java +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.CUSTOM; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Increment; -import com.datastax.oss.driver.api.mapper.annotations.StatementAttributes; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.internal.core.util.Reflection; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; 
-import java.util.function.Function; -import java.util.function.UnaryOperator; -import java.util.stream.Collectors; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -public abstract class DaoMethodGenerator implements MethodGenerator { - - protected final ExecutableElement methodElement; - protected final TypeElement processedType; - protected final DaoImplementationSharedCode enclosingClass; - protected final ProcessorContext context; - protected final Map typeParameters; - - public DaoMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - this.methodElement = methodElement; - this.typeParameters = typeParameters; - this.processedType = processedType; - this.enclosingClass = enclosingClass; - this.context = context; - } - - @Nullable - protected DaoReturnType parseAndValidateReturnType( - @NonNull Set validKinds, @NonNull String annotationName) { - DaoReturnType returnType = - context - .getCodeGeneratorFactory() - .getDaoReturnTypeParser() - .parse(methodElement.getReturnType(), typeParameters); - if (!validKinds.contains(returnType.getKind())) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods must return one of %s", - annotationName, - validKinds.stream() - .filter(k -> k != CUSTOM) - .map(Object::toString) - .collect(Collectors.joining(", ", "[", "]"))); - return null; - } - return returnType; - } - - protected void maybeAddTtl(String ttl, MethodSpec.Builder methodBuilder) { - maybeAddSimpleClause(ttl, Integer::parseInt, "usingTtl", 
"ttl", methodBuilder); - } - - protected void maybeAddTimestamp(String timestamp, MethodSpec.Builder methodBuilder) { - maybeAddSimpleClause(timestamp, Long::parseLong, "usingTimestamp", "timestamp", methodBuilder); - } - - protected void maybeAddSimpleClause( - String annotationValue, - Function numberParser, - String dslMethodName, - String valueDescription, - MethodSpec.Builder methodBuilder) { - if (!annotationValue.isEmpty()) { - if (annotationValue.startsWith(":")) { - String bindMarkerName = annotationValue.substring(1); - try { - CqlIdentifier.fromCql(bindMarkerName); - } catch (IllegalArgumentException ignored) { - context - .getMessager() - .warn( - methodElement, - "Invalid " - + valueDescription - + " value: " - + "'%s' is not a valid placeholder, the generated query will probably fail", - annotationValue); - } - methodBuilder.addCode( - ".$L($T.bindMarker($S))", dslMethodName, QueryBuilder.class, bindMarkerName); - } else { - try { - Number unused = numberParser.apply(annotationValue); - } catch (NumberFormatException ignored) { - context - .getMessager() - .warn( - methodElement, - "Invalid " - + valueDescription - + " value: " - + "'%s' is not a bind marker name and can't be parsed as a number literal " - + "either, the generated query will probably fail", - annotationValue); - } - methodBuilder.addCode(".$L($L)", dslMethodName, annotationValue); - } - } - } - - protected void populateBuilderWithFunction( - CodeBlock.Builder builder, VariableElement functionParam) { - if (functionParam != null) { - builder.addStatement( - "boundStatementBuilder = $L.apply(boundStatementBuilder)", - functionParam.getSimpleName().toString()); - } - } - - protected void populateBuilderWithStatementAttributes( - CodeBlock.Builder builder, ExecutableElement methodElement) { - StatementAttributes statementAttributes = - methodElement.getAnnotation(StatementAttributes.class); - if (statementAttributes != null) { - builder.addStatement( - "boundStatementBuilder = 
populateBoundStatementWithStatementAttributes(" - + "boundStatementBuilder, $1S, $2S, $3S, $4L, $5L, $6S, $7S)", - statementAttributes.executionProfileName(), - statementAttributes.consistencyLevel(), - statementAttributes.serialConsistencyLevel(), - (statementAttributes.idempotence().length == 0) - ? null - : statementAttributes.idempotence()[0], - statementAttributes.pageSize(), - statementAttributes.timeout(), - statementAttributes.routingKeyspace()); - } - } - - protected VariableElement findBoundStatementFunction(ExecutableElement methodElement) { - if (methodElement.getParameters().size() > 0) { - int lastParamIndex = methodElement.getParameters().size() - 1; - VariableElement lastParam = methodElement.getParameters().get(lastParamIndex); - TypeMirror mirror = lastParam.asType(); - if (mirror.getKind() == TypeKind.DECLARED) { - DeclaredType declaredType = (DeclaredType) mirror; - if ((context.getClassUtils().isSame(declaredType.asElement(), Function.class) - && context - .getClassUtils() - .isSame(declaredType.getTypeArguments().get(0), BoundStatementBuilder.class) - && context - .getClassUtils() - .isSame(declaredType.getTypeArguments().get(1), BoundStatementBuilder.class)) - || (context.getClassUtils().isSame(declaredType.asElement(), UnaryOperator.class) - && context - .getClassUtils() - .isSame(declaredType.getTypeArguments().get(0), BoundStatementBuilder.class))) { - return lastParam; - } - } - } - return null; - } - - protected boolean validateCqlNamesPresent(List parameters) { - boolean valid = true; - if (isFromClassFile()) { - for (VariableElement parameter : parameters) { - CqlName cqlName = parameter.getAnnotation(CqlName.class); - if (cqlName == null) { - context - .getMessager() - .error( - methodElement, - "Parameter %s is declared in a compiled method " - + "and refers to a bind marker " - + "and thus must be annotated with @%s", - parameter.getSimpleName(), - CqlName.class.getSimpleName()); - valid = false; - } - } - } - return valid; - } - - 
protected void warnIfCqlNamePresent(List parameters) { - for (VariableElement parameter : parameters) { - CqlName cqlName = parameter.getAnnotation(CqlName.class); - if (cqlName != null) { - context - .getMessager() - .warn( - methodElement, - "Parameter %s does not refer to a bind marker, " + "@%s annotation will be ignored", - parameter.getSimpleName(), - CqlName.class.getSimpleName()); - } - } - } - - protected boolean isFromClassFile() { - TypeElement enclosingElement = (TypeElement) methodElement.getEnclosingElement(); - return Reflection.loadClass(null, enclosingElement.getQualifiedName().toString()) != null; - } - - /** - * Common pattern for CRUD methods that build a bound statement, execute it and convert the result - * into a target type. - * - * @param createStatementBlock the code that creates the statement. It must store it into a - * variable named "boundStatement". - */ - protected Optional crudMethod( - CodeBlock.Builder createStatementBlock, DaoReturnType returnType, String helperFieldName) { - - MethodSpec.Builder method = GeneratedCodePatterns.override(methodElement, typeParameters); - if (returnType.getKind() == CUSTOM) { - method.addStatement( - "$T producer = context.getResultProducer($L)", - MapperResultProducer.class, - enclosingClass.addGenericTypeConstant( - GeneratedCodePatterns.getTypeName(methodElement.getReturnType(), typeParameters))); - } - returnType - .getKind() - .addExecuteStatement(createStatementBlock, helperFieldName, methodElement, typeParameters); - method.addCode( - returnType - .getKind() - .wrapWithErrorHandling(createStatementBlock.build(), methodElement, typeParameters)); - return Optional.of(method.build()); - } - - /** - * Reads the "entityClass" parameter from method annotations that define it (such as {@link - * Delete} or {@link Increment}), and finds the corresponding entity class element if it exists. 
- */ - protected TypeElement getEntityClassFromAnnotation(Class annotation) { - - // Note: because entityClass references a class, we can't read it directly through - // methodElement.getAnnotation(annotation). - - AnnotationMirror annotationMirror = null; - for (AnnotationMirror candidate : methodElement.getAnnotationMirrors()) { - if (context.getClassUtils().isSame(candidate.getAnnotationType(), annotation)) { - annotationMirror = candidate; - break; - } - } - assert annotationMirror != null; - - for (Map.Entry entry : - annotationMirror.getElementValues().entrySet()) { - if (entry.getKey().getSimpleName().contentEquals("entityClass")) { - @SuppressWarnings("unchecked") - List values = - (List) entry.getValue().getValue(); - if (values.isEmpty()) { - return null; - } - TypeMirror mirror = (TypeMirror) values.get(0).getValue(); - TypeElement element = EntityUtils.asEntityElement(mirror, typeParameters); - if (values.size() > 1) { - context - .getMessager() - .warn( - methodElement, - "Too many entity classes: %s must have at most one 'entityClass' argument " - + "(will use the first one: %s)", - annotation.getSimpleName(), - mirror); - } - return element; - } - } - return null; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryMethodGenerator.java deleted file mode 100644 index 352f28bdc92..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryMethodGenerator.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.UNSUPPORTED; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.values; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - -public class DaoQueryMethodGenerator extends DaoMethodGenerator { - - private final String queryString; - private final 
NullSavingStrategyValidation nullSavingStrategyValidation; - - public DaoQueryMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - this.queryString = methodElement.getAnnotation(Query.class).value(); - nullSavingStrategyValidation = new NullSavingStrategyValidation(context); - } - - protected Set getSupportedReturnTypes() { - ImmutableSet.Builder builder = ImmutableSet.builder(); - for (DefaultDaoReturnTypeKind value : values()) { - if (value != UNSUPPORTED) { - builder.add(value); - } - } - return builder.build(); - } - - @Override - public boolean requiresReactive() { - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Query.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Query.class.getSimpleName()); - if (returnType == null) { - return Optional.empty(); - } - - // Generate the method: - TypeElement entityElement = returnType.getEntityElement(); - String helperFieldName = - (entityElement == null) - ? 
null - : enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - String statementName = - enclosingClass.addPreparedStatement( - methodElement, - (methodBuilder, requestName) -> - generatePrepareRequest(methodBuilder, requestName, helperFieldName)); - - CodeBlock.Builder createStatementBlock = CodeBlock.builder(); - - List parameters = methodElement.getParameters(); - - VariableElement boundStatementFunction = findBoundStatementFunction(methodElement); - if (boundStatementFunction != null) { - parameters = parameters.subList(0, methodElement.getParameters().size() - 1); - } - - createStatementBlock.addStatement( - "$T boundStatementBuilder = $L.boundStatementBuilder()", - BoundStatementBuilder.class, - statementName); - - NullSavingStrategy nullSavingStrategy = - nullSavingStrategyValidation.getNullSavingStrategy( - Query.class, Query::nullSavingStrategy, methodElement, enclosingClass); - - createStatementBlock.addStatement( - "$1T nullSavingStrategy = $1T.$2L", NullSavingStrategy.class, nullSavingStrategy); - - populateBuilderWithStatementAttributes(createStatementBlock, methodElement); - populateBuilderWithFunction(createStatementBlock, boundStatementFunction); - - if (validateCqlNamesPresent(parameters)) { - GeneratedCodePatterns.bindParameters( - parameters, createStatementBlock, enclosingClass, context, true); - - createStatementBlock.addStatement( - "$T boundStatement = boundStatementBuilder.build()", BoundStatement.class); - - return crudMethod(createStatementBlock, returnType, helperFieldName); - } else { - return Optional.empty(); - } - } - - private void generatePrepareRequest( - MethodSpec.Builder methodBuilder, String requestName, String helperFieldName) { - methodBuilder.addStatement( - "$T $L = replaceKeyspaceAndTablePlaceholders($S, context, $L)", - SimpleStatement.class, - requestName, - queryString, - helperFieldName); - } -} diff --git 
a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryProviderMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryProviderMethodGenerator.java deleted file mode 100644 index 2727f6afa2f..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryProviderMethodGenerator.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.UNSUPPORTED; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.values; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -public class DaoQueryProviderMethodGenerator extends DaoMethodGenerator { - - public DaoQueryProviderMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - } - - protected Set getSupportedReturnTypes() { - ImmutableSet.Builder builder = ImmutableSet.builder(); - for (DefaultDaoReturnTypeKind value : values()) { - if (value != UNSUPPORTED) { - builder.add(value); - } - } - return builder.build(); - } - - @Override - public boolean requiresReactive() { - // Validate the return type: - DaoReturnType returnType = - 
parseAndValidateReturnType(getSupportedReturnTypes(), QueryProvider.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - // No parameter or return type validation, they're completely free-form as long as the provider - // method matches. - - MethodSpec.Builder methodBuilder = - GeneratedCodePatterns.override(methodElement, typeParameters); - - // Request the instantiation of the provider during DAO initialization - List entityHelperTypes = getEntityHelperTypes(); - String providerName = - enclosingClass.addQueryProvider(methodElement, getProviderClass(), entityHelperTypes); - - // Delegate to the provider's method - String providerMethod = methodElement.getAnnotation(QueryProvider.class).providerMethod(); - if (providerMethod.isEmpty()) { - providerMethod = methodElement.getSimpleName().toString(); - } - - methodBuilder.addCode("$["); - if (methodElement.getReturnType().getKind() != TypeKind.VOID) { - methodBuilder.addCode("return "); - } - methodBuilder.addCode("$L.$L(", providerName, providerMethod); - boolean first = true; - for (VariableElement parameter : methodElement.getParameters()) { - if (first) { - first = false; - } else { - methodBuilder.addCode(", "); - } - methodBuilder.addCode("$L", parameter.getSimpleName().toString()); - } - methodBuilder.addCode(");$]\n"); - - return Optional.of(methodBuilder.build()); - } - - private TypeMirror getProviderClass() { - AnnotationMirror annotationMirror = getQueryProviderAnnotationMirror(); - for (Map.Entry entry : - annotationMirror.getElementValues().entrySet()) { - if (entry.getKey().getSimpleName().contentEquals("providerClass")) { - return ((TypeMirror) entry.getValue().getValue()); - } - } - // providerClass is mandatory on the annotation, so if we get here the user will already have to - // deal with a compile error. - // But return something so that the processor doesn't crash downstream. 
- return context.getTypeUtils().getPrimitiveType(TypeKind.INT); - } - - @SuppressWarnings("MixedMutabilityReturnType") - private List getEntityHelperTypes() { - AnnotationMirror annotationMirror = getQueryProviderAnnotationMirror(); - for (Map.Entry entry : - annotationMirror.getElementValues().entrySet()) { - if (entry.getKey().getSimpleName().contentEquals("entityHelpers")) { - @SuppressWarnings("unchecked") - List values = (List) entry.getValue().getValue(); - List result = new ArrayList<>(values.size()); - for (AnnotationValue value : values) { - TypeMirror entityMirror = (TypeMirror) value.getValue(); - TypeElement entityElement = EntityUtils.asEntityElement(entityMirror, typeParameters); - if (entityElement == null) { - context - .getMessager() - .error( - methodElement, - "Invalid annotation configuration: the elements in %s.entityHelpers " - + "must be %s-annotated classes (offending element: %s)", - QueryProvider.class.getSimpleName(), - Entity.class.getSimpleName(), - entityMirror); - // No need to keep going, compilation will fail anyway - return Collections.emptyList(); - } else { - result.add(ClassName.get(entityElement)); - } - } - return result; - } - } - return Collections.emptyList(); - } - - private AnnotationMirror getQueryProviderAnnotationMirror() { - for (AnnotationMirror candidate : methodElement.getAnnotationMirrors()) { - if (context.getClassUtils().isSame(candidate.getAnnotationType(), QueryProvider.class)) { - return candidate; - } - } - // We'll never get here because we wouldn't be in this class if the method didn't have the - // annotation - throw new AssertionError("Expected to find QueryProvider annotation"); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnType.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnType.java deleted file mode 100644 index e7fd92b9464..00000000000 --- 
a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnType.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import javax.lang.model.element.TypeElement; - -/** Holds information about the return type of a DAO method. 
*/ -public class DaoReturnType { - - public static final DaoReturnType VOID = new DaoReturnType(DefaultDaoReturnTypeKind.VOID); - public static final DaoReturnType BOOLEAN = new DaoReturnType(DefaultDaoReturnTypeKind.BOOLEAN); - public static final DaoReturnType LONG = new DaoReturnType(DefaultDaoReturnTypeKind.LONG); - public static final DaoReturnType ROW = new DaoReturnType(DefaultDaoReturnTypeKind.ROW); - public static final DaoReturnType RESULT_SET = - new DaoReturnType(DefaultDaoReturnTypeKind.RESULT_SET); - public static final DaoReturnType BOUND_STATEMENT = - new DaoReturnType(DefaultDaoReturnTypeKind.BOUND_STATEMENT); - public static final DaoReturnType FUTURE_OF_VOID = - new DaoReturnType(DefaultDaoReturnTypeKind.FUTURE_OF_VOID); - public static final DaoReturnType FUTURE_OF_BOOLEAN = - new DaoReturnType(DefaultDaoReturnTypeKind.FUTURE_OF_BOOLEAN); - public static final DaoReturnType FUTURE_OF_LONG = - new DaoReturnType(DefaultDaoReturnTypeKind.FUTURE_OF_LONG); - public static final DaoReturnType FUTURE_OF_ROW = - new DaoReturnType(DefaultDaoReturnTypeKind.FUTURE_OF_ROW); - public static final DaoReturnType FUTURE_OF_ASYNC_RESULT_SET = - new DaoReturnType(DefaultDaoReturnTypeKind.FUTURE_OF_ASYNC_RESULT_SET); - public static final DaoReturnType REACTIVE_RESULT_SET = - new DaoReturnType(DefaultDaoReturnTypeKind.REACTIVE_RESULT_SET); - public static final DaoReturnType UNSUPPORTED = - new DaoReturnType(DefaultDaoReturnTypeKind.UNSUPPORTED); - - private final DaoReturnTypeKind kind; - private final TypeElement entityElement; - - public DaoReturnType(DaoReturnTypeKind kind, TypeElement entityElement) { - this.kind = kind; - this.entityElement = entityElement; - } - - public DaoReturnType(DaoReturnTypeKind kind) { - this(kind, null); - } - - public DaoReturnTypeKind getKind() { - return kind; - } - - /** - * If the type is parameterized by an entity-annotated class, return that entity. - * - *

    For example {@code CompletionStage} => {@code Product}. - */ - public TypeElement getEntityElement() { - return entityElement; - } - - /** - * Whether this return type requires the Reactive Streams API. - * - *

    If true, the generated DAO class will inherit from {@link - * com.datastax.dse.driver.internal.mapper.reactive.ReactiveDaoBase}, otherwise it will inherit - * from {@link com.datastax.oss.driver.internal.mapper.DaoBase}. - */ - public boolean requiresReactive() { - return kind.requiresReactive(); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnTypeKind.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnTypeKind.java deleted file mode 100644 index 72089fdccb3..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnTypeKind.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.squareup.javapoet.CodeBlock; -import java.util.Map; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; - -/** - * A "kind" of return type of a DAO method. - * - *

    This represents a category of types that will be produced with the same pattern in the - * generated code. For example, "future of entity" is a kind that encompasses {@code - * CompletableFuture}, {@code CompletionStage}, etc. - */ -public interface DaoReturnTypeKind { - - /** - * Generates the code to execute a given statement (accessible through a local variable named - * {@code boundStatement}), and convert the result set into this kind. - * - * @param methodBuilder the method to add the code to. - * @param helperFieldName the name of the helper for entity conversions (might not get used for - * certain kinds, in that case it's ok to pass null). - * @param methodElement the return type of the method (in case the result must be cast). - * @param typeParameters - */ - void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters); - - /** - * Generates a try-catch around the given code block, to translate unchecked exceptions into a - * result consistent with this kind. - * - *

    For example, for futures, we want to generate this: - * - *

    -   * CompletionStage<Product> findByIdAsync() {
    -   *   try {
    -   *     ... // innerBlock
    -   *   } catch (Throwable t) {
    -   *     return CompletableFutures.failedFuture(t);
    -   *   }
    -   * }
    -   * 
    - * - *

    For some kinds, it's fine to let unchecked exceptions bubble up and no try-catch is - * necessary; in this case, this method can return {@code innerBlock} unchanged. - */ - CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, ExecutableElement methodElement, Map typeParameters); - - /** A short description suitable for error messages. */ - String getDescription(); - - /** - * Whether this return type kind requires the Reactive Streams API. - * - *

    If true, the generated DAO class will inherit from {@link - * com.datastax.dse.driver.internal.mapper.reactive.ReactiveDaoBase}, otherwise it will inherit - * from {@link com.datastax.oss.driver.internal.mapper.DaoBase}. - */ - boolean requiresReactive(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnTypeParser.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnTypeParser.java deleted file mode 100644 index 76c073ddc68..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoReturnTypeParser.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.type.TypeMirror; - -public interface DaoReturnTypeParser { - @NonNull - DaoReturnType parse( - @NonNull TypeMirror returnTypeMirror, @NonNull Map typeParameters); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSelectMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSelectMethodGenerator.java deleted file mode 100644 index dea570c61bc..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSelectMethodGenerator.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.CUSTOM; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_ASYNC_PAGING_ITERABLE; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_OPTIONAL_ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_STREAM; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.MAPPED_REACTIVE_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.OPTIONAL_ENTITY; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.PAGING_ITERABLE; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.STREAM; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityDefinition; -import com.datastax.oss.driver.internal.mapper.processor.entity.PropertyDefinition; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.base.Splitter; 
-import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - -public class DaoSelectMethodGenerator extends DaoMethodGenerator { - - public DaoSelectMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - } - - protected Set getSupportedReturnTypes() { - return ImmutableSet.of( - ENTITY, - OPTIONAL_ENTITY, - FUTURE_OF_ENTITY, - FUTURE_OF_OPTIONAL_ENTITY, - PAGING_ITERABLE, - STREAM, - FUTURE_OF_ASYNC_PAGING_ITERABLE, - FUTURE_OF_STREAM, - MAPPED_REACTIVE_RESULT_SET, - CUSTOM); - } - - @Override - public boolean requiresReactive() { - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Select.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Select.class.getSimpleName()); - if (returnType == null) { - return Optional.empty(); - } - - TypeElement entityElement = returnType.getEntityElement(); - EntityDefinition entityDefinition = context.getEntityFactory().getDefinition(entityElement); - - // Validate the parameters: - // - if there is a custom clause, they are free-form (they'll be used as bind 
variables) - // - otherwise, we accept the primary key components or a subset thereof (possibly empty to - // select all rows), followed by free-form parameters bound to the secondary clauses (such as - // LIMIT). - // In either case, a Function can be added in last - // position. - List parameters = methodElement.getParameters(); - - VariableElement boundStatementFunction = findBoundStatementFunction(methodElement); - if (boundStatementFunction != null) { - parameters = parameters.subList(0, parameters.size() - 1); - } - - final List primaryKeyParameters; - final List freeFormParameters; - Select selectAnnotation = methodElement.getAnnotation(Select.class); - assert selectAnnotation != null; // otherwise we wouldn't have gotten into this class - String customClause = selectAnnotation.customWhereClause(); - if (parameters.isEmpty()) { - primaryKeyParameters = freeFormParameters = Collections.emptyList(); - } else if (customClause.isEmpty()) { - // If we have a partial primary key *and* free-form parameters, things get ambiguous because - // we don't know where the primary key ends. By convention, we require the first free-form - // parameter to be annotated with @CqlName in those cases. - // So the boundary is either when we have enough parameters for a full primary key, or when we - // encounter the first annotated parameter. 
- int firstNamedParameter = parameters.size(); - for (int i = 0; i < parameters.size(); i++) { - if (parameters.get(i).getAnnotation(CqlName.class) != null) { - firstNamedParameter = i; - break; - } - } - int primaryKeyEnd = Math.min(firstNamedParameter, entityDefinition.getPrimaryKey().size()); - if (primaryKeyEnd >= parameters.size()) { - primaryKeyParameters = parameters; - freeFormParameters = Collections.emptyList(); - } else { - primaryKeyParameters = parameters.subList(0, primaryKeyEnd); - freeFormParameters = parameters.subList(primaryKeyEnd, parameters.size()); - } - } else { - primaryKeyParameters = Collections.emptyList(); - freeFormParameters = parameters; - } - - // If we have parameters for some primary key components, validate that the types match: - if (!primaryKeyParameters.isEmpty() - && !EntityUtils.areParametersValid( - entityElement, - entityDefinition, - primaryKeyParameters, - Select.class, - context, - methodElement, - processedType, - "don't use a custom clause")) { - return Optional.empty(); - } - - // Generate the method: - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - String statementName = - enclosingClass.addPreparedStatement( - methodElement, - (methodBuilder, requestName) -> - generateSelectRequest( - methodBuilder, requestName, helperFieldName, primaryKeyParameters.size())); - - CodeBlock.Builder createStatementBlock = CodeBlock.builder(); - - createStatementBlock.addStatement( - "$T boundStatementBuilder = $L.boundStatementBuilder()", - BoundStatementBuilder.class, - statementName); - populateBuilderWithStatementAttributes(createStatementBlock, methodElement); - populateBuilderWithFunction(createStatementBlock, boundStatementFunction); - - if (!primaryKeyParameters.isEmpty()) { - List primaryKeyNames = - entityDefinition.getPrimaryKey().stream() - .map(PropertyDefinition::getCqlName) - .collect(Collectors.toList()) - .subList(0, primaryKeyParameters.size()); - 
GeneratedCodePatterns.bindParameters( - primaryKeyParameters, - primaryKeyNames, - createStatementBlock, - enclosingClass, - context, - false); - } - - if (!freeFormParameters.isEmpty()) { - if (validateCqlNamesPresent(freeFormParameters)) { - GeneratedCodePatterns.bindParameters( - freeFormParameters, createStatementBlock, enclosingClass, context, false); - } else { - return Optional.empty(); - } - } - - createStatementBlock.addStatement( - "$T boundStatement = boundStatementBuilder.build()", BoundStatement.class); - - return crudMethod(createStatementBlock, returnType, helperFieldName); - } - - private void generateSelectRequest( - MethodSpec.Builder methodBuilder, - String requestName, - String helperFieldName, - int numberOfPrimaryKeyPartsInWhereClause) { - Select annotation = methodElement.getAnnotation(Select.class); - String customWhereClause = annotation.customWhereClause(); - if (customWhereClause.isEmpty()) { - methodBuilder.addCode( - "$[$T $L = $L.selectByPrimaryKeyParts($L)", - SimpleStatement.class, - requestName, - helperFieldName, - numberOfPrimaryKeyPartsInWhereClause); - } else { - methodBuilder.addCode( - "$[$T $L = $L.selectStart().whereRaw($S)", - SimpleStatement.class, - requestName, - helperFieldName, - customWhereClause); - } - maybeAddSimpleClause(annotation.limit(), Integer::parseInt, "limit", "limit", methodBuilder); - maybeAddSimpleClause( - annotation.perPartitionLimit(), - Integer::parseInt, - "perPartitionLimit", - "perPartitionLimit", - methodBuilder); - for (String orderingSpec : annotation.orderBy()) { - addOrdering(orderingSpec, methodBuilder); - } - for (String groupByColumn : annotation.groupBy()) { - methodBuilder.addCode(".groupBy($S)", groupByColumn); - } - if (annotation.allowFiltering()) { - methodBuilder.addCode(".allowFiltering()"); - } - methodBuilder.addCode(".build();$]\n"); - } - - private void addOrdering(String orderingSpec, MethodSpec.Builder methodBuilder) { - List tokens = ON_SPACES.splitToList(orderingSpec); - 
ClusteringOrder clusteringOrder; - if (tokens.size() != 2 || (clusteringOrder = parseClusteringOrder(tokens.get(1))) == null) { - context - .getMessager() - .error( - methodElement, - "Can't parse ordering '%s', expected a column name followed by ASC or DESC", - orderingSpec); - return; - } - methodBuilder.addCode( - ".orderBy($S, $T.$L)", tokens.get(0), ClusteringOrder.class, clusteringOrder); - } - - private ClusteringOrder parseClusteringOrder(String spec) { - try { - return ClusteringOrder.valueOf(spec.toUpperCase()); - } catch (IllegalArgumentException e) { - return null; - } - } - - private static final Splitter ON_SPACES = Splitter.on(' ').omitEmptyStrings(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSetEntityMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSetEntityMethodGenerator.java deleted file mode 100644 index 023ed4b7310..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSetEntityMethodGenerator.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import java.util.Map; -import java.util.Optional; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -public class DaoSetEntityMethodGenerator extends DaoMethodGenerator { - - private final NullSavingStrategyValidation nullSavingStrategyValidation; - private final boolean lenient; - - public DaoSetEntityMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - nullSavingStrategyValidation = new NullSavingStrategyValidation(context); - lenient = methodElement.getAnnotation(SetEntity.class).lenient(); - } - - @Override - public Optional generate() { - - String entityParameterName = null; - TypeElement entityElement = null; - String targetParameterName = null; - - // Validate the parameters: one is an annotated entity, and the other a subtype of - // SettableByName. 
- if (methodElement.getParameters().size() != 2) { - context - .getMessager() - .error( - methodElement, - "Wrong number of parameters: %s methods must have two", - SetEntity.class.getSimpleName()); - return Optional.empty(); - } - TypeMirror targetParameterType = null; - for (VariableElement parameterElement : methodElement.getParameters()) { - TypeMirror parameterType = parameterElement.asType(); - if (context.getClassUtils().implementsSettableByName(parameterType)) { - targetParameterName = parameterElement.getSimpleName().toString(); - targetParameterType = parameterElement.asType(); - } else if (parameterType.getKind() == TypeKind.DECLARED - || parameterType.getKind() == TypeKind.TYPEVAR) { - TypeElement parameterTypeElement = - EntityUtils.asEntityElement(parameterType, typeParameters); - if (parameterTypeElement != null) { - entityParameterName = parameterElement.getSimpleName().toString(); - entityElement = parameterTypeElement; - } - } - } - if (entityParameterName == null || targetParameterName == null) { - context - .getMessager() - .error( - methodElement, - "Wrong parameter types: %s methods must take a %s " - + "and an annotated entity (in any order)", - SetEntity.class.getSimpleName(), - SettableByName.class.getSimpleName()); - return Optional.empty(); - } - - // Validate the return type: either void or the same SettableByName as the parameter - TypeMirror returnType = methodElement.getReturnType(); - boolean isVoid = returnType.getKind() == TypeKind.VOID; - if (isVoid) { - if (context.getClassUtils().isSame(targetParameterType, BoundStatement.class)) { - context - .getMessager() - .warn( - methodElement, - "BoundStatement is immutable, " - + "this method will not modify '%s' in place. 
" - + "It should probably return BoundStatement rather than void", - targetParameterName); - } - } else if (!context.getTypeUtils().isSameType(returnType, targetParameterType)) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods must either be void, or return the same " - + "type as their settable parameter (in this case, %s to match '%s')", - SetEntity.class.getSimpleName(), - targetParameterType, - targetParameterName); - return Optional.empty(); - } - - // Generate the method: - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - - NullSavingStrategy nullSavingStrategy = - nullSavingStrategyValidation.getNullSavingStrategy( - SetEntity.class, SetEntity::nullSavingStrategy, methodElement, enclosingClass); - - // Forward to the base injector in the helper: - return Optional.of( - GeneratedCodePatterns.override(methodElement, typeParameters) - .addStatement( - "$1L$2L.set($3L, $4L, $5T.$6L, $7L)", - isVoid ? "" : "return ", - helperFieldName, - entityParameterName, - targetParameterName, - NullSavingStrategy.class, - nullSavingStrategy, - lenient) - .build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoUpdateMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoUpdateMethodGenerator.java deleted file mode 100644 index 3509540296f..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoUpdateMethodGenerator.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.BOOLEAN; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.BOUND_STATEMENT; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.CUSTOM; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_ASYNC_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_BOOLEAN; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.FUTURE_OF_VOID; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.REACTIVE_RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.RESULT_SET; -import static com.datastax.oss.driver.internal.mapper.processor.dao.DefaultDaoReturnTypeKind.VOID; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import 
com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityDefinition; -import com.datastax.oss.driver.internal.mapper.processor.entity.PropertyDefinition; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.internal.querybuilder.update.DefaultUpdate; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - -public class DaoUpdateMethodGenerator extends DaoMethodGenerator { - - private final NullSavingStrategyValidation nullSavingStrategyValidation; - - public DaoUpdateMethodGenerator( - ExecutableElement methodElement, - Map typeParameters, - TypeElement processedType, - DaoImplementationSharedCode enclosingClass, - ProcessorContext context) { - super(methodElement, typeParameters, processedType, enclosingClass, context); - nullSavingStrategyValidation = new NullSavingStrategyValidation(context); - } - - protected Set getSupportedReturnTypes() { - return ImmutableSet.of( - VOID, - FUTURE_OF_VOID, - RESULT_SET, - BOUND_STATEMENT, - FUTURE_OF_ASYNC_RESULT_SET, - BOOLEAN, - FUTURE_OF_BOOLEAN, - REACTIVE_RESULT_SET, - CUSTOM); - } - - @Override - public boolean requiresReactive() { - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Update.class.getSimpleName()); - if (returnType == null) { - return false; - } - return returnType.requiresReactive(); - } - - @Override - public Optional generate() { - - // Validate the parameters: - 
// - the first one must be the entity. - // - the others are completely free-form (they'll be used as additional bind variables) - // A Function can be added in last position. - List parameters = methodElement.getParameters(); - VariableElement boundStatementFunction = findBoundStatementFunction(methodElement); - if (boundStatementFunction != null) { - parameters = parameters.subList(0, parameters.size() - 1); - } - TypeElement entityElement = - parameters.isEmpty() - ? null - : EntityUtils.asEntityElement(parameters.get(0), typeParameters); - if (entityElement == null) { - context - .getMessager() - .error( - methodElement, - "%s methods must take the entity to update as the first parameter", - Update.class.getSimpleName()); - return Optional.empty(); - } - warnIfCqlNamePresent(parameters.subList(0, 1)); - EntityDefinition entityDefinition = context.getEntityFactory().getDefinition(entityElement); - - // Validate the return type: - DaoReturnType returnType = - parseAndValidateReturnType(getSupportedReturnTypes(), Update.class.getSimpleName()); - if (returnType == null) { - return Optional.empty(); - } - - // Generate the method: - String helperFieldName = enclosingClass.addEntityHelperField(ClassName.get(entityElement)); - String statementName = - enclosingClass.addPreparedStatement( - methodElement, - (methodBuilder, requestName) -> - generatePrepareRequest(methodBuilder, requestName, helperFieldName)); - - CodeBlock.Builder createStatementBlock = CodeBlock.builder(); - - createStatementBlock.addStatement( - "$T boundStatementBuilder = $L.boundStatementBuilder()", - BoundStatementBuilder.class, - statementName); - - populateBuilderWithStatementAttributes(createStatementBlock, methodElement); - populateBuilderWithFunction(createStatementBlock, boundStatementFunction); - - String entityParameterName = parameters.get(0).getSimpleName().toString(); - - Update annotation = methodElement.getAnnotation(Update.class); - String customWhereClause = 
annotation.customWhereClause(); - - NullSavingStrategy nullSavingStrategy = - nullSavingStrategyValidation.getNullSavingStrategy( - Update.class, Update::nullSavingStrategy, methodElement, enclosingClass); - - if (customWhereClause.isEmpty()) { - // We generated an update by primary key (see maybeAddWhereClause), all entity properties are - // present as placeholders. - createStatementBlock.addStatement( - "$1L.set($2L, boundStatementBuilder, $3T.$4L, false)", - helperFieldName, - entityParameterName, - NullSavingStrategy.class, - nullSavingStrategy); - } else { - createStatementBlock.addStatement( - "$1T nullSavingStrategy = $1T.$2L", NullSavingStrategy.class, nullSavingStrategy); - - // Only non-PK properties are present in SET ... clauses. - // (if the custom clause has custom placeholders, this will be addressed below) - for (PropertyDefinition property : entityDefinition.getRegularColumns()) { - GeneratedCodePatterns.setValue( - property.getCqlName(), - property.getType(), - CodeBlock.of("$L.$L()", entityParameterName, property.getGetterName()), - "boundStatementBuilder", - createStatementBlock, - enclosingClass, - true, - false); - } - } - - // Handle all remaining parameters as additional bound values in customWhereClause or - // customIfClause - if (parameters.size() > 1) { - List bindMarkers = parameters.subList(1, parameters.size()); - if (validateCqlNamesPresent(bindMarkers)) { - GeneratedCodePatterns.bindParameters( - bindMarkers, createStatementBlock, enclosingClass, context, false); - } else { - return Optional.empty(); - } - } - - createStatementBlock.addStatement( - "$T boundStatement = boundStatementBuilder.build()", BoundStatement.class); - - return crudMethod(createStatementBlock, returnType, helperFieldName); - } - - private void generatePrepareRequest( - MethodSpec.Builder methodBuilder, String requestName, String helperFieldName) { - Update annotation = methodElement.getAnnotation(Update.class); - - maybeAddWhereClause( - methodBuilder, 
requestName, helperFieldName, annotation.customWhereClause()); - maybeAddTtl(annotation.ttl(), methodBuilder); - maybeAddTimestamp(annotation.timestamp(), methodBuilder); - methodBuilder.addCode(")"); - maybeAddIfClause(methodBuilder, annotation); - - methodBuilder.addCode(".asCql()"); - methodBuilder.addCode(")$];\n"); - } - - private void maybeAddWhereClause( - MethodSpec.Builder methodBuilder, - String requestName, - String helperFieldName, - String customWhereClause) { - - if (customWhereClause.isEmpty()) { - methodBuilder.addCode( - "$[$1T $2L = $1T.newInstance((($4T)$3L.updateByPrimaryKey()", - SimpleStatement.class, - requestName, - helperFieldName, - DefaultUpdate.class); - } else { - methodBuilder.addCode( - "$[$1T $2L = $1T.newInstance((($5T)$3L.updateStart().whereRaw($4S)", - SimpleStatement.class, - requestName, - helperFieldName, - customWhereClause, - DefaultUpdate.class); - } - } - - private void maybeAddIfClause(MethodSpec.Builder methodBuilder, Update annotation) { - if (annotation.ifExists() && !annotation.customIfClause().isEmpty()) { - context - .getMessager() - .error( - methodElement, - "Invalid annotation parameters: %s cannot have both ifExists and customIfClause", - Update.class.getSimpleName()); - } - - if (annotation.ifExists()) { - methodBuilder.addCode(".ifExists()"); - } - - if (!annotation.customIfClause().isEmpty()) { - methodBuilder.addCode(".ifRaw($S)", annotation.customIfClause()); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DefaultDaoReturnTypeKind.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DefaultDaoReturnTypeKind.java deleted file mode 100644 index 11c61c847df..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DefaultDaoReturnTypeKind.java +++ /dev/null @@ -1,532 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.dse.driver.internal.core.cql.reactive.FailedReactiveResultSet; -import com.datastax.dse.driver.internal.mapper.reactive.FailedMappedReactiveResultSet; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.shaded.guava.common.base.Throwables; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.TypeName; -import java.util.Map; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.type.TypeMirror; - -public enum DefaultDaoReturnTypeKind implements DaoReturnTypeKind { - VOID { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - // Note that the execute* methods in the generated code are defined in DaoBase - methodBuilder.addStatement("execute(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - 
Map typeParameters) { - return innerBlock; - } - }, - BOOLEAN { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAndMapWasAppliedToBoolean(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - LONG { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAndMapFirstColumnToLong(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - ROW { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAndExtractFirstRow(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - ENTITY { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAndMapToSingleEntity(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - OPTIONAL_ENTITY { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map 
typeParameters) { - methodBuilder.addStatement( - "return executeAndMapToOptionalEntity(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - RESULT_SET { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return execute(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - BOUND_STATEMENT { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return boundStatement"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - PAGING_ITERABLE { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAndMapToEntityIterable(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - - FUTURE_OF_VOID { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAsyncAndMapToVoid(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map 
typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_BOOLEAN { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAsyncAndMapWasAppliedToBoolean(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_LONG { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAsyncAndMapFirstColumnToLong(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_ROW { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAsyncAndExtractFirstRow(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_ENTITY { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAsyncAndMapToSingleEntity(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - 
return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_OPTIONAL_ENTITY { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAsyncAndMapToOptionalEntity(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_ASYNC_RESULT_SET { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeAsync(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - FUTURE_OF_ASYNC_PAGING_ITERABLE { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAsyncAndMapToEntityIterable(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - REACTIVE_RESULT_SET { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement("return executeReactive(boundStatement)"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) 
{ - return wrapWithErrorHandling(innerBlock, FAILED_REACTIVE_RESULT_SET); - } - }, - MAPPED_REACTIVE_RESULT_SET { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeReactiveAndMap(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return wrapWithErrorHandling(innerBlock, FAILED_MAPPED_REACTIVE_RESULT_SET); - } - }, - - CUSTOM { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - TypeName returnTypeName = - GeneratedCodePatterns.getTypeName(methodElement.getReturnType(), typeParameters); - methodBuilder - .addStatement( - "@$1T(\"unchecked\") $2T result =\n($2T) producer.execute(boundStatement, context, $3L)", - SuppressWarnings.class, - returnTypeName, - helperFieldName) - .addStatement("return result"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - - TypeName returnTypeName = - GeneratedCodePatterns.getTypeName(methodElement.getReturnType(), typeParameters); - - // We're wrapping the whole DAO method with a catch block that calls producer.wrapError. - // wrapError can itself throw, so it's wrapped in a nested try-catch: - CodeBlock.Builder callWrapError = - CodeBlock.builder() - .beginControlFlow("try") - .addStatement( - "@$1T(\"unchecked\") $2T result =\n($2T) producer.wrapError(e)", - SuppressWarnings.class, - returnTypeName) - .addStatement("return result"); - - // Any exception that is explicitly declared by the DAO method can be rethrown directly. 
- // (note: manually a multi-catch would be cleaner, but from here it's simpler to generate - // separate clauses) - for (TypeMirror thrownType : methodElement.getThrownTypes()) { - callWrapError.nextControlFlow("catch ($T e2)", thrownType).addStatement("throw e2"); - } - - // Otherwise, rethrow unchecked exceptions and wrap checked ones. - callWrapError - .nextControlFlow("catch ($T e2)", Exception.class) - .addStatement("$T.throwIfUnchecked(e2)", Throwables.class) - .addStatement("throw new $T(e2)", RuntimeException.class) - .endControlFlow(); - - return wrapWithErrorHandling(innerBlock, callWrapError.build()); - } - }, - - UNSUPPORTED() { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - throw new AssertionError("Should never get here"); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - throw new AssertionError("Should never get here"); - } - }, - - STREAM { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAndMapToEntityStream(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return innerBlock; - } - }, - - FUTURE_OF_STREAM { - @Override - public void addExecuteStatement( - CodeBlock.Builder methodBuilder, - String helperFieldName, - ExecutableElement methodElement, - Map typeParameters) { - methodBuilder.addStatement( - "return executeAsyncAndMapToEntityStream(boundStatement, $L)", helperFieldName); - } - - @Override - public CodeBlock wrapWithErrorHandling( - CodeBlock innerBlock, - ExecutableElement methodElement, - Map typeParameters) { - return 
wrapWithErrorHandling(innerBlock, FAILED_FUTURE); - } - }, - ; - - @Override - public String getDescription() { - return name(); - } - - @Override - public boolean requiresReactive() { - return this == REACTIVE_RESULT_SET || this == MAPPED_REACTIVE_RESULT_SET; - } - - static CodeBlock wrapWithErrorHandling(CodeBlock innerBlock, CodeBlock catchBlock) { - return CodeBlock.builder() - .beginControlFlow("try") - .add(innerBlock) - .nextControlFlow("catch ($T e)", Exception.class) - .add(catchBlock) - .endControlFlow() - .build(); - } - - private static final CodeBlock FAILED_FUTURE = - CodeBlock.builder() - .addStatement("return $T.failedFuture(e)", CompletableFutures.class) - .build(); - private static final CodeBlock FAILED_REACTIVE_RESULT_SET = - CodeBlock.builder().addStatement("return new $T(e)", FailedReactiveResultSet.class).build(); - private static final CodeBlock FAILED_MAPPED_REACTIVE_RESULT_SET = - CodeBlock.builder() - .addStatement("return new $T(e)", FailedMappedReactiveResultSet.class) - .build(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DefaultDaoReturnTypeParser.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DefaultDaoReturnTypeParser.java deleted file mode 100644 index 0966a417b0a..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/DefaultDaoReturnTypeParser.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.stream.Stream; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.type.TypeVariable; - -public class DefaultDaoReturnTypeParser implements DaoReturnTypeParser { - - /** - * The return types that can be inferred directly from {@link TypeMirror#getKind()} (void and - * primitives). 
- */ - protected static final Map DEFAULT_TYPE_KIND_MATCHES = - ImmutableMap.builder() - .put(TypeKind.VOID, DaoReturnType.VOID) - .put(TypeKind.BOOLEAN, DaoReturnType.BOOLEAN) - .put(TypeKind.LONG, DaoReturnType.LONG) - .build(); - - /** The return types that correspond directly to a non-generic Java class. */ - protected static final Map, DaoReturnType> DEFAULT_CLASS_MATCHES = - ImmutableMap., DaoReturnType>builder() - .put(Boolean.class, DaoReturnType.BOOLEAN) - .put(Long.class, DaoReturnType.LONG) - .put(Row.class, DaoReturnType.ROW) - .put(ResultSet.class, DaoReturnType.RESULT_SET) - .put(BoundStatement.class, DaoReturnType.BOUND_STATEMENT) - .put(ReactiveResultSet.class, DaoReturnType.REACTIVE_RESULT_SET) - .build(); - - /** - * The return types that correspond to a generic class with a single type parameter that is an - * entity class. - */ - protected static final Map, DaoReturnTypeKind> DEFAULT_ENTITY_CONTAINER_MATCHES = - ImmutableMap., DaoReturnTypeKind>builder() - .put(Optional.class, DefaultDaoReturnTypeKind.OPTIONAL_ENTITY) - .put(CompletionStage.class, DefaultDaoReturnTypeKind.FUTURE_OF_ENTITY) - .put(CompletableFuture.class, DefaultDaoReturnTypeKind.FUTURE_OF_ENTITY) - .put(PagingIterable.class, DefaultDaoReturnTypeKind.PAGING_ITERABLE) - .put(Stream.class, DefaultDaoReturnTypeKind.STREAM) - .put(MappedReactiveResultSet.class, DefaultDaoReturnTypeKind.MAPPED_REACTIVE_RESULT_SET) - .build(); - - /** The return types that correspond to a future of a non-generic Java class. 
*/ - protected static final Map, DaoReturnType> DEFAULT_FUTURE_OF_CLASS_MATCHES = - ImmutableMap., DaoReturnType>builder() - .put(Void.class, DaoReturnType.FUTURE_OF_VOID) - .put(Boolean.class, DaoReturnType.FUTURE_OF_BOOLEAN) - .put(Long.class, DaoReturnType.FUTURE_OF_LONG) - .put(Row.class, DaoReturnType.FUTURE_OF_ROW) - .put(AsyncResultSet.class, DaoReturnType.FUTURE_OF_ASYNC_RESULT_SET) - .build(); - - /** - * The return types that correspond to a future of a generic class with a single type parameter - * that is an entity class. - */ - protected static final Map, DaoReturnTypeKind> - DEFAULT_FUTURE_OF_ENTITY_CONTAINER_MATCHES = - ImmutableMap., DaoReturnTypeKind>builder() - .put(Optional.class, DefaultDaoReturnTypeKind.FUTURE_OF_OPTIONAL_ENTITY) - .put( - MappedAsyncPagingIterable.class, - DefaultDaoReturnTypeKind.FUTURE_OF_ASYNC_PAGING_ITERABLE) - .put(Stream.class, DefaultDaoReturnTypeKind.FUTURE_OF_STREAM) - .build(); - - protected final ProcessorContext context; - private final Map typeKindMatches; - private final Map, DaoReturnType> classMatches; - private final Map, DaoReturnTypeKind> entityContainerMatches; - private final Map, DaoReturnType> futureOfClassMatches; - private final Map, DaoReturnTypeKind> futureOfEntityContainerMatches; - - public DefaultDaoReturnTypeParser(ProcessorContext context) { - this( - context, - DEFAULT_TYPE_KIND_MATCHES, - DEFAULT_CLASS_MATCHES, - DEFAULT_ENTITY_CONTAINER_MATCHES, - DEFAULT_FUTURE_OF_CLASS_MATCHES, - DEFAULT_FUTURE_OF_ENTITY_CONTAINER_MATCHES); - } - - protected DefaultDaoReturnTypeParser( - ProcessorContext context, - Map typeKindMatches, - Map, DaoReturnType> classMatches, - Map, DaoReturnTypeKind> entityContainerMatches, - Map, DaoReturnType> futureOfClassMatches, - Map, DaoReturnTypeKind> futureOfEntityContainerMatches) { - this.context = context; - this.typeKindMatches = typeKindMatches; - this.classMatches = classMatches; - this.entityContainerMatches = entityContainerMatches; - this.futureOfClassMatches 
= futureOfClassMatches; - this.futureOfEntityContainerMatches = futureOfEntityContainerMatches; - } - - @NonNull - @Override - public DaoReturnType parse( - @NonNull TypeMirror returnTypeMirror, @NonNull Map typeParameters) { - - // void or a primitive? - DaoReturnType match = typeKindMatches.get(returnTypeMirror.getKind()); - if (match != null) { - return match; - } - - if (returnTypeMirror.getKind() == TypeKind.DECLARED) { - - // entity class? e.g. Product - TypeElement entityElement; - if ((entityElement = EntityUtils.asEntityElement(returnTypeMirror, typeParameters)) != null) { - return new DaoReturnType(DefaultDaoReturnTypeKind.ENTITY, entityElement); - } - - // simple class? e.g. Boolean - DeclaredType declaredReturnType = (DeclaredType) returnTypeMirror; - for (Map.Entry, DaoReturnType> entry : classMatches.entrySet()) { - Class simpleClass = entry.getKey(); - if (context.getClassUtils().isSame(declaredReturnType, simpleClass)) { - return entry.getValue(); - } - } - - // entity container? e.g. Optional - if (declaredReturnType.getTypeArguments().size() == 1 - && (entityElement = - EntityUtils.typeArgumentAsEntityElement(returnTypeMirror, typeParameters)) - != null) { - Element returnElement = declaredReturnType.asElement(); - for (Map.Entry, DaoReturnTypeKind> entry : entityContainerMatches.entrySet()) { - Class containerClass = entry.getKey(); - if (context.getClassUtils().isSame(returnElement, containerClass)) { - return new DaoReturnType(entry.getValue(), entityElement); - } - } - } - - if (context.getClassUtils().isFuture(declaredReturnType)) { - TypeMirror typeArgumentMirror = declaredReturnType.getTypeArguments().get(0); - - // future of a simple class? e.g. CompletableFuture - for (Map.Entry, DaoReturnType> entry : futureOfClassMatches.entrySet()) { - Class simpleClassArgument = entry.getKey(); - if (context.getClassUtils().isSame(typeArgumentMirror, simpleClassArgument)) { - return entry.getValue(); - } - } - - // Note that futures of entities (e.g. 
CompletionStage) are already covered by the - // "entity container" check above - - // future of entity container? e.g. CompletionStage> - if (typeArgumentMirror.getKind() == TypeKind.DECLARED) { - DeclaredType declaredTypeArgument = (DeclaredType) typeArgumentMirror; - if (declaredTypeArgument.getTypeArguments().size() == 1 - && (entityElement = - EntityUtils.typeArgumentAsEntityElement(typeArgumentMirror, typeParameters)) - != null) { - Element typeArgumentElement = declaredTypeArgument.asElement(); - for (Map.Entry, DaoReturnTypeKind> entry : - futureOfEntityContainerMatches.entrySet()) { - Class containerClass = entry.getKey(); - if (context.getClassUtils().isSame(typeArgumentElement, containerClass)) { - return new DaoReturnType(entry.getValue(), entityElement); - } - } - } - } - } - - // Otherwise assume a custom type. A MappedResultProducer will be looked up from the - // MapperContext at runtime. - if (context.areCustomResultsEnabled()) { - return new DaoReturnType( - DefaultDaoReturnTypeKind.CUSTOM, - findEntityInCustomType(declaredReturnType, typeParameters, new ArrayList<>())); - } - } - - if (returnTypeMirror.getKind() == TypeKind.TYPEVAR) { - - // entity class? e.g. Product - TypeElement entityElement; - if ((entityElement = EntityUtils.asEntityElement(returnTypeMirror, typeParameters)) != null) { - return new DaoReturnType(DefaultDaoReturnTypeKind.ENTITY, entityElement); - } - - // simple class? e.g. Boolean - TypeVariable typeVariable = ((TypeVariable) returnTypeMirror); - Name name = typeVariable.asElement().getSimpleName(); - TypeElement element = typeParameters.get(name); - if (element != null) { - for (Map.Entry, DaoReturnType> entry : classMatches.entrySet()) { - Class simpleClass = entry.getKey(); - if (context.getClassUtils().isSame(element, simpleClass)) { - return entry.getValue(); - } - } - } - - // DAO parameterization by more complex types (futures, containers...) 
is not supported - } - - return DaoReturnType.UNSUPPORTED; - } - - /** - * If we're dealing with a {@link DefaultDaoReturnTypeKind#CUSTOM}, we allow one entity element to - * appear at any level of nesting in the type, e.g. {@code MyCustomFuture>}. - */ - private TypeElement findEntityInCustomType( - TypeMirror type, - Map typeParameters, - List alreadyCheckedTypes) { - - // Generic types can be recursive! e.g. Integer implements Comparable. Avoid infinite - // recursion: - for (TypeMirror alreadyCheckedType : alreadyCheckedTypes) { - if (context.getTypeUtils().isSameType(type, alreadyCheckedType)) { - return null; - } - } - alreadyCheckedTypes.add(type); - - TypeElement entityElement = EntityUtils.asEntityElement(type, typeParameters); - if (entityElement != null) { - return entityElement; - } else if (type.getKind() == TypeKind.DECLARED) { - // Check type arguments, e.g. `Foo` where T = Product - DeclaredType declaredType = (DeclaredType) type; - for (TypeMirror typeArgument : declaredType.getTypeArguments()) { - entityElement = findEntityInCustomType(typeArgument, typeParameters, alreadyCheckedTypes); - if (entityElement != null) { - return entityElement; - } - } - Element element = declaredType.asElement(); - if (element.getKind() == ElementKind.CLASS || element.getKind() == ElementKind.INTERFACE) { - // Check interfaces, e.g. `Foo implements Iterable`, where T = Product - TypeElement typeElement = (TypeElement) element; - for (TypeMirror parentInterface : typeElement.getInterfaces()) { - entityElement = - findEntityInCustomType(parentInterface, typeParameters, alreadyCheckedTypes); - if (entityElement != null) { - return entityElement; - } - } - // Check superclass (if there is none then the mirror has TypeKind.NONE and the recursive - // call will return null). 
- return findEntityInCustomType( - typeElement.getSuperclass(), typeParameters, alreadyCheckedTypes); - } - } - // null is a valid result even at the top level, a custom type may not contain any entity - return null; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/EntityUtils.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/EntityUtils.java deleted file mode 100644 index 7978336299a..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/EntityUtils.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.entity.EntityDefinition; -import com.squareup.javapoet.TypeName; -import java.lang.annotation.Annotation; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.type.TypeVariable; - -public class EntityUtils { - - /** - * If the type of this parameter is an {@link Entity}-annotated class, return that class's - * element, otherwise {@code null}. - */ - public static TypeElement asEntityElement( - VariableElement parameter, Map typeParameters) { - return asEntityElement(parameter.asType(), typeParameters); - } - - /** - * If this mirror's first type argument is an {@link Entity}-annotated class, return that class's - * element, otherwise {@code null}. - * - *

    This method will fail if the mirror does not reference a generic type, the caller is - * responsible to perform that check beforehand. - */ - public static TypeElement typeArgumentAsEntityElement( - TypeMirror mirror, Map typeParameters) { - DeclaredType declaredType = (DeclaredType) mirror; - assert !declaredType.getTypeArguments().isEmpty(); - return asEntityElement(declaredType.getTypeArguments().get(0), typeParameters); - } - - /** - * If this mirror is an {@link Entity}-annotated class, return that class's element, otherwise - * {@code null}. - */ - public static TypeElement asEntityElement( - TypeMirror mirror, Map typeParameters) { - Element element; - if (mirror.getKind() == TypeKind.TYPEVAR) { - // extract concrete implementation for type variable. - TypeVariable typeVariable = ((TypeVariable) mirror); - Name name = typeVariable.asElement().getSimpleName(); - element = typeParameters.get(name); - if (element == null) { - return null; - } - } else if (mirror.getKind() == TypeKind.DECLARED) { - element = ((DeclaredType) mirror).asElement(); - } else { - return null; - } - if (element.getKind() != ElementKind.CLASS - // Hack to support Java 14 records without having to compile against JDK 14 - && !element.getKind().name().equals("RECORD")) { - return null; - } - TypeElement typeElement = (TypeElement) element; - if (typeElement.getAnnotation(Entity.class) == null) { - return null; - } - return typeElement; - } - - /** - * Validates that the given parameters are valid for an {@link EntityDefinition}, meaning that - * there are at least enough parameters provided to match the number of partition key columns and - * that parameter types match the primary key types. - * - *

    If it is determined that the parameters are not valid, false is returned and an error - * message is emitted on the given method element. - */ - public static boolean areParametersValid( - TypeElement entityElement, - EntityDefinition entityDefinition, - List parameters, - Class annotationClass, - ProcessorContext context, - ExecutableElement methodElement, - TypeElement processedType, - String exceptionCondition) { - - if (exceptionCondition == null || exceptionCondition.isEmpty()) { - exceptionCondition = ""; - } else { - exceptionCondition = " that " + exceptionCondition; - } - - List primaryKeyTypes = - entityDefinition.getPrimaryKey().stream() - .map(d -> d.getType().asTypeName()) - .collect(Collectors.toList()); - List partitionKeyTypes = - entityDefinition.getPartitionKey().stream() - .map(d -> d.getType().asTypeName()) - .collect(Collectors.toList()); - List parameterTypes = - parameters.stream().map(p -> TypeName.get(p.asType())).collect(Collectors.toList()); - // if parameters are provided, we must have at least enough to match partition key. - if (parameterTypes.size() < partitionKeyTypes.size()) { - context - .getMessager() - .error( - methodElement, - "Invalid parameter list: %s methods%s " - + "must at least specify partition key components " - + "(expected partition key of %s: %s)", - annotationClass.getSimpleName(), - exceptionCondition, - entityElement.getSimpleName(), - partitionKeyTypes); - return false; - } - - if (parameterTypes.size() > primaryKeyTypes.size()) { - context - .getMessager() - .error( - methodElement, - "Invalid parameter list: %s methods%s " - + "must match the primary key components in the exact order " - + "(expected primary key of %s: %s). 
Too many parameters provided", - annotationClass.getSimpleName(), - exceptionCondition, - entityElement.getSimpleName(), - primaryKeyTypes); - return false; - } - - // validate that each parameter type matches the primary key type - for (int parameterIndex = 0; parameterIndex < parameterTypes.size(); parameterIndex++) { - TypeName parameterType = parameterTypes.get(parameterIndex); - TypeName primaryKeyParameterType = primaryKeyTypes.get(parameterIndex); - if (!parameterType.equals(primaryKeyParameterType)) { - context - .getMessager() - .error( - methodElement, - "Invalid parameter list: %s methods%s " - + "must match the primary key components in the exact order " - + "(expected primary key of %s: %s). Mismatch at index %d: %s should be %s", - annotationClass.getSimpleName(), - exceptionCondition, - entityElement.getSimpleName(), - primaryKeyTypes, - parameterIndex, - parameterType, - primaryKeyParameterType); - return false; - } - } - return true; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/LoggingGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/LoggingGenerator.java deleted file mode 100644 index 4f0455b2af5..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/LoggingGenerator.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; - -public class LoggingGenerator { - - // Reference these types by name. They are in the classpath but that is more of a workaround in - // case they get accidentally referenced via driver core types (see JAVA-2863), the mapper - // processor does not directly "use" SLF4J. - private static final ClassName LOGGER_FACTORY_CLASS_NAME = - ClassName.get("org.slf4j", "LoggerFactory"); - private static final ClassName LOGGER_CLASS_NAME = ClassName.get("org.slf4j", "Logger"); - - private final boolean logsEnabled; - - public LoggingGenerator(boolean logsEnabled) { - this.logsEnabled = logsEnabled; - } - - /** - * Generates a logger in a constant, such as: - * - *

    -   *   private static final Logger LOG = LoggerFactory.getLogger(Foobar.class);
    -   * 
    - * - * @param classBuilder where to generate. - * @param className the name of the class ({@code Foobar}). - */ - public void addLoggerField(TypeSpec.Builder classBuilder, ClassName className) { - if (logsEnabled) { - classBuilder.addField( - FieldSpec.builder( - LOGGER_CLASS_NAME, "LOG", Modifier.PRIVATE, Modifier.FINAL, Modifier.STATIC) - .initializer("$T.getLogger($T.class)", LOGGER_FACTORY_CLASS_NAME, className) - .build()); - } - } - - /** - * Generates a debug log statement, such as: - * - *
    -   *   LOG.debug("setting {} = {}", key, value);
    -   * 
    - * - *

    This assumes that {@link #addLoggerField(TypeSpec.Builder, ClassName)} has already been - * called for the class where this is generated. - * - * @param builder where to generate. - * @param template the message ({@code "setting {} = {}"}). - * @param arguments the arguments ({@code key} and {@code value}). - */ - public void debug(MethodSpec.Builder builder, String template, CodeBlock... arguments) { - log("debug", builder, template, arguments); - } - - /** - * Generates a warn log statement, such as: - * - *

    -   *   LOG.warn("setting {} = {}", key, value);
    -   * 
    - * - *

    This assumes that {@link #addLoggerField(TypeSpec.Builder, ClassName)} has already been - * called for the class where this is generated. - * - * @param builder where to generate. - * @param template the message ({@code "setting {} = {}"}). - * @param arguments the arguments ({@code key} and {@code value}). - */ - public void warn(MethodSpec.Builder builder, String template, CodeBlock... arguments) { - log("warn", builder, template, arguments); - } - - public void log( - String logLevel, MethodSpec.Builder builder, String template, CodeBlock... arguments) { - if (logsEnabled) { - builder.addCode("$[LOG.$L($S", logLevel, template); - for (CodeBlock argument : arguments) { - builder.addCode(",\n$L", argument); - } - builder.addCode(");$]\n"); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/NullSavingStrategyValidation.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/NullSavingStrategyValidation.java deleted file mode 100644 index f030e2f7225..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/dao/NullSavingStrategyValidation.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.Classes; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.annotation.Annotation; -import java.util.List; -import java.util.Optional; -import java.util.function.Function; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.Element; -import javax.lang.model.element.ExecutableElement; - -public class NullSavingStrategyValidation { - private final Classes classUtils; - - public NullSavingStrategyValidation(ProcessorContext context) { - classUtils = context.getClassUtils(); - } - - /** - * For every ExecutableElement that has @{@link NullSavingStrategy} property checks if {@link - * NullSavingStrategy#DO_NOT_SET} was set. The underlying annotations that have that strategy are: - * {@link Update#nullSavingStrategy()}, {@link Insert#nullSavingStrategy()}, {@link - * SetEntity#nullSavingStrategy()} {@link Query#nullSavingStrategy()} - * - * @return true if: - *

    DAO level has SET_TO_NULL and any of underlying has explicit set to DO_NOT_SET - *

    DAO level has DO_NOT_SET and not all underlying override it explicitly to SET_TO_NULL - *

    DAO level annotation do not present and any of method level strategy has DO_NOT_SET - * (including the default one) - */ - public boolean hasDoNotSetOnAnyLevel( - List methodElements, @Nullable DefaultNullSavingStrategy annotation) { - boolean anyMethodHasOrDefaultsToDoNotSet = - methodElements.stream() - .anyMatch( - v -> - updateHasDoNotSet(v, false) - || insertHasDoNotSet(v, false) - || setEntityHasDoNotSet(v, false) - || queryHasDoNotSet(v, false)); - - boolean anyMethodHasDoNotSetExplicitly = - methodElements.stream() - .anyMatch( - v -> - updateHasDoNotSet(v, true) - || insertHasDoNotSet(v, true) - || setEntityHasDoNotSet(v, true) - || queryHasDoNotSet(v, true)); - - boolean allMethodsHaveSetToNull = - methodElements.stream() - .filter(this::isOperationWithNullSavingStrategy) - .allMatch( - v -> - updateHasSetToNullExplicitly(v) - || insertHasSetToNullExplicitly(v) - || setEntitySetToNullExplicitly(v) - || queryHasSetToNullExplicitly(v)); - - // if DAO level SET_TO_NULL check all underlying annotations for explicit set to DO_NOT_SET - // (they may override it) - if (daoHasSetToNull(annotation) && anyMethodHasDoNotSetExplicitly) { - return true; - // if DAO level DO_NOT_SET check if all underlying override it explicitly to SET_TO_NULL - } else if (daoHasDoNotSet(annotation) && !allMethodsHaveSetToNull) { - return true; - // if DAO level annotation do not present, check method level strategy - // (including the default one) - } else { - return daoIsNotAnnotated(annotation) && anyMethodHasOrDefaultsToDoNotSet; - } - } - - private boolean daoHasDoNotSet(DefaultNullSavingStrategy annotation) { - if (annotation != null) { - return annotation.value() == NullSavingStrategy.DO_NOT_SET; - } - return false; - } - - private boolean daoHasSetToNull(DefaultNullSavingStrategy annotation) { - if (annotation != null) { - return annotation.value() == NullSavingStrategy.SET_TO_NULL; - } - return false; - } - - private boolean daoIsNotAnnotated(DefaultNullSavingStrategy 
annotation) { - return annotation == null; - } - - private boolean queryHasDoNotSet(ExecutableElement v, boolean explicitSet) { - return hasDoNotSet(Query.class, Query::nullSavingStrategy, v, explicitSet); - } - - private boolean setEntityHasDoNotSet(ExecutableElement v, boolean explicitSet) { - return hasDoNotSet(SetEntity.class, SetEntity::nullSavingStrategy, v, explicitSet); - } - - private boolean insertHasDoNotSet(ExecutableElement v, boolean explicitSet) { - return hasDoNotSet(Insert.class, Insert::nullSavingStrategy, v, explicitSet); - } - - private boolean updateHasDoNotSet(ExecutableElement v, boolean explicitSet) { - return hasDoNotSet(Update.class, Update::nullSavingStrategy, v, explicitSet); - } - - private boolean hasDoNotSet( - Class clazz, - Function extractor, - ExecutableElement v, - boolean explicitSet) { - A annotation = v.getAnnotation(clazz); - if (annotation != null) { - NullSavingStrategy strategy = extractor.apply(annotation); - if (explicitSet) { - return strategy == NullSavingStrategy.DO_NOT_SET - && nullSavingStrategyExplicitlySet(v, clazz); - } else { - return strategy == NullSavingStrategy.DO_NOT_SET; - } - } - return false; - } - - private boolean queryHasSetToNullExplicitly(ExecutableElement v) { - return hadSetToNullExplicitly(Query.class, Query::nullSavingStrategy, v); - } - - private boolean setEntitySetToNullExplicitly(ExecutableElement v) { - return hadSetToNullExplicitly(SetEntity.class, SetEntity::nullSavingStrategy, v); - } - - private boolean insertHasSetToNullExplicitly(ExecutableElement v) { - return hadSetToNullExplicitly(Insert.class, Insert::nullSavingStrategy, v); - } - - private boolean updateHasSetToNullExplicitly(ExecutableElement v) { - return hadSetToNullExplicitly(Update.class, Update::nullSavingStrategy, v); - } - - private boolean hadSetToNullExplicitly( - Class clazz, Function extractor, ExecutableElement v) { - A annotation = v.getAnnotation(clazz); - if (annotation != null) { - NullSavingStrategy strategy = 
extractor.apply(annotation); - return strategy == NullSavingStrategy.SET_TO_NULL - && nullSavingStrategyExplicitlySet(v, clazz); - } else { - return false; - } - } - - private boolean isOperationWithNullSavingStrategy(ExecutableElement v) { - return v.getAnnotation(Update.class) != null - || v.getAnnotation(Insert.class) != null - || v.getAnnotation(SetEntity.class) != null - || v.getAnnotation(Query.class) != null; - } - - private boolean nullSavingStrategyExplicitlySet(Element methodElement, Class javaClass) { - Optional annotationMirrorForJavaClass = - getAnnotationMirrorForJavaClass(methodElement, javaClass); - // Find out if NullSavingStrategy was set explicitly - if (annotationMirrorForJavaClass.isPresent()) { - for (ExecutableElement executableElement : - annotationMirrorForJavaClass.get().getElementValues().keySet()) { - if (executableElement.getSimpleName().contentEquals("nullSavingStrategy")) { - return true; - } - } - } - return false; - } - - private Optional getAnnotationMirrorForJavaClass( - Element methodElement, Class javaClass) { - List annotationMirrors = methodElement.getAnnotationMirrors(); - for (AnnotationMirror annotationMirror : annotationMirrors) { - if (classUtils.isSame(annotationMirror.getAnnotationType(), javaClass)) { - return Optional.of(annotationMirror); - } - } - return Optional.empty(); - } - - public NullSavingStrategy getNullSavingStrategy( - Class clazz, - Function extractor, - ExecutableElement methodElement, - DaoImplementationSharedCode daoClass) { - A annotation = methodElement.getAnnotation(clazz); - Optional daoNullSavingStrategy = daoClass.getNullSavingStrategy(); - boolean methodNullSavingStrategyExplicitlySet = - nullSavingStrategyExplicitlySet(methodElement, clazz); - // Take method level strategy when explicitly set OR dao level default not specified - if (methodNullSavingStrategyExplicitlySet || !daoNullSavingStrategy.isPresent()) { - return extractor.apply(annotation); - } else { - // Take default when method level 
not specified and DAO level default present - return daoNullSavingStrategy.get(); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/BuiltInNameConversions.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/BuiltInNameConversions.java deleted file mode 100644 index 5c225fa8bf8..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/BuiltInNameConversions.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.driver.shaded.guava.common.base.CaseFormat; - -/** - * Handles the {@link NamingConvention built-in naming conventions}. - * - *

    Unlike user-provided {@link NameConverter}s, this code is invoked at compile time by the - * mapper processor (built-in conventions are applied directly in the generated code). - */ -public class BuiltInNameConversions { - - public static String toCassandraName(String javaName, NamingConvention convention) { - switch (convention) { - case CASE_INSENSITIVE: - return javaName; - case EXACT_CASE: - return Strings.doubleQuote(javaName); - case LOWER_CAMEL_CASE: - // Piggy-back on Guava's CaseFormat. Note that we indicate that the input is upper-camel - // when in reality it can be lower-camel for a property name, but CaseFormat is lenient and - // handles that correctly. - return Strings.doubleQuote(CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, javaName)); - case UPPER_CAMEL_CASE: - return Strings.doubleQuote(CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_CAMEL, javaName)); - case SNAKE_CASE_INSENSITIVE: - return CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, javaName); - case UPPER_SNAKE_CASE: - return Strings.doubleQuote( - CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, javaName)); - case UPPER_CASE: - return Strings.doubleQuote(javaName.toUpperCase()); - default: - throw new AssertionError("Unsupported convention: " + convention); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/CqlNameGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/CqlNameGenerator.java deleted file mode 100644 index 1d0ec08a1f1..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/CqlNameGenerator.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import com.squareup.javapoet.CodeBlock; -import javax.lang.model.type.TypeMirror; - -/** - * Generates the code blocks for CQL names according to the strategy defined by the {@link - * NamingStrategy} annotation on an entity class. - * - *

    If the entity uses a built-in convention, we convert right away and the code blocks are just - * simple strings: - * - *

    - * target.set("product_id", entity.getProductId(), UUID.class);
    - * 
    - * - * If it uses a custom converter class, we must instantiate it and invoke it at runtime: - * - *
    - * target.set(context.getNameConverter(SomeCustomConverter.class).toCassandraName("productId"),
    - *            entity.getProductId(), UUID.class);
    - * 
    - */ -public class CqlNameGenerator { - - /** The default (when an entity is not annotated). */ - public static final CqlNameGenerator DEFAULT = - new CqlNameGenerator(NamingConvention.SNAKE_CASE_INSENSITIVE); - - private final NamingConvention namingConvention; - private final TypeMirror converterClassMirror; - - public CqlNameGenerator(NamingConvention namingConvention) { - this(namingConvention, null); - } - - public CqlNameGenerator(TypeMirror converterClassMirror) { - this(null, converterClassMirror); - } - - private CqlNameGenerator(NamingConvention namingConvention, TypeMirror converterClassMirror) { - assert namingConvention == null ^ converterClassMirror == null; - this.namingConvention = namingConvention; - this.converterClassMirror = converterClassMirror; - } - - public CodeBlock buildCqlName(String javaName) { - if (namingConvention != null) { - return CodeBlock.of("$S", BuiltInNameConversions.toCassandraName(javaName, namingConvention)); - } else { - return CodeBlock.of( - "context.getNameConverter($T.class).toCassandraName($S)", converterClassMirror, javaName); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultEntityDefinition.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultEntityDefinition.java deleted file mode 100644 index 9b87066bf28..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultEntityDefinition.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; -import java.util.Optional; - -public class DefaultEntityDefinition implements EntityDefinition { - - private final ClassName className; - private final CodeBlock cqlName; - private final List partitionKey; - private final List clusteringColumns; - private final ImmutableList regularColumns; - private final ImmutableList computedValues; - private final String defaultKeyspace; - private final boolean mutable; - - public DefaultEntityDefinition( - ClassName className, - String javaName, - String defaultKeyspace, - Optional customCqlName, - List partitionKey, - List clusteringColumns, - List regularColumns, - List computedValues, - CqlNameGenerator cqlNameGenerator, - boolean mutable) { - this.className = className; - this.cqlName = - customCqlName - .map(n -> CodeBlock.of("$S", n)) - .orElse(cqlNameGenerator.buildCqlName(javaName)); - this.defaultKeyspace = defaultKeyspace; - this.partitionKey = partitionKey; - this.clusteringColumns = clusteringColumns; - this.regularColumns = ImmutableList.copyOf(regularColumns); - this.computedValues = ImmutableList.copyOf(computedValues); - this.mutable = mutable; - } - - @Override - public ClassName getClassName() { - return className; - } - - @Override - public CodeBlock getCqlName() { - return cqlName; - } - - 
@Nullable - @Override - public String getDefaultKeyspace() { - return defaultKeyspace; - } - - @Override - public List getPartitionKey() { - return partitionKey; - } - - @Override - public List getClusteringColumns() { - return clusteringColumns; - } - - @Override - public Iterable getRegularColumns() { - return regularColumns; - } - - @Override - public Iterable getComputedValues() { - return computedValues; - } - - @Override - public boolean isMutable() { - return mutable; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultEntityFactory.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultEntityFactory.java deleted file mode 100644 index 84d0ec61267..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultEntityFactory.java +++ /dev/null @@ -1,740 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Computed; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Transient; -import com.datastax.oss.driver.api.mapper.annotations.TransientProperties; -import com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle; -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.AnnotationScanner; -import com.datastax.oss.driver.internal.mapper.processor.util.Capitalizer; -import com.datastax.oss.driver.internal.mapper.processor.util.HierarchyScanner; -import com.datastax.oss.driver.internal.mapper.processor.util.ResolvedAnnotation; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.PropertyType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.squareup.javapoet.ClassName; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.annotation.Annotation; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import 
java.util.Set; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.stream.Collectors; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -public class DefaultEntityFactory implements EntityFactory { - - private final ProcessorContext context; - - // property annotations of which only 1 is allowed on a property - private static final Set> EXCLUSIVE_PROPERTY_ANNOTATIONS = - ImmutableSet.of(ClusteringColumn.class, PartitionKey.class, Transient.class, Computed.class); - - // all valid property annotations to scan for. 
- private static final Set> PROPERTY_ANNOTATIONS = - ImmutableSet.>builder() - .addAll(EXCLUSIVE_PROPERTY_ANNOTATIONS) - .add(CqlName.class) - .build(); - - public DefaultEntityFactory(ProcessorContext context) { - this.context = context; - } - - @Override - public EntityDefinition getDefinition(TypeElement processedClass) { - Set types = HierarchyScanner.resolveTypeHierarchy(processedClass, context); - Set typeHierarchy = Sets.newLinkedHashSet(); - for (TypeMirror type : types) { - typeHierarchy.add((TypeElement) context.getTypeUtils().asElement(type)); - } - - Language language = Language.detect(typeHierarchy); - - Optional propertyStrategy = getPropertyStrategy(typeHierarchy); - GetterStyle getterStyle = - propertyStrategy.map(PropertyStrategy::getterStyle).orElse(language.defaultGetterStyle); - SetterStyle setterStyle = - propertyStrategy.map(PropertyStrategy::setterStyle).orElse(language.defaultSetterStyle); - boolean mutable = - propertyStrategy.map(PropertyStrategy::mutable).orElse(language.defaultMutable); - CqlNameGenerator cqlNameGenerator = buildCqlNameGenerator(typeHierarchy); - Set transientProperties = getTransientPropertyNames(typeHierarchy); - - Set encounteredPropertyNames = Sets.newHashSet(); - SortedMap partitionKey = new TreeMap<>(); - SortedMap clusteringColumns = new TreeMap<>(); - ImmutableList.Builder regularColumns = ImmutableList.builder(); - ImmutableList.Builder computedValues = ImmutableList.builder(); - - // scan hierarchy for properties - for (TypeElement typeElement : typeHierarchy) { - for (Element child : typeElement.getEnclosedElements()) { - Set modifiers = child.getModifiers(); - if (child.getKind() != ElementKind.METHOD - || modifiers.contains(Modifier.STATIC) - || modifiers.contains(Modifier.PRIVATE)) { - continue; - } - ExecutableElement getMethod = (ExecutableElement) child; - if (!getMethod.getParameters().isEmpty()) { - continue; - } - TypeMirror typeMirror = getMethod.getReturnType(); - if (typeMirror.getKind() == 
TypeKind.VOID) { - continue; - } - - String getMethodName = getMethod.getSimpleName().toString(); - - // Skip methods that test as false positives with the fluent getter style: toString(), - // hashCode() and a few Scala or Kotlin methods. - if (getMethodName.equals("toString") - || getMethodName.equals("hashCode") - || (language == Language.SCALA_CASE_CLASS - && (getMethodName.equals("productPrefix") - || getMethodName.equals("productArity") - || getMethodName.equals("productIterator") - || getMethodName.equals("productElementNames") - || getMethodName.startsWith("copy$default$"))) - || (language == Language.KOTLIN_DATA_CLASS - && getMethodName.matches("component[0-9]+"))) { - continue; - } - - String propertyName = inferPropertyName(getMethodName, getterStyle, typeMirror); - if (propertyName == null) { - // getMethodName does not follow a known pattern => this is not a getter, skip - continue; - } - - // skip properties we've already encountered. - if (encounteredPropertyNames.contains(propertyName)) { - continue; - } - - String setMethodName; - if (mutable) { - setMethodName = inferSetMethodName(propertyName, setterStyle); - ExecutableElement setMethod = findSetMethod(typeHierarchy, setMethodName, typeMirror); - if (setMethod == null) { - continue; // must have both - } - } else { - setMethodName = null; - } - VariableElement field = findField(typeHierarchy, propertyName, typeMirror); - - Map, Annotation> propertyAnnotations = - scanPropertyAnnotations(typeHierarchy, getMethod, field); - if (isTransient(propertyAnnotations, propertyName, transientProperties, getMethod, field)) { - continue; - } - - int partitionKeyIndex = getPartitionKeyIndex(propertyAnnotations); - int clusteringColumnIndex = getClusteringColumnIndex(propertyAnnotations); - Optional customCqlName = getCustomCqlName(propertyAnnotations); - Optional computedFormula = - getComputedFormula(propertyAnnotations, getMethod, field); - - PropertyType propertyType = PropertyType.parse(typeMirror, 
context); - PropertyDefinition property = - new DefaultPropertyDefinition( - propertyName, - customCqlName, - computedFormula, - getMethodName, - setMethodName, - propertyType, - cqlNameGenerator); - encounteredPropertyNames.add(propertyName); - - if (partitionKeyIndex >= 0) { - PropertyDefinition previous = partitionKey.putIfAbsent(partitionKeyIndex, property); - if (previous != null) { - context - .getMessager() - .error( - getMethod, - "Duplicate partition key index: if multiple properties are annotated " - + "with @%s, the annotation must be parameterized with an integer " - + "indicating the position. Found duplicate index %d for %s and %s.", - PartitionKey.class.getSimpleName(), - partitionKeyIndex, - previous.getGetterName(), - property.getGetterName()); - } - } else if (clusteringColumnIndex >= 0) { - PropertyDefinition previous = - clusteringColumns.putIfAbsent(clusteringColumnIndex, property); - if (previous != null) { - context - .getMessager() - .error( - getMethod, - "Duplicate clustering column index: if multiple properties are annotated " - + "with @%s, the annotation must be parameterized with an integer " - + "indicating the position. 
Found duplicate index %d for %s and %s.", - ClusteringColumn.class.getSimpleName(), - clusteringColumnIndex, - previous.getGetterName(), - property.getGetterName()); - } - } else if (computedFormula.isPresent()) { - computedValues.add(property); - } else { - regularColumns.add(property); - } - } - } - - if (encounteredPropertyNames.isEmpty()) { - context - .getMessager() - .error( - processedClass, - "@%s-annotated class must have at least one property defined.", - Entity.class.getSimpleName()); - } - - String entityName = Capitalizer.decapitalize(processedClass.getSimpleName().toString()); - String defaultKeyspace = processedClass.getAnnotation(Entity.class).defaultKeyspace(); - - EntityDefinition entityDefinition = - new DefaultEntityDefinition( - ClassName.get(processedClass), - entityName, - defaultKeyspace.isEmpty() ? null : defaultKeyspace, - Optional.ofNullable(processedClass.getAnnotation(CqlName.class)).map(CqlName::value), - ImmutableList.copyOf(partitionKey.values()), - ImmutableList.copyOf(clusteringColumns.values()), - regularColumns.build(), - computedValues.build(), - cqlNameGenerator, - mutable); - validateConstructor(entityDefinition, processedClass); - return entityDefinition; - } - - private String inferPropertyName(String getMethodName, GetterStyle getterStyle, TypeMirror type) { - switch (getterStyle) { - case FLUENT: - return getMethodName; - case JAVABEANS: - if (getMethodName.startsWith("get") && getMethodName.length() > 3) { - return Capitalizer.decapitalize(getMethodName.substring(3)); - } else if (getMethodName.startsWith("is") - && getMethodName.length() > 2 - && (type.getKind() == TypeKind.BOOLEAN - || context.getClassUtils().isSame(type, Boolean.class))) { - return Capitalizer.decapitalize(getMethodName.substring(2)); - } else { - return null; - } - default: - throw new AssertionError("Unsupported getter style " + getterStyle); - } - } - - private String inferSetMethodName(String propertyName, SetterStyle setterStyle) { - String 
setMethodName; - switch (setterStyle) { - case JAVABEANS: - setMethodName = "set" + Capitalizer.capitalize(propertyName); - break; - case FLUENT: - setMethodName = propertyName; - break; - default: - throw new AssertionError("Unsupported setter style " + setterStyle); - } - return setMethodName; - } - - @Nullable - private VariableElement findField( - Set typeHierarchy, String propertyName, TypeMirror fieldType) { - for (TypeElement classElement : typeHierarchy) { - // skip interfaces as they can't have fields - if (classElement.getKind().isInterface()) { - continue; - } - for (Element child : classElement.getEnclosedElements()) { - if (child.getKind() != ElementKind.FIELD) { - continue; - } - VariableElement field = (VariableElement) child; - if (field.getSimpleName().toString().equals(propertyName) - && context.getTypeUtils().isAssignable(fieldType, field.asType())) { - return field; - } - } - } - return null; - } - - @Nullable - private ExecutableElement findSetMethod( - Set typeHierarchy, String setMethodName, TypeMirror fieldType) { - for (TypeElement classElement : typeHierarchy) { - for (Element child : classElement.getEnclosedElements()) { - Set modifiers = child.getModifiers(); - if (child.getKind() != ElementKind.METHOD - || modifiers.contains(Modifier.STATIC) - || modifiers.contains(Modifier.PRIVATE)) { - continue; - } - ExecutableElement setMethod = (ExecutableElement) child; - List parameters = setMethod.getParameters(); - - if (setMethod.getSimpleName().toString().equals(setMethodName) - && parameters.size() == 1 - && context.getTypeUtils().isAssignable(fieldType, parameters.get(0).asType())) { - return setMethod; - } - } - } - return null; - } - - private Optional getCustomCqlName( - Map, Annotation> annotations) { - CqlName cqlName = (CqlName) annotations.get(CqlName.class); - return cqlName != null ? 
Optional.of(cqlName.value()) : Optional.empty(); - } - - private int getPartitionKeyIndex(Map, Annotation> annotations) { - PartitionKey partitionKey = (PartitionKey) annotations.get(PartitionKey.class); - return partitionKey != null ? partitionKey.value() : -1; - } - - private int getClusteringColumnIndex(Map, Annotation> annotations) { - ClusteringColumn clusteringColumn = (ClusteringColumn) annotations.get(ClusteringColumn.class); - return clusteringColumn != null ? clusteringColumn.value() : -1; - } - - private Optional getComputedFormula( - Map, Annotation> annotations, - ExecutableElement getMethod, - @Nullable VariableElement field) { - Computed annotation = (Computed) annotations.get(Computed.class); - - if (annotation != null) { - // ensure formula is non-empty - String value = annotation.value(); - if (value.isEmpty()) { - Element element = - field != null && field.getAnnotation(Computed.class) != null ? field : getMethod; - context.getMessager().error(element, "@Computed value should be non-empty."); - } - return Optional.of(value); - } - return Optional.empty(); - } - - private CqlNameGenerator buildCqlNameGenerator(Set typeHierarchy) { - Optional> annotation = - AnnotationScanner.getClassAnnotation(NamingStrategy.class, typeHierarchy); - if (!annotation.isPresent()) { - return CqlNameGenerator.DEFAULT; - } - - NamingStrategy namingStrategy = annotation.get().getAnnotation(); - // Safe cast because the annotation can only be used on types: - TypeElement classElement = (TypeElement) annotation.get().getElement(); - if (namingStrategy == null) { - return CqlNameGenerator.DEFAULT; - } - - NamingConvention[] conventions = namingStrategy.convention(); - TypeMirror[] customConverterClasses = readCustomConverterClasses(classElement); - - if (conventions.length > 0 && customConverterClasses.length > 0) { - context - .getMessager() - .error( - classElement, - "Invalid annotation configuration: %s must have either a 'convention' " - + "or 'customConverterClass' 
argument, but not both", - NamingStrategy.class.getSimpleName()); - // Return a generator anyway, so that the processor doesn't crash downstream - return new CqlNameGenerator(conventions[0]); - } else if (conventions.length == 0 && customConverterClasses.length == 0) { - context - .getMessager() - .error( - classElement, - "Invalid annotation configuration: %s must have either a 'convention' " - + "or 'customConverterClass' argument", - NamingStrategy.class.getSimpleName()); - return CqlNameGenerator.DEFAULT; - } else if (conventions.length > 0) { - if (conventions.length > 1) { - context - .getMessager() - .warn( - classElement, - "Too many naming conventions: %s must have at most one 'convention' " - + "argument (will use the first one: %s)", - NamingStrategy.class.getSimpleName(), - conventions[0]); - } - return new CqlNameGenerator(conventions[0]); - } else { - if (customConverterClasses.length > 1) { - context - .getMessager() - .warn( - classElement, - "Too many custom converters: %s must have at most one " - + "'customConverterClass' argument (will use the first one: %s)", - NamingStrategy.class.getSimpleName(), - customConverterClasses[0]); - } - return new CqlNameGenerator(customConverterClasses[0]); - } - } - - private TypeMirror[] readCustomConverterClasses(Element classElement) { - // customConverterClass references a class that might not be compiled yet, so we can't read it - // directly, we need to go through mirrors. 
- AnnotationMirror annotationMirror = null; - for (AnnotationMirror candidate : classElement.getAnnotationMirrors()) { - if (context.getClassUtils().isSame(candidate.getAnnotationType(), NamingStrategy.class)) { - annotationMirror = candidate; - break; - } - } - assert annotationMirror != null; // We've checked that in the caller already - - for (Map.Entry entry : - annotationMirror.getElementValues().entrySet()) { - if (entry.getKey().getSimpleName().contentEquals("customConverterClass")) { - @SuppressWarnings("unchecked") - List values = (List) entry.getValue().getValue(); - TypeMirror[] result = new TypeMirror[values.size()]; - for (int i = 0; i < values.size(); i++) { - result[i] = ((TypeMirror) values.get(i).getValue()); - } - return result; - } - } - return new TypeMirror[0]; - } - - private boolean isTransient( - Map, Annotation> annotations, - String propertyName, - Set transientProperties, - ExecutableElement getMethod, - @Nullable VariableElement field) { - - Transient transientAnnotation = (Transient) annotations.get(Transient.class); - // check if property name is included in @TransientProperties - // -or- if property is annotated with @Transient - // -or- if field has transient keyword modifier - boolean isTransient = - transientProperties.contains(propertyName) - || transientAnnotation != null - || (field != null && field.getModifiers().contains(Modifier.TRANSIENT)); - - // if annotations contains an exclusive annotation that isn't transient, raise - // an error here. - Class exclusiveAnnotation = getExclusiveAnnotation(annotations); - if (isTransient && transientAnnotation == null && exclusiveAnnotation != null) { - Element element = field != null ? 
field : getMethod; - context - .getMessager() - .error( - element, - "Property that is considered transient cannot be annotated with @%s.", - exclusiveAnnotation.getSimpleName()); - } - - return isTransient; - } - - private Set getTransientPropertyNames(Set typeHierarchy) { - Optional> annotation = - AnnotationScanner.getClassAnnotation(TransientProperties.class, typeHierarchy); - - return annotation.isPresent() - ? Sets.newHashSet(annotation.get().getAnnotation().value()) - : Collections.emptySet(); - } - - private Optional getPropertyStrategy(Set typeHierarchy) { - return AnnotationScanner.getClassAnnotation(PropertyStrategy.class, typeHierarchy) - .map(ResolvedAnnotation::getAnnotation); - } - - private void reportMultipleAnnotationError( - Element element, Class a0, Class a1) { - if (a0 == a1) { - context - .getMessager() - .warn( - element, - "@%s should be used either on the field or the getter, but not both. " - + "The annotation on this field will be ignored.", - a0.getSimpleName()); - } else { - context - .getMessager() - .error( - element, - "Properties can't be annotated with both @%s and @%s.", - a0.getSimpleName(), - a1.getSimpleName()); - } - } - - private Map, Annotation> scanPropertyAnnotations( - Set typeHierarchy, - ExecutableElement getMethod, - @Nullable VariableElement field) { - Map, Annotation> annotations = Maps.newHashMap(); - - // scan methods first as they should take precedence. 
- scanMethodAnnotations(typeHierarchy, getMethod, annotations); - if (field != null) { - scanFieldAnnotations(field, annotations); - } - - return ImmutableMap.copyOf(annotations); - } - - @Nullable - private Class getExclusiveAnnotation( - Map, Annotation> annotations) { - for (Class annotationClass : annotations.keySet()) { - if (EXCLUSIVE_PROPERTY_ANNOTATIONS.contains(annotationClass)) { - return annotationClass; - } - } - return null; - } - - private void scanFieldAnnotations( - VariableElement field, Map, Annotation> annotations) { - Class exclusiveAnnotation = getExclusiveAnnotation(annotations); - for (Class annotationClass : PROPERTY_ANNOTATIONS) { - Annotation annotation = field.getAnnotation(annotationClass); - if (annotation != null) { - if (EXCLUSIVE_PROPERTY_ANNOTATIONS.contains(annotationClass)) { - if (exclusiveAnnotation == null) { - exclusiveAnnotation = annotationClass; - } else { - reportMultipleAnnotationError(field, exclusiveAnnotation, annotationClass); - } - } - if (!annotations.containsKey(annotationClass)) { - annotations.put(annotationClass, annotation); - } - } - } - } - - private void scanMethodAnnotations( - Set typeHierarchy, - ExecutableElement getMethod, - Map, Annotation> annotations) { - Class exclusiveAnnotation = getExclusiveAnnotation(annotations); - for (Class annotationClass : PROPERTY_ANNOTATIONS) { - Optional> annotation = - AnnotationScanner.getMethodAnnotation(annotationClass, getMethod, typeHierarchy); - if (annotation.isPresent()) { - if (EXCLUSIVE_PROPERTY_ANNOTATIONS.contains(annotationClass)) { - if (exclusiveAnnotation == null) { - exclusiveAnnotation = annotationClass; - } else { - reportMultipleAnnotationError( - annotation.get().getElement(), exclusiveAnnotation, annotationClass); - } - } - if (!annotations.containsKey(annotationClass)) { - annotations.put(annotationClass, annotation.get().getAnnotation()); - } - } - } - } - - private void validateConstructor(EntityDefinition entity, TypeElement processedClass) { - 
if (entity.isMutable()) { - validateNoArgConstructor(processedClass); - } else { - validateAllValuesConstructor(processedClass, entity.getAllValues()); - } - } - - private void validateNoArgConstructor(TypeElement processedClass) { - for (Element child : processedClass.getEnclosedElements()) { - if (child.getKind() == ElementKind.CONSTRUCTOR) { - ExecutableElement constructor = (ExecutableElement) child; - Set modifiers = constructor.getModifiers(); - if (!modifiers.contains(Modifier.PRIVATE) && constructor.getParameters().isEmpty()) { - return; - } - } - } - context - .getMessager() - .error( - processedClass, - "Mutable @%s-annotated class must have a no-arg constructor.", - Entity.class.getSimpleName()); - } - - private void validateAllValuesConstructor( - TypeElement processedClass, List columns) { - for (Element child : processedClass.getEnclosedElements()) { - if (child.getKind() == ElementKind.CONSTRUCTOR) { - ExecutableElement constructor = (ExecutableElement) child; - Set modifiers = constructor.getModifiers(); - if (!modifiers.contains(Modifier.PRIVATE) - && areAssignable(columns, constructor.getParameters())) { - return; - } - } - } - String signature = - columns.stream() - .map( - column -> - String.format("%s %s", column.getType().asTypeMirror(), column.getJavaName())) - .collect(Collectors.joining(", ")); - context - .getMessager() - .error( - processedClass, - "Immutable @%s-annotated class must have an \"all values\" constructor. 
" - + "Expected signature: %s(%s).", - Entity.class.getSimpleName(), - processedClass.getSimpleName(), - signature); - } - - private boolean areAssignable( - List columns, List parameters) { - if (columns.size() != parameters.size()) { - return false; - } else { - for (int i = 0; i < columns.size(); i++) { - // What the generated code will pass to the constructor: - TypeMirror argumentType = columns.get(i).getType().asTypeMirror(); - // What the constructor declares: - TypeMirror parameterType = parameters.get(i).asType(); - if (!context.getTypeUtils().isAssignable(argumentType, parameterType)) { - return false; - } - } - return true; - } - } - - /** - * The source language (and construct) of an entity type. It impacts the defaults for entities - * that do not explicitly declare the {@link PropertyStrategy} annotation. - */ - private enum Language { - SCALA_CASE_CLASS(false, GetterStyle.FLUENT, null), - KOTLIN_DATA_CLASS(false, GetterStyle.JAVABEANS, null), - JAVA14_RECORD(false, GetterStyle.FLUENT, null), - UNKNOWN(true, GetterStyle.JAVABEANS, SetterStyle.JAVABEANS), - ; - - final boolean defaultMutable; - final GetterStyle defaultGetterStyle; - final SetterStyle defaultSetterStyle; - - Language( - boolean defaultMutable, GetterStyle defaultGetterStyle, SetterStyle defaultSetterStyle) { - this.defaultMutable = defaultMutable; - this.defaultGetterStyle = defaultGetterStyle; - this.defaultSetterStyle = defaultSetterStyle; - } - - static Language detect(Set typeHierarchy) { - for (TypeElement type : typeHierarchy) { - if (isNamed(type, "scala.Product")) { - return SCALA_CASE_CLASS; - } - if (isNamed(type, "java.lang.Record")) { - return JAVA14_RECORD; - } - } - - TypeElement entityClass = typeHierarchy.iterator().next(); - // Kotlin adds `@kotlin.Metadata` on every generated class, we also check `component1` which - // is a generated method specific to data classes (to eliminate regular Kotlin classes). 
- if (entityClass.getAnnotationMirrors().stream().anyMatch(Language::isKotlinMetadata) - && entityClass.getEnclosedElements().stream() - .anyMatch(e -> isMethodNamed(e, "component1"))) { - return KOTLIN_DATA_CLASS; - } - - return UNKNOWN; - } - - private static boolean isNamed(TypeElement type, String expectedName) { - Name name = type.getQualifiedName(); - return name != null && name.toString().equals(expectedName); - } - - private static boolean isKotlinMetadata(AnnotationMirror a) { - DeclaredType declaredType = a.getAnnotationType(); - if (declaredType.getKind() == TypeKind.DECLARED) { - TypeElement element = (TypeElement) declaredType.asElement(); - return element.getQualifiedName().toString().equals("kotlin.Metadata"); - } - return false; - } - - private static boolean isMethodNamed(Element element, String methodName) { - return element.getKind() == ElementKind.METHOD - && element.getSimpleName().toString().equals(methodName); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultPropertyDefinition.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultPropertyDefinition.java deleted file mode 100644 index 88abb972a1e..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/DefaultPropertyDefinition.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.internal.mapper.processor.util.generation.PropertyType; -import com.squareup.javapoet.CodeBlock; -import java.util.Optional; - -public class DefaultPropertyDefinition implements PropertyDefinition { - - private final String javaName; - private final CodeBlock selector; - private final CodeBlock cqlName; - private final String getterName; - private final String setterName; - private final PropertyType type; - - public DefaultPropertyDefinition( - String javaName, - Optional customCqlName, - Optional computedFormula, - String getterName, - String setterName, - PropertyType type, - CqlNameGenerator cqlNameGenerator) { - this.javaName = javaName; - - this.cqlName = - customCqlName - .map(n -> CodeBlock.of("$S", n)) - .orElse(cqlNameGenerator.buildCqlName(javaName)); - - /* - * If computed formula is present, this property does not map to a particular column, - * but rather a computed result. In this case, we need to use column aliasing - * i.e. 'count(*) as X' as the name is not deterministic from the computed formula. - * In this case we use the cqlName (or customCqlName if present) as the aliased name, - * and the formula as the selector. 
- */ - this.selector = computedFormula.map(n -> CodeBlock.of("$S", n)).orElse(cqlName); - - this.getterName = getterName; - this.setterName = setterName; - this.type = type; - } - - @Override - public String getJavaName() { - return javaName; - } - - @Override - public CodeBlock getSelector() { - return selector; - } - - @Override - public CodeBlock getCqlName() { - return cqlName; - } - - @Override - public String getGetterName() { - return getterName; - } - - @Override - public String getSetterName() { - return setterName; - } - - @Override - public PropertyType getType() { - return type; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityDefinition.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityDefinition.java deleted file mode 100644 index 10ab8ef55f7..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityDefinition.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.List; - -public interface EntityDefinition { - - ClassName getClassName(); - - CodeBlock getCqlName(); - - @Nullable - String getDefaultKeyspace(); - - List getPartitionKey(); - - List getClusteringColumns(); - - /** - * @return the primary key, obtained by concatenating {@link #getPartitionKey()} and {@link - * #getClusteringColumns()}, in that order. - */ - default List getPrimaryKey() { - return ImmutableList.builder() - .addAll(getPartitionKey()) - .addAll(getClusteringColumns()) - .build(); - } - - Iterable getRegularColumns(); - - Iterable getComputedValues(); - - /** - * @return the concatenation of {@link #getPartitionKey()}, {@link #getClusteringColumns()} and - * {@link #getRegularColumns()}, in that order. - */ - default List getAllColumns() { - return ImmutableList.builder() - .addAll(getPartitionKey()) - .addAll(getClusteringColumns()) - .addAll(getRegularColumns()) - .build(); - } - - /** - * @return the concatenation of {@link #getPartitionKey()}, {@link #getClusteringColumns()}, - * {@link #getRegularColumns()}, and {@link #getComputedValues()} in that order. 
- */ - default List getAllValues() { - return ImmutableList.builder() - .addAll(getPartitionKey()) - .addAll(getClusteringColumns()) - .addAll(getRegularColumns()) - .addAll(getComputedValues()) - .build(); - } - - /** @see PropertyStrategy#mutable() */ - boolean isMutable(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityFactory.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityFactory.java deleted file mode 100644 index 348f6fcff2b..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityFactory.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import javax.lang.model.element.TypeElement; - -public interface EntityFactory { - - /** - * Parses an {@link Entity}-annotated POJO and returns a descriptor of its properties and - * annotations. 
- */ - EntityDefinition getDefinition(TypeElement processedClass); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteByPrimaryKeyMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteByPrimaryKeyMethodGenerator.java deleted file mode 100644 index 717697decf8..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteByPrimaryKeyMethodGenerator.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.delete.Delete; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperDeleteByPrimaryKeyMethodGenerator implements MethodGenerator { - - @Override - public Optional generate() { - MethodSpec.Builder deleteByPrimaryKeyBuilder = - MethodSpec.methodBuilder("deleteByPrimaryKey") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(Delete.class) - .addStatement("return deleteByPrimaryKeyParts(primaryKeys.size())"); - - return Optional.of(deleteByPrimaryKeyBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteByPrimaryKeyPartsMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteByPrimaryKeyPartsMethodGenerator.java deleted file mode 100644 index 39c5f529474..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteByPrimaryKeyPartsMethodGenerator.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.delete.Delete; -import com.datastax.oss.driver.api.querybuilder.delete.DeleteSelection; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperDeleteByPrimaryKeyPartsMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - - public EntityHelperDeleteByPrimaryKeyPartsMethodGenerator(EntityDefinition entityDefinition) { - this.entityDefinition = entityDefinition; - } - - @Override - public Optional generate() { - MethodSpec.Builder deleteByPrimaryKeyPartsBuilder = - MethodSpec.methodBuilder("deleteByPrimaryKeyParts") - .addModifiers(Modifier.PUBLIC) - .addParameter(TypeName.INT, "parameterCount") - .returns(Delete.class); - - if (entityDefinition.getPrimaryKey().isEmpty()) { - deleteByPrimaryKeyPartsBuilder.addStatement( - "throw new $T($S)", - MapperException.class, - String.format( - "Entity %s does not declare a primary key", - entityDefinition.getClassName().simpleName())); - } else { - deleteByPrimaryKeyPartsBuilder.beginControlFlow("if (parameterCount <= 0)"); - deleteByPrimaryKeyPartsBuilder.addStatement( - "throw new $T($S)", MapperException.class, "parameterCount 
must be greater than 0"); - deleteByPrimaryKeyPartsBuilder.endControlFlow(); - - deleteByPrimaryKeyPartsBuilder.addStatement( - "$1T deleteSelection = deleteStart()", DeleteSelection.class); - - deleteByPrimaryKeyPartsBuilder.addStatement( - "$1T columnName = primaryKeys.get(0)", String.class); - deleteByPrimaryKeyPartsBuilder.addStatement( - "$1T delete = deleteSelection.whereColumn(columnName).isEqualTo($2T.bindMarker" - + "(columnName))", - Delete.class, - QueryBuilder.class); - deleteByPrimaryKeyPartsBuilder.beginControlFlow( - "for (int i = 1; i < parameterCount && i < " + "primaryKeys.size(); i++)"); - deleteByPrimaryKeyPartsBuilder.addStatement("columnName = primaryKeys.get(i)"); - deleteByPrimaryKeyPartsBuilder.addStatement( - "delete = delete.whereColumn(columnName).isEqualTo($1T.bindMarker(columnName))", - QueryBuilder.class); - deleteByPrimaryKeyPartsBuilder.endControlFlow(); - deleteByPrimaryKeyPartsBuilder.addStatement("return delete"); - } - return Optional.of(deleteByPrimaryKeyPartsBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteStartMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteStartMethodGenerator.java deleted file mode 100644 index c2acb2504ed..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperDeleteStartMethodGenerator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.delete.DeleteSelection; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperDeleteStartMethodGenerator implements MethodGenerator { - - @Override - public Optional generate() { - MethodSpec.Builder deleteStartBuilder = - MethodSpec.methodBuilder("deleteStart") - .addModifiers(Modifier.PUBLIC) - .returns(DeleteSelection.class) - .addStatement("throwIfKeyspaceMissing()") - .addStatement( - "return (keyspaceId == null)\n" - + "? $1T.deleteFrom(tableId)\n" - + ": $1T.deleteFrom(keyspaceId, tableId)", - QueryBuilder.class); - return Optional.of(deleteStartBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperGenerator.java deleted file mode 100644 index e3712cfc0e1..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperGenerator.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.internal.mapper.entity.EntityHelperBase; -import com.datastax.oss.driver.internal.mapper.processor.GeneratedNames; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.SingleFileCodeGenerator; -import com.datastax.oss.driver.internal.mapper.processor.util.Capitalizer; -import com.datastax.oss.driver.internal.mapper.processor.util.NameIndex; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.BindableHandlingSharedCode; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GenericTypeConstantGenerator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.JavaFile; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import 
com.squareup.javapoet.TypeSpec; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.TypeElement; - -public class EntityHelperGenerator extends SingleFileCodeGenerator - implements BindableHandlingSharedCode { - - private final TypeElement classElement; - private final ClassName helperName; - private final NameIndex nameIndex = new NameIndex(); - private final GenericTypeConstantGenerator genericTypeConstantGenerator = - new GenericTypeConstantGenerator(nameIndex); - private final Map childHelpers = new HashMap<>(); - - public EntityHelperGenerator(TypeElement classElement, ProcessorContext context) { - super(context); - this.classElement = classElement; - helperName = GeneratedNames.entityHelper(classElement); - } - - @Override - public NameIndex getNameIndex() { - return nameIndex; - } - - @Override - protected ClassName getPrincipalTypeName() { - return helperName; - } - - @Override - public String addGenericTypeConstant(TypeName type) { - return genericTypeConstantGenerator.add(type); - } - - @Override - public String addEntityHelperField(ClassName childEntityName) { - return childHelpers.computeIfAbsent( - childEntityName, - k -> { - String baseName = Capitalizer.decapitalize(childEntityName.simpleName()) + "Helper"; - return nameIndex.uniqueField(baseName); - }); - } - - @Override - protected JavaFile.Builder getContents() { - EntityDefinition entityDefinition = context.getEntityFactory().getDefinition(classElement); - TypeSpec.Builder classContents = - TypeSpec.classBuilder(helperName) - .addJavadoc(JAVADOC_GENERATED_WARNING) - .addAnnotation( - AnnotationSpec.builder(SuppressWarnings.class) - .addMember("value", "\"all\"") - .build()) - .addModifiers(Modifier.PUBLIC) - .superclass( - ParameterizedTypeName.get( - ClassName.get(EntityHelperBase.class), ClassName.get(classElement))); - - context.getLoggingGenerator().addLoggerField(classContents, helperName); - - 
classContents.addMethod( - MethodSpec.methodBuilder("getEntityClass") - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Override.class) - .returns( - ParameterizedTypeName.get( - ClassName.get(Class.class), entityDefinition.getClassName())) - .addStatement("return $T.class", entityDefinition.getClassName()) - .build()); - - for (MethodGenerator methodGenerator : - ImmutableList.of( - new EntityHelperSetMethodGenerator(entityDefinition, this), - new EntityHelperGetMethodGenerator(entityDefinition, this), - new EntityHelperInsertMethodGenerator(entityDefinition), - new EntityHelperSelectByPrimaryKeyPartsMethodGenerator(), - new EntityHelperSelectByPrimaryKeyMethodGenerator(), - new EntityHelperSelectStartMethodGenerator(entityDefinition), - new EntityHelperDeleteStartMethodGenerator(), - new EntityHelperDeleteByPrimaryKeyPartsMethodGenerator(entityDefinition), - new EntityHelperDeleteByPrimaryKeyMethodGenerator(), - new EntityHelperUpdateStartMethodGenerator(entityDefinition), - new EntityHelperUpdateByPrimaryKeyMethodGenerator(entityDefinition), - new EntityHelperSchemaValidationMethodGenerator( - entityDefinition, classElement, context.getLoggingGenerator(), this))) { - methodGenerator.generate().ifPresent(classContents::addMethod); - } - - MethodSpec.Builder constructorContents = - MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); - - constructorContents.addParameter(ClassName.get(MapperContext.class), "context"); - - if (entityDefinition.getDefaultKeyspace() == null) { - constructorContents.addStatement("super(context, $L)", entityDefinition.getCqlName()); - } else { - constructorContents.addStatement( - "super(context, $S, $L)", - entityDefinition.getDefaultKeyspace(), - entityDefinition.getCqlName()); - } - context - .getLoggingGenerator() - .debug( - constructorContents, - String.format( - "[{}] Entity %s will be mapped to {}{}", - entityDefinition.getClassName().simpleName()), - CodeBlock.of("context.getSession().getName()"), - 
CodeBlock.of("getKeyspaceId() == null ? \"\" : getKeyspaceId() + \".\""), - CodeBlock.of("getTableId()")); - - // retain primary keys for reference in methods. - classContents.addField( - FieldSpec.builder( - ParameterizedTypeName.get(List.class, String.class), - "primaryKeys", - Modifier.PRIVATE, - Modifier.FINAL) - .build()); - - constructorContents.addCode( - "$[this.primaryKeys = $1T.<$2T>builder()", ImmutableList.class, String.class); - for (PropertyDefinition propertyDefinition : entityDefinition.getPrimaryKey()) { - constructorContents.addCode("\n.add($1L)", propertyDefinition.getCqlName()); - } - - constructorContents.addCode("\n.build()$];\n"); - - genericTypeConstantGenerator.generate(classContents); - - for (Map.Entry entry : childHelpers.entrySet()) { - ClassName childEntityName = entry.getKey(); - String fieldName = entry.getValue(); - - ClassName helperClassName = GeneratedNames.entityHelper(childEntityName); - classContents.addField( - FieldSpec.builder(helperClassName, fieldName, Modifier.PRIVATE, Modifier.FINAL).build()); - - constructorContents.addStatement("this.$L = new $T(context)", fieldName, helperClassName); - } - - classContents.addMethod(constructorContents.build()); - - return JavaFile.builder(helperName.packageName(), classContents.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperGetMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperGetMethodGenerator.java deleted file mode 100644 index 198b25f7c4c..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperGetMethodGenerator.java +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.BindableHandlingSharedCode; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.PropertyType; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperGetMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - 
private final BindableHandlingSharedCode enclosingClass; - - public EntityHelperGetMethodGenerator( - EntityDefinition entityDefinition, BindableHandlingSharedCode enclosingClass) { - this.entityDefinition = entityDefinition; - this.enclosingClass = enclosingClass; - } - - @Override - public Optional generate() { - MethodSpec.Builder getBuilder = - MethodSpec.methodBuilder("get") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .addParameter( - ParameterSpec.builder(ClassName.get(GettableByName.class), "source").build()) - .addParameter(ParameterSpec.builder(TypeName.BOOLEAN, "lenient").build()) - .returns(entityDefinition.getClassName()); - - TypeName returnType = entityDefinition.getClassName(); - String resultName = "returnValue"; - boolean mutable = entityDefinition.isMutable(); - if (mutable) { - // Create an instance now, we'll call the setters as we go through the properties - getBuilder.addStatement("$1T $2L = new $1T()", returnType, resultName); - } - - // We store each read property into a local variable, store the names here (this is only used if - // the entity is immutable, we'll call the all-arg constructor at the end). 
- List propertyValueNames = new ArrayList<>(); - - for (PropertyDefinition property : entityDefinition.getAllValues()) { - PropertyType type = property.getType(); - CodeBlock cqlName = property.getCqlName(); - String setterName = property.getSetterName(); - String propertyValueName = enclosingClass.getNameIndex().uniqueField("propertyValue"); - propertyValueNames.add(propertyValueName); - - if (type instanceof PropertyType.Simple) { - TypeName typeName = ((PropertyType.Simple) type).typeName; - String primitiveAccessor = GeneratedCodePatterns.PRIMITIVE_ACCESSORS.get(typeName); - if (primitiveAccessor != null) { - // Primitive type: use dedicated getter, since it is optimized to avoid boxing - // int propertyValue1 = source.getInt("length"); - if (mutable) { - getBuilder - .beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName) - .addStatement( - "$T $L = source.get$L($L)", - typeName, - propertyValueName, - primitiveAccessor, - cqlName) - .addStatement("$L.$L($L)", resultName, setterName, propertyValueName) - .endControlFlow(); - } else { - getBuilder.addStatement( - "$T $L = !lenient || hasProperty(source, $L) ? source.get$L($L) : $L", - typeName, - propertyValueName, - cqlName, - primitiveAccessor, - cqlName, - typeName.equals(TypeName.BOOLEAN) ? false : 0); - } - } else if (typeName instanceof ClassName) { - // Unparameterized class: use the generic, class-based getter: - // UUID propertyValue1 = source.get("id", UUID.class); - if (mutable) { - getBuilder - .beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName) - .addStatement( - "$T $L = source.get($L, $T.class)", - typeName, - propertyValueName, - cqlName, - typeName) - .addStatement("$L.$L($L)", resultName, setterName, propertyValueName) - .endControlFlow(); - } else { - getBuilder.addStatement( - "$T $L = !lenient || hasProperty(source, $L) ? 
source.get($L, $T.class) : null", - typeName, - propertyValueName, - cqlName, - cqlName, - typeName); - } - } else { - // Parameterized type: create a constant and use the GenericType-based getter: - // private static final GenericType> GENERIC_TYPE = - // new GenericType>(){}; - // List propertyValue1 = source.get("names", GENERIC_TYPE); - // Note that lists, sets and maps of unparameterized classes also fall under that - // category. Their getter creates a GenericType under the hood, so there's no performance - // advantage in calling them instead of the generic get(). - if (mutable) { - getBuilder - .beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName) - .addStatement( - "$T $L = source.get($L, $L)", - typeName, - propertyValueName, - cqlName, - enclosingClass.addGenericTypeConstant(typeName)) - .addStatement("$L.$L($L)", resultName, setterName, propertyValueName) - .endControlFlow(); - } else { - getBuilder.addStatement( - "$T $L = !lenient || hasProperty(source, $L) ? source.get($L, $L) : null", - typeName, - propertyValueName, - cqlName, - cqlName, - enclosingClass.addGenericTypeConstant(typeName)); - } - } - } else if (type instanceof PropertyType.SingleEntity) { - ClassName entityClass = ((PropertyType.SingleEntity) type).entityName; - // Other entity class: the CQL column is a mapped UDT: - // Dimensions propertyValue1; - // UdtValue udtValue1 = source.getUdtValue("dimensions"); - // propertyValue1 = udtValue1 == null ? 
null : dimensionsHelper.get(udtValue1); - String udtValueName = enclosingClass.getNameIndex().uniqueField("udtValue"); - if (mutable) { - getBuilder.beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName); - getBuilder.addStatement("$T $L", entityClass, propertyValueName); - } else { - getBuilder.addStatement("$T $L = null", entityClass, propertyValueName); - getBuilder.beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName); - } - getBuilder.addStatement( - "$T $L = source.getUdtValue($L)", UdtValue.class, udtValueName, cqlName); - - // Get underlying udt object and set it on return type - String childHelper = enclosingClass.addEntityHelperField(entityClass); - getBuilder.addStatement( - "$L = $L == null ? null : $L.get($L, lenient)", - propertyValueName, - udtValueName, - childHelper, - udtValueName); - - if (mutable) { - getBuilder.addStatement("$L.$L($L)", resultName, setterName, propertyValueName); - } - getBuilder.endControlFlow(); - } else { - // Collection of other entity class(es): the CQL column is a collection of mapped UDTs - // Build a copy of the value, decoding all UdtValue instances into entities on the fly. 
- // CollectionTypeT propertyValue1; - // RawCollectionTypeT rawCollection1 = source.get("column", GENERIC_TYPE); - // if (rawCollection1 == null) { - // propertyValue1 = null; - // } else { - // traverse rawCollection1 and convert all UdtValue into entity classes, recursing - // into nested collections if necessary - // } - if (mutable) { - getBuilder.beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName); - getBuilder.addStatement("$T $L", type.asTypeName(), propertyValueName); - } else { - getBuilder.addStatement("$T $L = null", type.asTypeName(), propertyValueName); - getBuilder.beginControlFlow("if (!lenient || hasProperty(source, $L))", cqlName); - } - - String rawCollectionName = enclosingClass.getNameIndex().uniqueField("rawCollection"); - TypeName rawCollectionType = type.asRawTypeName(); - getBuilder.addStatement( - "$T $L = source.get($L, $L)", - rawCollectionType, - rawCollectionName, - cqlName, - enclosingClass.addGenericTypeConstant(rawCollectionType)); - - getBuilder - .beginControlFlow("if ($L == null)", rawCollectionName) - .addStatement("$L = null", propertyValueName) - .nextControlFlow("else"); - convertUdtsIntoEntities(rawCollectionName, propertyValueName, type, getBuilder); - getBuilder.endControlFlow(); - - if (mutable) { - getBuilder.addStatement("$L.$L($L)", resultName, setterName, propertyValueName); - } - getBuilder.endControlFlow(); - } - } - - if (mutable) { - // We've already created an instance and filled the properties as we went - getBuilder.addStatement("return returnValue"); - } else { - // Assume an all-arg constructor exists, and call it with all the temporary variables - getBuilder.addCode("$[return new $T(", returnType); - for (int i = 0; i < propertyValueNames.size(); i++) { - getBuilder.addCode((i == 0 ? 
"\n$L" : ",\n$L"), propertyValueNames.get(i)); - } - getBuilder.addCode(")$];"); - } - return Optional.of(getBuilder.build()); - } - - /** - * Generates the code to convert a collection of UDT instances, for example a {@code Map} into a {@code Map}. - * - * @param rawObjectName the name of the local variable containing the value to convert. - * @param mappedObjectName the name of the local variable that will hold the converted value (it - * already exists). - * @param type the type of the value. - * @param getBuilder the method where the generated code will be appended. - */ - private void convertUdtsIntoEntities( - String rawObjectName, - String mappedObjectName, - PropertyType type, - MethodSpec.Builder getBuilder) { - - if (type instanceof PropertyType.SingleEntity) { - ClassName entityClass = ((PropertyType.SingleEntity) type).entityName; - String entityHelperName = enclosingClass.addEntityHelperField(entityClass); - getBuilder.addStatement( - "$L = $L.get($L, lenient)", mappedObjectName, entityHelperName, rawObjectName); - } else if (type instanceof PropertyType.EntityList) { - getBuilder.addStatement( - "$L = $T.newArrayListWithExpectedSize($L.size())", - mappedObjectName, - Lists.class, - rawObjectName); - PropertyType mappedElementType = ((PropertyType.EntityList) type).elementType; - TypeName rawElementType = mappedElementType.asRawTypeName(); - String rawElementName = enclosingClass.getNameIndex().uniqueField("rawElement"); - getBuilder.beginControlFlow("for ($T $L: $L)", rawElementType, rawElementName, rawObjectName); - String mappedElementName = enclosingClass.getNameIndex().uniqueField("mappedElement"); - getBuilder.addStatement("$T $L", mappedElementType.asTypeName(), mappedElementName); - convertUdtsIntoEntities(rawElementName, mappedElementName, mappedElementType, getBuilder); - getBuilder.addStatement("$L.add($L)", mappedObjectName, mappedElementName).endControlFlow(); - } else if (type instanceof PropertyType.EntitySet) { - 
getBuilder.addStatement( - "$L = $T.newLinkedHashSetWithExpectedSize($L.size())", - mappedObjectName, - Sets.class, - rawObjectName); - PropertyType mappedElementType = ((PropertyType.EntitySet) type).elementType; - TypeName rawElementType = mappedElementType.asRawTypeName(); - String rawElementName = enclosingClass.getNameIndex().uniqueField("rawElement"); - getBuilder.beginControlFlow("for ($T $L: $L)", rawElementType, rawElementName, rawObjectName); - String mappedElementName = enclosingClass.getNameIndex().uniqueField("mappedElement"); - getBuilder.addStatement("$T $L", mappedElementType.asTypeName(), mappedElementName); - convertUdtsIntoEntities(rawElementName, mappedElementName, mappedElementType, getBuilder); - getBuilder.addStatement("$L.add($L)", mappedObjectName, mappedElementName).endControlFlow(); - } else if (type instanceof PropertyType.EntityMap) { - getBuilder.addStatement( - "$L = $T.newLinkedHashMapWithExpectedSize($L.size())", - mappedObjectName, - Maps.class, - rawObjectName); - PropertyType mappedKeyType = ((PropertyType.EntityMap) type).keyType; - PropertyType mappedValueType = ((PropertyType.EntityMap) type).valueType; - String rawEntryName = enclosingClass.getNameIndex().uniqueField("rawEntry"); - getBuilder.beginControlFlow( - "for ($T $L: $L.entrySet())", - ParameterizedTypeName.get( - ClassName.get(Map.Entry.class), - mappedKeyType.asRawTypeName(), - mappedValueType.asRawTypeName()), - rawEntryName, - rawObjectName); - String rawKeyName = CodeBlock.of("$L.getKey()", rawEntryName).toString(); - String mappedKeyName; - if (mappedKeyType instanceof PropertyType.Simple) { - mappedKeyName = rawKeyName; // no conversion, use the instance as-is - } else { - mappedKeyName = enclosingClass.getNameIndex().uniqueField("mappedKey"); - getBuilder.addStatement("$T $L", mappedKeyType.asTypeName(), mappedKeyName); - convertUdtsIntoEntities(rawKeyName, mappedKeyName, mappedKeyType, getBuilder); - } - String rawValueName = CodeBlock.of("$L.getValue()", 
rawEntryName).toString(); - String mappedValueName; - if (mappedValueType instanceof PropertyType.Simple) { - mappedValueName = rawValueName; - } else { - mappedValueName = enclosingClass.getNameIndex().uniqueField("mappedValue"); - getBuilder.addStatement("$T $L", mappedValueType.asTypeName(), mappedValueName); - convertUdtsIntoEntities(rawValueName, mappedValueName, mappedValueType, getBuilder); - } - getBuilder - .addStatement("$L.put($L, $L)", mappedObjectName, mappedKeyName, mappedValueName) - .endControlFlow(); - } else { - throw new AssertionError("Unsupported type " + type.asTypeName()); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperInsertMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperInsertMethodGenerator.java deleted file mode 100644 index f795951201f..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperInsertMethodGenerator.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.insert.InsertInto; -import com.datastax.oss.driver.api.querybuilder.insert.RegularInsert; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperInsertMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - - public EntityHelperInsertMethodGenerator(EntityDefinition entityDefinition) { - this.entityDefinition = entityDefinition; - } - - @Override - public Optional generate() { - MethodSpec.Builder insertBuilder = - MethodSpec.methodBuilder("insert") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(RegularInsert.class) - .addStatement("throwIfKeyspaceMissing()") - .addStatement( - "$1T insertInto = (keyspaceId == null)\n" - + "? 
$2T.insertInto(tableId)\n" - + ": $2T.insertInto(keyspaceId, tableId)", - InsertInto.class, - QueryBuilder.class) - .addCode("$[return insertInto"); - - for (PropertyDefinition property : entityDefinition.getAllColumns()) { - insertBuilder.addCode( - "\n.value($1L, $2T.bindMarker($1L))", property.getCqlName(), QueryBuilder.class); - } - insertBuilder.addCode("$];\n"); - return Optional.of(insertBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSchemaValidationMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSchemaValidationMethodGenerator.java deleted file mode 100644 index eed2e8bef9d..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSchemaValidationMethodGenerator.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import static com.datastax.oss.driver.api.mapper.annotations.SchemaHint.TargetElement; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.dao.LoggingGenerator; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.TypeElement; - -public class EntityHelperSchemaValidationMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - private TypeElement entityTypeElement; - private LoggingGenerator loggingGenerator; - private EntityHelperGenerator entityHelperGenerator; - - public EntityHelperSchemaValidationMethodGenerator( - EntityDefinition entityDefinition, - TypeElement entityTypeElement, - LoggingGenerator loggingGenerator, - EntityHelperGenerator entityHelperGenerator) { - this.entityDefinition = entityDefinition; - this.entityTypeElement = entityTypeElement; - this.loggingGenerator = loggingGenerator; - this.entityHelperGenerator = entityHelperGenerator; - } - - @Override - public Optional generate() { - MethodSpec.Builder methodBuilder = - MethodSpec.methodBuilder("validateEntityFields") - 
.addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(TypeName.VOID); - - Optional targetElement = - Optional.ofNullable(entityTypeElement.getAnnotation(SchemaHint.class)) - .map(SchemaHint::targetElement); - - if (targetElement.isPresent() && targetElement.get() == TargetElement.NONE) { - methodBuilder.addComment( - "Nothing to do, validation was disabled with @SchemaHint(targetElement = NONE)"); - } else { - // get keyspaceId from context, and if not present fallback to keyspace set on session - methodBuilder.addStatement( - "$1T keyspaceId = this.keyspaceId != null ? this.keyspaceId : context.getSession().getKeyspace().orElse(null)", - CqlIdentifier.class); - - methodBuilder.addStatement("String entityClassName = $S", entityDefinition.getClassName()); - generateKeyspaceNull(methodBuilder); - - generateKeyspaceNameWrong(methodBuilder); - - methodBuilder.addStatement( - "$1T<$2T> keyspace = context.getSession().getMetadata().getKeyspace(keyspaceId)", - Optional.class, - KeyspaceMetadata.class); - - // Generates expected names to be present in cql (table or udt) - List expectedCqlNames = - entityDefinition.getAllColumns().stream() - .map(PropertyDefinition::getCqlName) - .collect(Collectors.toList()); - methodBuilder.addStatement( - "$1T<$2T> expectedCqlNames = new $3T<>()", - List.class, - CqlIdentifier.class, - ArrayList.class); - for (CodeBlock expectedCqlName : expectedCqlNames) { - methodBuilder.addStatement( - "expectedCqlNames.add($1T.fromCql($2L))", CqlIdentifier.class, expectedCqlName); - } - - methodBuilder.addStatement( - "$1T<$2T> tableMetadata = keyspace.flatMap(v -> v.getTable(tableId))", - Optional.class, - TableMetadata.class); - - // Generated UserDefineTypes metadata - methodBuilder.addStatement( - "$1T<$2T> userDefinedType = keyspace.flatMap(v -> v.getUserDefinedType(tableId))", - Optional.class, - UserDefinedType.class); - - generateValidationChecks(methodBuilder, targetElement); - - logMissingMetadata(methodBuilder); - } - 
return Optional.of(methodBuilder.build()); - } - - private void logMissingMetadata(MethodSpec.Builder methodBuilder) { - methodBuilder.addComment( - "warn if there is not keyspace.table for defined entity - it means that table is missing, or schema it out of date."); - methodBuilder.beginControlFlow("else"); - loggingGenerator.warn( - methodBuilder, - "[{}] There is no ks.table or UDT: {}.{} for the entity class: {}, or metadata is out of date.", - CodeBlock.of("context.getSession().getName()"), - CodeBlock.of("keyspaceId"), - CodeBlock.of("tableId"), - CodeBlock.of("entityClassName")); - methodBuilder.endControlFlow(); - } - - // handle case where keyspace name is not present in metadata keyspaces - private void generateKeyspaceNameWrong(MethodSpec.Builder methodBuilder) { - methodBuilder.beginControlFlow( - "if(!keyspaceNamePresent(context.getSession().getMetadata().getKeyspaces(), keyspaceId))"); - loggingGenerator.warn( - methodBuilder, - "[{}] Unable to validate table: {} for the entity class: {} " - + "because the session metadata has no information about the keyspace: {}.", - CodeBlock.of("context.getSession().getName()"), - CodeBlock.of("tableId"), - CodeBlock.of("entityClassName"), - CodeBlock.of("keyspaceId")); - methodBuilder.addStatement("return"); - methodBuilder.endControlFlow(); - } - - // Handle case where keyspaceId = null. - // In such case we cannot infer and validate schema for table or udt - private void generateKeyspaceNull(MethodSpec.Builder methodBuilder) { - methodBuilder.beginControlFlow("if (keyspaceId == null)"); - loggingGenerator.warn( - methodBuilder, - "[{}] Unable to validate table: {} for the entity class: {} because the keyspace " - + "is unknown (the entity does not declare a default keyspace, and neither the " - + "session nor the DAO were created with a keyspace). 
The DAO will only work if it " - + "uses fully-qualified queries with @Query or @QueryProvider.", - CodeBlock.of("context.getSession().getName()"), - CodeBlock.of("tableId"), - CodeBlock.of("entityClassName")); - methodBuilder.addStatement("return"); - methodBuilder.endControlFlow(); - } - - private void generateValidationChecks( - MethodSpec.Builder methodBuilder, Optional targetElement) { - // if SchemaHint was not provided explicitly try to match TABLE, then fallback to UDT - if (!targetElement.isPresent()) { - validateColumnsInTable(methodBuilder); - validateColumnsInUdt(methodBuilder, true); - } - // if explicitly provided SchemaHint is TABLE, then generate only TABLE check - else if (targetElement.get().equals(TargetElement.TABLE)) { - validateColumnsInTable(methodBuilder); - } - // if explicitly provided SchemaHint is UDT, then generate only UDT check - else if (targetElement.get().equals(TargetElement.UDT)) { - validateColumnsInUdt(methodBuilder, false); - } - } - - private void validateColumnsInTable(MethodSpec.Builder methodBuilder) { - methodBuilder.beginControlFlow("if (tableMetadata.isPresent())"); - - generateMissingClusteringColumnsCheck(methodBuilder); - - generateMissingPKsCheck(methodBuilder); - - generateMissingColumnsCheck(methodBuilder); - - generateColumnsTypeCheck(methodBuilder); - - methodBuilder.endControlFlow(); - } - - private void generateColumnsTypeCheck(MethodSpec.Builder methodBuilder) { - methodBuilder.addComment("validation of types"); - generateExpectedTypesPerColumn(methodBuilder); - - methodBuilder.addStatement( - "$1T<$2T> missingTableTypes = findTypeMismatches(expectedTypesPerColumn, tableMetadata.get().getColumns(), context.getSession().getContext().getCodecRegistry())", - List.class, - String.class); - methodBuilder.addStatement( - "throwMissingTableTypesIfNotEmpty(missingTableTypes, keyspaceId, tableId, entityClassName)"); - } - - private void generateMissingColumnsCheck(MethodSpec.Builder methodBuilder) { - 
methodBuilder.addComment("validation of all columns"); - - methodBuilder.addStatement( - "$1T<$2T> missingTableCqlNames = findMissingCqlIdentifiers(expectedCqlNames, tableMetadata.get().getColumns().keySet())", - List.class, - CqlIdentifier.class); - - // Throw if there are any missingTableCqlNames - CodeBlock missingCqlColumnExceptionMessage = - CodeBlock.of( - "String.format(\"The CQL ks.table: %s.%s has missing columns: %s that are defined in the entity class: %s\", " - + "keyspaceId, tableId, missingTableCqlNames, entityClassName)"); - methodBuilder.beginControlFlow("if (!missingTableCqlNames.isEmpty())"); - methodBuilder.addStatement( - "throw new $1T($2L)", IllegalArgumentException.class, missingCqlColumnExceptionMessage); - methodBuilder.endControlFlow(); - } - - private void generateMissingPKsCheck(MethodSpec.Builder methodBuilder) { - methodBuilder.addComment("validation of missing PKs"); - List expectedCqlPKs = - entityDefinition.getPartitionKey().stream() - .map(PropertyDefinition::getCqlName) - .collect(Collectors.toList()); - - methodBuilder.addStatement( - "$1T<$2T> expectedCqlPKs = new $3T<>()", List.class, CqlIdentifier.class, ArrayList.class); - for (CodeBlock expectedCqlName : expectedCqlPKs) { - methodBuilder.addStatement( - "expectedCqlPKs.add($1T.fromCql($2L))", CqlIdentifier.class, expectedCqlName); - } - methodBuilder.addStatement( - "$1T<$2T> missingTablePksNames = findMissingColumns(expectedCqlPKs, tableMetadata.get().getPartitionKey())", - List.class, - CqlIdentifier.class); - - // throw if there are any missing PK columns - CodeBlock missingCqlColumnExceptionMessage = - CodeBlock.of( - "String.format(\"The CQL ks.table: %s.%s has missing Primary Key columns: %s that are defined in the entity class: %s\", " - + "keyspaceId, tableId, missingTablePksNames, entityClassName)"); - methodBuilder.beginControlFlow("if (!missingTablePksNames.isEmpty())"); - methodBuilder.addStatement( - "throw new $1T($2L)", IllegalArgumentException.class, 
missingCqlColumnExceptionMessage); - methodBuilder.endControlFlow(); - } - - private void generateMissingClusteringColumnsCheck(MethodSpec.Builder methodBuilder) { - List expectedCqlClusteringColumns = - entityDefinition.getClusteringColumns().stream() - .map(PropertyDefinition::getCqlName) - .collect(Collectors.toList()); - - if (!expectedCqlClusteringColumns.isEmpty()) { - methodBuilder.addComment("validation of missing Clustering Columns"); - methodBuilder.addStatement( - "$1T<$2T> expectedCqlClusteringColumns = new $3T<>()", - List.class, - CqlIdentifier.class, - ArrayList.class); - for (CodeBlock expectedCqlName : expectedCqlClusteringColumns) { - methodBuilder.addStatement( - "expectedCqlClusteringColumns.add($1T.fromCql($2L))", - CqlIdentifier.class, - expectedCqlName); - } - - methodBuilder.addStatement( - "$1T<$2T> missingTableClusteringColumnNames = findMissingColumns(expectedCqlClusteringColumns, tableMetadata.get().getClusteringColumns().keySet())", - List.class, - CqlIdentifier.class); - - // throw if there are any missing Clustering Columns columns - CodeBlock missingCqlColumnExceptionMessage = - CodeBlock.of( - "String.format(\"The CQL ks.table: %s.%s has missing Clustering columns: %s that are defined in the entity class: %s\", " - + "keyspaceId, tableId, missingTableClusteringColumnNames, entityClassName)"); - methodBuilder.beginControlFlow("if (!missingTableClusteringColumnNames.isEmpty())"); - methodBuilder.addStatement( - "throw new $1T($2L)", IllegalArgumentException.class, missingCqlColumnExceptionMessage); - methodBuilder.endControlFlow(); - } - } - - // Finds out missingTableCqlNames - columns that are present in Entity Mapping but NOT present in - // UDT table - private void validateColumnsInUdt(MethodSpec.Builder methodBuilder, boolean generateElse) { - if (generateElse) { - methodBuilder.beginControlFlow("else if (userDefinedType.isPresent())"); - } else { - methodBuilder.beginControlFlow("if (userDefinedType.isPresent())"); - } - - 
generateUdtMissingColumnsCheck(methodBuilder); - - generateUdtColumnsTypeCheck(methodBuilder); - - methodBuilder.endControlFlow(); - } - - private void generateUdtColumnsTypeCheck(MethodSpec.Builder methodBuilder) { - methodBuilder.addComment("validation of UDT types"); - generateExpectedTypesPerColumn(methodBuilder); - - methodBuilder.addStatement( - "$1T<$2T> expectedColumns = userDefinedType.get().getFieldNames()", - List.class, - CqlIdentifier.class); - methodBuilder.addStatement( - "$1T<$2T> expectedTypes = userDefinedType.get().getFieldTypes()", - List.class, - DataType.class); - - methodBuilder.addStatement( - "$1T<$2T> missingTableTypes = findTypeMismatches(expectedTypesPerColumn, expectedColumns, expectedTypes, context.getSession().getContext().getCodecRegistry())", - List.class, - String.class); - methodBuilder.addStatement( - "throwMissingUdtTypesIfNotEmpty(missingTableTypes, keyspaceId, tableId, entityClassName)"); - } - - private void generateUdtMissingColumnsCheck(MethodSpec.Builder methodBuilder) { - methodBuilder.addComment("validation of UDT columns"); - methodBuilder.addStatement( - "$1T<$2T> columns = userDefinedType.get().getFieldNames()", - List.class, - CqlIdentifier.class); - - methodBuilder.addStatement( - "$1T<$2T> missingTableCqlNames = findMissingCqlIdentifiers(expectedCqlNames, columns)", - List.class, - CqlIdentifier.class); - - // Throw if there are any missingTableCqlNames - CodeBlock missingCqlUdtExceptionMessage = - CodeBlock.of( - "String.format(\"The CQL ks.udt: %s.%s has missing columns: %s that are defined in the entity class: %s\", " - + "keyspaceId, tableId, missingTableCqlNames, entityClassName)"); - methodBuilder.beginControlFlow("if (!missingTableCqlNames.isEmpty())"); - methodBuilder.addStatement( - "throw new $1T($2L)", IllegalArgumentException.class, missingCqlUdtExceptionMessage); - methodBuilder.endControlFlow(); - } - - private void generateExpectedTypesPerColumn(MethodSpec.Builder methodBuilder) { - 
methodBuilder.addStatement( - "$1T<$2T, $3T> expectedTypesPerColumn = new $4T<>()", - Map.class, - CqlIdentifier.class, - GenericType.class, - LinkedHashMap.class); - - Map expectedTypesPerColumn = - entityDefinition.getAllColumns().stream() - .collect( - Collectors.toMap(PropertyDefinition::getCqlName, v -> v.getType().asRawTypeName())); - - for (Map.Entry expected : expectedTypesPerColumn.entrySet()) { - methodBuilder.addStatement( - "expectedTypesPerColumn.put($1T.fromCql($2L), $3L)", - CqlIdentifier.class, - expected.getKey(), - entityHelperGenerator.addGenericTypeConstant(expected.getValue().box())); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectByPrimaryKeyMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectByPrimaryKeyMethodGenerator.java deleted file mode 100644 index 4e614177637..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectByPrimaryKeyMethodGenerator.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperSelectByPrimaryKeyMethodGenerator implements MethodGenerator { - - @Override - public Optional generate() { - MethodSpec.Builder selectByPrimaryKeyBuilder = - MethodSpec.methodBuilder("selectByPrimaryKey") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(Select.class) - .addStatement("return selectByPrimaryKeyParts(primaryKeys.size())"); - - return Optional.of(selectByPrimaryKeyBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectByPrimaryKeyPartsMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectByPrimaryKeyPartsMethodGenerator.java deleted file mode 100644 index 1b45517f9df..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectByPrimaryKeyPartsMethodGenerator.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperSelectByPrimaryKeyPartsMethodGenerator implements MethodGenerator { - - @Override - public Optional generate() { - MethodSpec.Builder selectByPrimaryKeyPartsBuilder = - MethodSpec.methodBuilder("selectByPrimaryKeyParts") - .addModifiers(Modifier.PUBLIC) - .addParameter(TypeName.INT, "parameterCount") - .returns(Select.class); - - selectByPrimaryKeyPartsBuilder.addStatement("$1T select = selectStart()", Select.class); - selectByPrimaryKeyPartsBuilder.beginControlFlow( - "for (int i = 0; i < parameterCount && i < " + "primaryKeys.size(); i++)"); - selectByPrimaryKeyPartsBuilder.addStatement( - "$1T columnName = primaryKeys.get(i)", String.class); - selectByPrimaryKeyPartsBuilder.addStatement( - "select = select.whereColumn(columnName).isEqualTo($1T.bindMarker(columnName))", - QueryBuilder.class); - selectByPrimaryKeyPartsBuilder.endControlFlow(); - selectByPrimaryKeyPartsBuilder.addStatement("return select"); - - return Optional.of(selectByPrimaryKeyPartsBuilder.build()); - } -} diff --git 
a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectStartMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectStartMethodGenerator.java deleted file mode 100644 index 91848a13558..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSelectStartMethodGenerator.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.datastax.oss.driver.api.querybuilder.select.SelectFrom; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperSelectStartMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - - public EntityHelperSelectStartMethodGenerator(EntityDefinition entityDefinition) { - this.entityDefinition = entityDefinition; - } - - @Override - public Optional generate() { - MethodSpec.Builder selectStartBuilder = - MethodSpec.methodBuilder("selectStart") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(Select.class) - .addStatement("throwIfKeyspaceMissing()") - .addStatement( - "$1T selectFrom = (keyspaceId == null)\n" - + "? 
$2T.selectFrom(tableId)\n" - + ": $2T.selectFrom(keyspaceId, tableId)", - SelectFrom.class, - QueryBuilder.class) - .addCode("$[return selectFrom"); - - for (PropertyDefinition property : entityDefinition.getAllColumns()) { - selectStartBuilder.addCode("\n.column($L)", property.getSelector()); - } - for (PropertyDefinition property : entityDefinition.getComputedValues()) { - selectStartBuilder.addCode( - "\n.raw($L).as($L)", property.getSelector(), property.getCqlName()); - } - selectStartBuilder.addCode("$];\n"); - return Optional.of(selectStartBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSetMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSetMethodGenerator.java deleted file mode 100644 index 841d4be34f9..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperSetMethodGenerator.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.BindableHandlingSharedCode; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeVariableName; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperSetMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - private final BindableHandlingSharedCode enclosingClass; - - public EntityHelperSetMethodGenerator( - EntityDefinition entityDefinition, BindableHandlingSharedCode enclosingClass) { - this.entityDefinition = entityDefinition; - this.enclosingClass = enclosingClass; - } - - @Override - public Optional generate() { - - // The method's type variable: > - TypeVariableName settableT = TypeVariableName.get("SettableT"); - settableT = - settableT.withBounds( - ParameterizedTypeName.get(ClassName.get(SettableByName.class), settableT)); - - MethodSpec.Builder injectBuilder = - MethodSpec.methodBuilder("set") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .addTypeVariable(settableT) - .addParameter(ParameterSpec.builder(entityDefinition.getClassName(), "entity").build()) - .addParameter(ParameterSpec.builder(settableT, "target").build()) - .addParameter( - ParameterSpec.builder(NullSavingStrategy.class, "nullSavingStrategy").build()) - 
.addParameter(ParameterSpec.builder(TypeName.BOOLEAN, "lenient").build()) - .returns(settableT); - - CodeBlock.Builder injectBodyBuilder = CodeBlock.builder(); - for (PropertyDefinition property : entityDefinition.getAllColumns()) { - - injectBodyBuilder.beginControlFlow( - "if (!lenient || hasProperty(target, $L))", property.getCqlName()); - - GeneratedCodePatterns.setValue( - property.getCqlName(), - property.getType(), - CodeBlock.of("entity.$L()", property.getGetterName()), - "target", - injectBodyBuilder, - enclosingClass, - true, - true); - - injectBodyBuilder.endControlFlow(); - } - injectBodyBuilder.addStatement("return target"); - return Optional.of(injectBuilder.addCode(injectBodyBuilder.build()).build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperUpdateByPrimaryKeyMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperUpdateByPrimaryKeyMethodGenerator.java deleted file mode 100644 index e4cc2978c76..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperUpdateByPrimaryKeyMethodGenerator.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.querybuilder.update.DefaultUpdate; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperUpdateByPrimaryKeyMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - - EntityHelperUpdateByPrimaryKeyMethodGenerator(EntityDefinition entityDefinition) { - this.entityDefinition = entityDefinition; - } - - @Override - public Optional generate() { - MethodSpec.Builder methodBuilder = - MethodSpec.methodBuilder("updateByPrimaryKey") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(DefaultUpdate.class) - .addCode("$[return (($T)updateStart()", DefaultUpdate.class); - - for (PropertyDefinition property : entityDefinition.getPrimaryKey()) { - methodBuilder.addCode( - "\n.where($1T.column($2L).isEqualTo($3T.bindMarker($2L)))", - Relation.class, - property.getCqlName(), - QueryBuilder.class); - } - - methodBuilder.addCode(")"); - methodBuilder.addCode("$];\n"); - return Optional.of(methodBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperUpdateStartMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperUpdateStartMethodGenerator.java deleted file mode 100644 index f471080f843..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityHelperUpdateStartMethodGenerator.java +++ /dev/null @@ -1,74 +0,0 
@@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.update.UpdateStart; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.querybuilder.update.DefaultUpdate; -import com.squareup.javapoet.MethodSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; - -public class EntityHelperUpdateStartMethodGenerator implements MethodGenerator { - - private final EntityDefinition entityDefinition; - - EntityHelperUpdateStartMethodGenerator(EntityDefinition entityDefinition) { - this.entityDefinition = entityDefinition; - } - - @Override - public Optional generate() { - MethodSpec.Builder updateBuilder = - MethodSpec.methodBuilder("updateStart") - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(DefaultUpdate.class); - - if (!entityDefinition.getRegularColumns().iterator().hasNext()) { - updateBuilder.addStatement( - 
"throw new $T($S)", - MapperException.class, - String.format( - "Entity %s does not have any non PK columns. %s is not possible", - entityDefinition.getClassName().simpleName(), Update.class.getSimpleName())); - } else { - updateBuilder - .addStatement("throwIfKeyspaceMissing()") - .addStatement( - "$1T update = (keyspaceId == null)\n" - + "? $2T.update(tableId)\n" - + ": $2T.update(keyspaceId, tableId)", - UpdateStart.class, - QueryBuilder.class) - .addCode("$[return (($1T)update", DefaultUpdate.class); - - for (PropertyDefinition property : entityDefinition.getRegularColumns()) { - // we cannot use getAllColumns because update cannot SET for PKs - updateBuilder.addCode( - "\n.setColumn($1L, $2T.bindMarker($1L))", property.getCqlName(), QueryBuilder.class); - } - updateBuilder.addCode(")"); - updateBuilder.addCode("$];\n"); - } - return Optional.of(updateBuilder.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/PropertyDefinition.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/PropertyDefinition.java deleted file mode 100644 index 0ae3559301f..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/entity/PropertyDefinition.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.PropertyType; -import com.squareup.javapoet.CodeBlock; - -/** - * Defines a property belonging to an {@link Entity}, how it should be used in forming a CQL query, - * and how to extract and set the value of the property on the {@link Entity}. - */ -public interface PropertyDefinition { - - /** - * @return the name of the property, in the JavaBeans sense. In other words this is {@link - * #getGetterName()} minus the "get" prefix and decapitalized. - */ - String getJavaName(); - - /** - * @return A Java snippet that produces the corresponding expression in a SELECT - * statement, for example: - *
      - *
    • "id" in selectFrom.column("id") for a regular column. - *
    • "writetime(v)" in selectFrom.raw("writetime(v)") for a - * computed value. - *
    - */ - CodeBlock getSelector(); - - /** - * @return A Java snippet that produces the name of the property in a {@link GettableByName} or - * {@link SettableByName}, for example "id" in row.get("id"). - */ - CodeBlock getCqlName(); - - /** - * @return The name of the "get" method associated with this property used to retrieve the value - * of the property from the entity. - */ - String getGetterName(); - - /** - * @return The name of the "set" method associated with this property used to update the value of - * the property on the entity, or {@code null} if the entity was marked as not {@link - * PropertyStrategy#mutable()}. - */ - String getSetterName(); - - /** - * @return The {@link PropertyType} of this definition, which dictates how to interact with this - * property. - */ - PropertyType getType(); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperBuilderGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperBuilderGenerator.java deleted file mode 100644 index a5a2243b74f..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperBuilderGenerator.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.internal.mapper.DefaultMapperContext; -import com.datastax.oss.driver.internal.mapper.processor.GeneratedNames; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.SingleFileCodeGenerator; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.JavaFile; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.TypeElement; - -public class MapperBuilderGenerator extends SingleFileCodeGenerator { - - private final ClassName builderName; - private final TypeElement interfaceElement; - - public MapperBuilderGenerator(TypeElement interfaceElement, ProcessorContext context) { - super(context); - this.builderName = GeneratedNames.mapperBuilder(interfaceElement); - this.interfaceElement = interfaceElement; - } - - @Override - protected ClassName getPrincipalTypeName() { - return builderName; - } - - protected Class getSessionClass() { - return CqlSession.class; - } - - @Override - protected JavaFile.Builder getContents() { - TypeSpec.Builder classContents = - TypeSpec.classBuilder(builderName) - .superclass( - ParameterizedTypeName.get( - ClassName.get(MapperBuilder.class), ClassName.get(interfaceElement))) - .addJavadoc( - "Builds an instance of {@link $T} wrapping a driver {@link $T}.", - interfaceElement, - getSessionClass()) - .addJavadoc(JAVADOC_PARAGRAPH_SEPARATOR) - .addJavadoc(JAVADOC_GENERATED_WARNING) - .addAnnotation( - 
AnnotationSpec.builder(SuppressWarnings.class) - .addMember("value", "\"all\"") - .build()) - .addModifiers(Modifier.PUBLIC) - .addMethod( - MethodSpec.constructorBuilder() - .addModifiers(Modifier.PUBLIC) - .addParameter(getSessionClass(), "session") - .addStatement("super(session)") - .build()) - .addMethod( - MethodSpec.methodBuilder("build") - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Override.class) - .returns(ClassName.get(interfaceElement)) - .addStatement( - "$1T context = new $1T(session, defaultKeyspaceId, " - + "defaultExecutionProfileName, defaultExecutionProfile, customState)", - DefaultMapperContext.class) - .addStatement( - "return new $T(context)", - GeneratedNames.mapperImplementation(interfaceElement)) - .build()); - return JavaFile.builder(builderName.packageName(), classContents.build()); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperDaoFactoryMethodGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperDaoFactoryMethodGenerator.java deleted file mode 100644 index 7ad5f018f5f..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperDaoFactoryMethodGenerator.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoProfile; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.internal.mapper.DaoCacheKey; -import com.datastax.oss.driver.internal.mapper.processor.GeneratedNames; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.generation.GeneratedCodePatterns; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import java.util.Optional; -import javax.lang.model.element.Element; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -/** - * Generates the implementation of a DAO-producing method in a {@link Mapper}-annotated interface. 
- */ -public class MapperDaoFactoryMethodGenerator implements MethodGenerator { - - private final ExecutableElement methodElement; - private final MapperImplementationSharedCode enclosingClass; - private final ProcessorContext context; - - public MapperDaoFactoryMethodGenerator( - ExecutableElement methodElement, - MapperImplementationSharedCode enclosingClass, - ProcessorContext context) { - this.methodElement = methodElement; - this.enclosingClass = enclosingClass; - this.context = context; - } - - @Override - public Optional generate() { - - // Validate the return type, which tells us what DAO to build, and whether the method should be - // async. - ClassName daoImplementationName = null; - boolean isAsync = false; - TypeMirror returnTypeMirror = methodElement.getReturnType(); - if (returnTypeMirror.getKind() == TypeKind.DECLARED) { - DeclaredType declaredReturnType = (DeclaredType) returnTypeMirror; - if (declaredReturnType.getTypeArguments().isEmpty()) { - Element returnTypeElement = declaredReturnType.asElement(); - if (returnTypeElement.getAnnotation(Dao.class) != null) { - daoImplementationName = - GeneratedNames.daoImplementation(((TypeElement) returnTypeElement)); - } - } else if (context.getClassUtils().isFuture(declaredReturnType)) { - TypeMirror typeArgument = declaredReturnType.getTypeArguments().get(0); - if (typeArgument.getKind() == TypeKind.DECLARED) { - Element typeArgumentElement = ((DeclaredType) typeArgument).asElement(); - if (typeArgumentElement.getAnnotation(Dao.class) != null) { - daoImplementationName = - GeneratedNames.daoImplementation(((TypeElement) typeArgumentElement)); - isAsync = true; - } - } - } - } - if (daoImplementationName == null) { - context - .getMessager() - .error( - methodElement, - "Invalid return type: %s methods must return a %s-annotated interface, " - + "or future thereof", - DaoFactory.class.getSimpleName(), - Dao.class.getSimpleName()); - return Optional.empty(); - } - - // Validate the arguments - String 
keyspaceArgumentName = null; - String tableArgumentName = null; - String profileArgumentName = null; - boolean profileIsClass = false; - - for (VariableElement parameterElement : methodElement.getParameters()) { - if (parameterElement.getAnnotation(DaoKeyspace.class) != null) { - keyspaceArgumentName = - validateKeyspaceOrTableParameter( - parameterElement, keyspaceArgumentName, DaoKeyspace.class, context); - if (keyspaceArgumentName == null) { - return Optional.empty(); - } - } else if (parameterElement.getAnnotation(DaoTable.class) != null) { - tableArgumentName = - validateKeyspaceOrTableParameter( - parameterElement, tableArgumentName, DaoTable.class, context); - if (tableArgumentName == null) { - return Optional.empty(); - } - } else if (parameterElement.getAnnotation(DaoProfile.class) != null) { - - profileArgumentName = - validateExecutionProfile(parameterElement, profileArgumentName, context); - profileIsClass = - context.getClassUtils().isSame(parameterElement.asType(), DriverExecutionProfile.class); - if (profileArgumentName == null) { - return Optional.empty(); - } - } else { - context - .getMessager() - .error( - methodElement, - "Invalid parameter annotations: " - + "%s method parameters must be annotated with @%s, @%s or @%s", - DaoFactory.class.getSimpleName(), - DaoKeyspace.class.getSimpleName(), - DaoTable.class.getSimpleName(), - DaoProfile.class.getSimpleName()); - return Optional.empty(); - } - } - boolean isCachedByMethodArguments = - (keyspaceArgumentName != null || tableArgumentName != null || profileArgumentName != null); - - TypeName returnTypeName = ClassName.get(methodElement.getReturnType()); - String suggestedFieldName = methodElement.getSimpleName() + "Cache"; - String fieldName = - isCachedByMethodArguments - ? 
enclosingClass.addDaoMapField(suggestedFieldName, returnTypeName) - : enclosingClass.addDaoSimpleField( - suggestedFieldName, returnTypeName, daoImplementationName, isAsync); - - MethodSpec.Builder overridingMethodBuilder = GeneratedCodePatterns.override(methodElement); - - if (isCachedByMethodArguments) { - // DaoCacheKey key = new DaoCacheKey(, , , ) - // where ,
    is either the name of the parameter or "(CqlIdentifier)null" - overridingMethodBuilder.addCode("$1T key = new $1T(", DaoCacheKey.class); - if (keyspaceArgumentName == null) { - overridingMethodBuilder.addCode("($T)null", CqlIdentifier.class); - } else { - overridingMethodBuilder.addCode("$L", keyspaceArgumentName); - } - overridingMethodBuilder.addCode(", "); - if (tableArgumentName == null) { - overridingMethodBuilder.addCode("($T)null", CqlIdentifier.class); - } else { - overridingMethodBuilder.addCode("$L", tableArgumentName); - } - overridingMethodBuilder.addCode(", "); - if (profileArgumentName == null) { - overridingMethodBuilder.addCode("null, null);\n"); - } else { - if (profileIsClass) { - overridingMethodBuilder.addCode("null, $L);\n", profileArgumentName); - } else { - overridingMethodBuilder.addCode("$L, null);\n", profileArgumentName); - } - } - - overridingMethodBuilder.addStatement( - "return $L.computeIfAbsent(key, " - + "k -> $T.$L(context.withDaoParameters(k.getKeyspaceId(), k.getTableId(), " - + "k.getExecutionProfileName(), k.getExecutionProfile())))", - fieldName, - daoImplementationName, - isAsync ? 
"initAsync" : "init"); - } else { - overridingMethodBuilder.addStatement("return $L.get()", fieldName); - } - return Optional.of(overridingMethodBuilder.build()); - } - - private String validateKeyspaceOrTableParameter( - VariableElement candidate, String previous, Class annotation, ProcessorContext context) { - if (!isSingleAnnotation(candidate, previous, annotation, context)) { - return null; - } - TypeMirror type = candidate.asType(); - if (!context.getClassUtils().isSame(type, String.class) - && !context.getClassUtils().isSame(type, CqlIdentifier.class)) { - context - .getMessager() - .error( - candidate, - "Invalid parameter type: @%s-annotated parameter of %s methods must be of type %s or %s", - annotation.getSimpleName(), - DaoFactory.class.getSimpleName(), - String.class.getSimpleName(), - CqlIdentifier.class.getSimpleName()); - return null; - } - return candidate.getSimpleName().toString(); - } - - private String validateExecutionProfile( - VariableElement candidate, String previous, ProcessorContext context) { - if (!isSingleAnnotation(candidate, previous, DaoProfile.class, context)) { - return null; - } - TypeMirror type = candidate.asType(); - if (!context.getClassUtils().isSame(type, String.class) - && !context.getClassUtils().isSame(type, DriverExecutionProfile.class)) { - context - .getMessager() - .error( - candidate, - "Invalid parameter type: @%s-annotated parameter of %s methods must be of type %s or %s ", - DaoProfile.class.getSimpleName(), - DaoFactory.class.getSimpleName(), - String.class.getSimpleName(), - DriverExecutionProfile.class.getSimpleName()); - return null; - } - return candidate.getSimpleName().toString(); - } - - private boolean isSingleAnnotation( - VariableElement candidate, String previous, Class annotation, ProcessorContext context) { - if (previous != null) { - context - .getMessager() - .error( - candidate, - "Invalid parameter annotations: " - + "only one %s method parameter can be annotated with @%s", - 
DaoFactory.class.getSimpleName(), - annotation.getSimpleName()); - return false; - } - return true; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperGenerator.java deleted file mode 100644 index d9d3632a16b..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperGenerator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.internal.mapper.processor.CodeGenerator; -import com.datastax.oss.driver.internal.mapper.processor.CodeGeneratorFactory; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import javax.lang.model.element.TypeElement; - -/** Entry point to generate all the types related to a {@link Mapper}-annotated interface. 
*/ -public class MapperGenerator implements CodeGenerator { - - private final TypeElement interfaceElement; - private final ProcessorContext context; - - public MapperGenerator(TypeElement interfaceElement, ProcessorContext context) { - this.interfaceElement = interfaceElement; - this.context = context; - } - - @Override - public void generate() { - CodeGeneratorFactory factory = context.getCodeGeneratorFactory(); - factory.newMapperBuilder(interfaceElement).generate(); - factory.newMapperImplementation(interfaceElement).generate(); - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperImplementationGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperImplementationGenerator.java deleted file mode 100644 index 3300335fbf9..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperImplementationGenerator.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
/**
 * Generates the implementation class for a mapper-annotated interface.
 *
 * <p>Method generators (created through the {@code CodeGeneratorFactory}) call back into this
 * class via {@link MapperImplementationSharedCode} to request class-level DAO cache fields; the
 * requests are recorded in {@link #daoSimpleFields} / {@link #daoMapFields} and materialized when
 * {@link #getContents()} builds the final type.
 */
public class MapperImplementationGenerator extends SingleFileCodeGenerator
    implements MapperImplementationSharedCode {

  private final TypeElement interfaceElement;
  private final ClassName className;
  // Guarantees generated field names are unique even if two methods suggest the same name.
  private final NameIndex nameIndex = new NameIndex();
  // Field requests accumulated by method generators, flushed in getContents():
  private final List<DaoSimpleField> daoSimpleFields = new ArrayList<>();
  private final List<DaoMapField> daoMapFields = new ArrayList<>();

  public MapperImplementationGenerator(TypeElement interfaceElement, ProcessorContext context) {
    super(context);
    this.interfaceElement = interfaceElement;
    className = GeneratedNames.mapperImplementation(interfaceElement);
  }

  @Override
  public String addDaoSimpleField(
      String suggestedFieldName,
      TypeName fieldType,
      TypeName daoImplementationType,
      boolean isAsync) {
    String fieldName = nameIndex.uniqueField(suggestedFieldName);
    daoSimpleFields.add(new DaoSimpleField(fieldName, fieldType, daoImplementationType, isAsync));
    return fieldName;
  }

  @Override
  public String addDaoMapField(String suggestedFieldName, TypeName mapValueType) {
    String fieldName = nameIndex.uniqueField(suggestedFieldName);
    daoMapFields.add(new DaoMapField(fieldName, mapValueType));
    return fieldName;
  }

  @Override
  protected ClassName getPrincipalTypeName() {
    return className;
  }

  /**
   * Builds the full generated type: one implemented method per abstract interface method, a
   * constructor taking the mapper context, and the DAO cache fields requested by the method
   * generators.
   */
  @Override
  protected JavaFile.Builder getContents() {

    TypeSpec.Builder classContents =
        TypeSpec.classBuilder(className)
            .addJavadoc(
                "Do not instantiate this class directly, use {@link $T} instead.",
                GeneratedNames.mapperBuilder(interfaceElement))
            .addJavadoc(JAVADOC_PARAGRAPH_SEPARATOR)
            .addJavadoc(JAVADOC_GENERATED_WARNING)
            .addAnnotation(
                AnnotationSpec.builder(SuppressWarnings.class)
                    .addMember("value", "\"all\"")
                    .build())
            .addModifiers(Modifier.PUBLIC)
            .addSuperinterface(ClassName.get(interfaceElement));

    // Generate an implementation for every abstract method of the interface
    // (static and default methods are inherited as-is and skipped here):
    for (Element child : interfaceElement.getEnclosedElements()) {
      if (child.getKind() == ElementKind.METHOD) {
        ExecutableElement methodElement = (ExecutableElement) child;
        Set<Modifier> modifiers = methodElement.getModifiers();
        if (!modifiers.contains(Modifier.STATIC) && !modifiers.contains(Modifier.DEFAULT)) {
          Optional<MethodGenerator> maybeGenerator =
              context
                  .getCodeGeneratorFactory()
                  .newMapperImplementationMethod(methodElement, interfaceElement, this);
          if (!maybeGenerator.isPresent()) {
            context
                .getMessager()
                .error(
                    methodElement,
                    "Unrecognized method signature: no implementation will be generated");
          } else {
            maybeGenerator.flatMap(MethodGenerator::generate).ifPresent(classContents::addMethod);
          }
        }
      }
    }

    MethodSpec.Builder constructorContents =
        MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC);

    // `context` field + matching constructor parameter/assignment:
    GeneratedCodePatterns.addFinalFieldAndConstructorArgument(
        ClassName.get(DefaultMapperContext.class), "context", classContents, constructorContents);

    // Add all the fields that were requested by DAO method generators:
    for (DaoSimpleField field : daoSimpleFields) {
      // A lazily-initialized single DAO instance, created on first access.
      classContents.addField(
          FieldSpec.builder(
                  ParameterizedTypeName.get(ClassName.get(LazyReference.class), field.type),
                  field.name,
                  Modifier.PRIVATE,
                  Modifier.FINAL)
              .build());
      constructorContents.addStatement(
          "this.$1L = new $2T<>(() -> $3T.$4L(context))",
          field.name,
          LazyReference.class,
          field.daoImplementationType,
          field.isAsync ? "initAsync" : "init");
    }
    for (DaoMapField field : daoMapFields) {
      // A cache of DAO instances keyed by (keyspace, table, profile), for parameterized factories.
      classContents.addField(
          FieldSpec.builder(
                  ParameterizedTypeName.get(
                      ClassName.get(ConcurrentMap.class),
                      TypeName.get(DaoCacheKey.class),
                      field.mapValueType),
                  field.name,
                  Modifier.PRIVATE,
                  Modifier.FINAL)
              .initializer("new $T<>()", ConcurrentHashMap.class)
              .build());
    }
    classContents.addMethod(constructorContents.build());

    return JavaFile.builder(className.packageName(), classContents.build());
  }

  /** Descriptor for a field caching a single DAO instance (see {@link #addDaoSimpleField}). */
  private static class DaoSimpleField {
    final String name;
    final TypeName type;
    final TypeName daoImplementationType;
    final boolean isAsync;

    DaoSimpleField(String name, TypeName type, TypeName daoImplementationType, boolean isAsync) {
      this.name = name;
      this.type = type;
      this.daoImplementationType = daoImplementationType;
      this.isAsync = isAsync;
    }
  }

  /** Descriptor for a map field caching DAOs by cache key (see {@link #addDaoMapField}). */
  private static class DaoMapField {
    final String name;
    final TypeName mapValueType;

    DaoMapField(String name, TypeName mapValueType) {
      this.name = name;
      this.mapValueType = mapValueType;
    }
  }
}
/**
 * Exposes callbacks that allow individual method generators for a {@link Mapper}-annotated class to
 * request the generation of class-level fields that they will use.
 */
public interface MapperImplementationSharedCode {

  /**
   * Requests the generation of a field that caches a single DAO instance, initialized in the
   * constructor.
   *
   * <p>For example:
   *
   * <pre>{@code
   * private final LazyReference<ProductDao> productDaoCache;
   *
   * public MyMapper_Impl(MapperContext context) {
   *   this.productDaoCache = new LazyReference<>(() -> ProductDao_Impl.init(context));
   * }
   * }</pre>
   *
   * @return the name of the new field: {@code suggestedFieldName}, possibly suffixed with an index
   *     to avoid duplicates.
   */
  String addDaoSimpleField(
      String suggestedFieldName,
      TypeName fieldType,
      TypeName daoImplementationType,
      boolean isAsync);

  /**
   * Requests the generation of a map field, for DAOs that can be fetched by keyspace and/or table.
   *
   * <p>For example:
   *
   * <pre>{@code
   * private final ConcurrentMap<DaoCacheKey, ProductDao> productDaoCache = new ConcurrentHashMap<>();
   * }</pre>
   *
   * @return the name of the new field: {@code suggestedFieldName}, possibly suffixed with an index
   *     to avoid duplicates.
   */
  String addDaoMapField(String suggestedFieldName, TypeName mapValueType);
}
/**
 * Looks up annotations on a type or method, searching a precomputed type hierarchy when the
 * annotation is not present on the element itself.
 */
public class AnnotationScanner {

  /**
   * Returns the first occurrence of the given annotation in the hierarchy, paired with the type
   * element it was found on; empty if no type in the hierarchy carries it.
   *
   * <p>The hierarchy's iteration order determines precedence (closest type wins).
   */
  public static <A extends Annotation> Optional<ResolvedAnnotation<A>> getClassAnnotation(
      Class<A> annotationType, Set<TypeElement> typeHierarchy) {
    for (TypeElement element : typeHierarchy) {
      A annotation = element.getAnnotation(annotationType);
      if (annotation != null) {
        return Optional.of(new ResolvedAnnotation<>(annotation, element));
      }
    }
    return Optional.empty();
  }

  /**
   * Returns the given annotation from {@code getMethod}, or failing that from a same-named,
   * non-static, non-private, non-void method declared on any type of the hierarchy.
   *
   * <p>Only the simple name is matched against candidate methods — parameter lists are not
   * compared, so an overload could satisfy the lookup.
   */
  public static <A extends Annotation> Optional<ResolvedAnnotation<A>> getMethodAnnotation(
      Class<A> annotationType, ExecutableElement getMethod, Set<TypeElement> typeHierarchy) {
    // first try evaluating the method as it is.
    A annotation = getMethod.getAnnotation(annotationType);
    if (annotation != null) {
      return Optional.of(new ResolvedAnnotation<>(annotation, getMethod));
    }

    // otherwise navigate the hierarchy until an annotation is found.
    for (TypeElement typeElement : typeHierarchy) {
      for (Element child : typeElement.getEnclosedElements()) {
        Set<Modifier> modifiers = child.getModifiers();
        // Skip anything that can't be an inherited accessor: non-methods, statics, privates.
        if (child.getKind() != ElementKind.METHOD
            || modifiers.contains(Modifier.STATIC)
            || modifiers.contains(Modifier.PRIVATE)) {
          continue;
        }
        ExecutableElement candidateMethod = (ExecutableElement) child;
        TypeMirror typeMirror = candidateMethod.getReturnType();
        if (typeMirror.getKind() == TypeKind.VOID) {
          continue;
        }

        // NOTE(review): Name.equals is only specified to be meaningful for Names produced by the
        // same implementation; both names here come from the same processing run, so this works,
        // but contentEquals would be the more defensive choice — confirm before changing.
        if (candidateMethod.getSimpleName().equals(getMethod.getSimpleName())) {
          annotation = candidateMethod.getAnnotation(annotationType);
          if (annotation != null) {
            return Optional.of(new ResolvedAnnotation<>(annotation, candidateMethod));
          }
        }
      }
    }
    return Optional.empty();
  }
}
/**
 * Helpers to shift the case of the first character of an identifier, following the JavaBeans
 * naming conventions.
 */
public class Capitalizer {

  /**
   * Lower-cases the first character of a name, for example when inferring the name of a field from
   * the name of a getter stripped of its {@code get} prefix.
   *
   * <p>This method respects a weird corner case of the JavaBeans conventions: "in the (unusual)
   * special case when there is more than one character and both the first and second characters
   * are upper case, we leave it alone. Thus {@code FooBah} becomes {@code fooBah} and {@code X}
   * becomes {@code x}, but {@code URL} stays as {@code URL}.".
   */
  public static String decapitalize(String name) {
    return Introspector.decapitalize(Objects.requireNonNull(name));
  }

  /**
   * Upper-cases the first character of a name, for example when inferring the name of a setter
   * from the name of a field.
   *
   * <p>Mirroring the behavior of {@link #decapitalize(String)}, this method returns the string
   * unchanged not only if the first character is uppercase, but also if the second is. For
   * example, if a field is named {@code cId}, we want to produce the setter name {@code setcId()},
   * not {@code setCId()}. Otherwise applying the process in reverse would produce the field name
   * {@code CId}.
   */
  public static String capitalize(String name) {
    Objects.requireNonNull(name);
    boolean leaveUnchanged =
        name.isEmpty()
            || Character.isUpperCase(name.charAt(0))
            || (name.length() > 1 && Character.isUpperCase(name.charAt(1)));
    if (leaveUnchanged) {
      return name;
    }
    return Character.toUpperCase(name.charAt(0)) + name.substring(1);
  }
}
/**
 * Utility methods to work with existing classes in the context of an annotation processing round.
 *
 * <p>Similar to {@link Types} and {@link Elements}, but for classes that are known at compile
 * time, e.g. they belong to the JDK or one of the processor's dependencies.
 */
public class Classes {

  private final Types typeUtils;
  private final Elements elementUtils;

  // Frequently-checked types, resolved once up front rather than on every query:
  private final TypeMirror settableByNameType;
  private final TypeMirror gettableByNameType;
  private final TypeElement listElement;
  private final TypeElement setElement;
  private final TypeElement mapElement;
  private final TypeElement completionStageElement;
  private final TypeElement completableFutureElement;

  public Classes(Types typeUtils, Elements elementUtils) {
    this.typeUtils = typeUtils;
    this.elementUtils = elementUtils;

    // Erasure drops the type parameter so any SettableByName<?> / GettableByName<?> matches:
    this.settableByNameType =
        typeUtils.erasure(elementUtils.getTypeElement(SettableByName.class.getName()).asType());
    this.gettableByNameType =
        typeUtils.erasure(elementUtils.getTypeElement(GettableByName.class.getName()).asType());
    this.listElement = elementUtils.getTypeElement(List.class.getCanonicalName());
    this.setElement = elementUtils.getTypeElement(Set.class.getCanonicalName());
    this.mapElement = elementUtils.getTypeElement(Map.class.getCanonicalName());
    this.completionStageElement =
        elementUtils.getTypeElement(CompletionStage.class.getCanonicalName());
    this.completableFutureElement =
        elementUtils.getTypeElement(CompletableFuture.class.getCanonicalName());
  }

  /** Whether an element is the {@link TypeElement} for the given class. */
  public boolean isSame(Element element, Class<?> javaClass) {
    return element.equals(elementUtils.getTypeElement(javaClass.getName()));
  }

  /**
   * Whether a type mirror is the {@link DeclaredType} for the given class.
   *
   * <p>Note that this is only intended for non-parameterized classes, e.g. {@code String}. If the
   * type mirror is a parameterized {@code List}, it won't match {@code List.class}.
   */
  public boolean isSame(TypeMirror mirror, Class<?> javaClass) {
    return typeUtils.isSameType(mirror, elementUtils.getTypeElement(javaClass.getName()).asType());
  }

  /** Whether a type mirror is assignable to {@code SettableByName} (erased). */
  public boolean implementsSettableByName(TypeMirror mirror) {
    return typeUtils.isAssignable(mirror, settableByNameType);
  }

  /** Whether a type mirror is assignable to {@code GettableByName} (erased). */
  public boolean implementsGettableByName(TypeMirror mirror) {
    return typeUtils.isAssignable(mirror, gettableByNameType);
  }

  /** Whether a type mirror is a parameterized {@code java.util.List}. */
  public boolean isList(DeclaredType declaredType) {
    return declaredType.asElement().equals(listElement);
  }

  /** Whether a type mirror is a parameterized {@code java.util.Set}. */
  public boolean isSet(DeclaredType declaredType) {
    return declaredType.asElement().equals(setElement);
  }

  /** Whether a type mirror is a parameterized {@code java.util.Map}. */
  public boolean isMap(DeclaredType declaredType) {
    return declaredType.asElement().equals(mapElement);
  }

  /**
   * Whether a type mirror is a parameterized Java 8 future ({@code CompletionStage} or {@code
   * CompletableFuture}).
   */
  public boolean isFuture(DeclaredType declaredType) {
    return declaredType.asElement().equals(completionStageElement)
        || declaredType.asElement().equals(completableFutureElement);
  }
}
/** Provides mechanisms for building and traversing a class/interfaces hierarchy. */
public class HierarchyScanner {

  // Placeholder class whose sole purpose is to expose the default values of
  // @HierarchyScanStrategy (the most permissive configuration).
  @HierarchyScanStrategy
  private static final class ClassForDefaultScanStrategy {}

  private static final HierarchyScanStrategy defaultStrategy =
      ClassForDefaultScanStrategy.class.getAnnotation(HierarchyScanStrategy.class);

  /**
   * Resolves the type hierarchy for the given type by first looking for a {@link
   * HierarchyScanStrategy}-annotated class or interface in the given type's hierarchy (traversed
   * with the default — most permissive — strategy until one is found: the type itself, then at
   * each level its superclass followed by its interfaces and the previously collected interfaces'
   * parents), then traversing the hierarchy again applying the resolved strategy.
   *
   * @param typeElement the type whose hierarchy will be traversed.
   * @param context provides utilities for working with types.
   * @return the type hierarchy, ordered from {@code typeElement} to the highest ancestor, as
   *     dictated by the resolved {@link HierarchyScanStrategy}.
   */
  public static Set<TypeMirror> resolveTypeHierarchy(
      TypeElement typeElement, ProcessorContext context) {
    HierarchyScanStrategyOptions hierarchyScanStrategy =
        HierarchyScanner.resolveHierarchyScanStrategy(typeElement, context);

    // ImmutableSet preserves insertion order and deduplicates types reachable through
    // several paths (e.g. an interface implemented at two levels).
    ImmutableSet.Builder<TypeMirror> hierarchy = ImmutableSet.builder();
    traverseFullHierarchy(hierarchyScanStrategy, typeElement, context, hierarchy::add);
    return hierarchy.build();
  }

  /**
   * Finds the first {@link HierarchyScanStrategy} annotation in the hierarchy; falls back to the
   * default strategy when none is declared.
   */
  private static HierarchyScanStrategyOptions resolveHierarchyScanStrategy(
      TypeElement classElement, ProcessorContext context) {
    // Use the default HierarchyScanStrategy to find the configured HierarchyScanStrategy.
    // This is done because the default strategy is the most permissive.

    // traverse hierarchy until a strategy is found.
    final HierarchyScanStrategyOptions defaultOptions =
        new HierarchyScanStrategyOptions(context.getElementUtils());
    final AtomicReference<HierarchyScanStrategyOptions> ref = new AtomicReference<>(defaultOptions);
    traverseHierarchy(
        defaultOptions,
        classElement,
        context,
        (TypeMirror mirror) -> {
          // if we find a strategy, set it and stop traversing (returning false halts the walk).
          Element element = context.getTypeUtils().asElement(mirror);
          for (AnnotationMirror candidate : element.getAnnotationMirrors()) {
            if (context
                .getClassUtils()
                .isSame(candidate.getAnnotationType(), HierarchyScanStrategy.class)) {
              ref.compareAndSet(defaultOptions, new HierarchyScanStrategyOptions(candidate));
              return false;
            }
          }
          return true;
        });

    return ref.get();
  }

  /** Mutable mirror of a {@link HierarchyScanStrategy} annotation's attributes. */
  private static class HierarchyScanStrategyOptions {
    boolean scanAncestors = defaultStrategy.scanAncestors();
    TypeMirror highestAncestor = null;
    boolean includeHighestAncestor = defaultStrategy.includeHighestAncestor();

    HierarchyScanStrategyOptions(Elements elements) {
      // its assumed the defaultStrategy's highestAncestor is Object.class, or something
      // that is already resolvable.
      this.highestAncestor =
          elements.getTypeElement(defaultStrategy.highestAncestor().getName()).asType();
    }

    // Builds options from an explicit @HierarchyScanStrategy found in the hierarchy.
    // Reads the AnnotationMirror's values directly (rather than the reflective annotation)
    // because highestAncestor is a Class value, which is only available as a TypeMirror
    // during annotation processing.
    HierarchyScanStrategyOptions(AnnotationMirror annotationMirror) {
      for (Map.Entry<? extends ExecutableElement, ? extends AnnotationValue> entry :
          annotationMirror.getElementValues().entrySet()) {
        if (entry.getKey().getSimpleName().contentEquals("scanAncestors")) {
          this.scanAncestors = (Boolean) entry.getValue().getValue();
        } else if (entry.getKey().getSimpleName().contentEquals("highestAncestor")) {
          this.highestAncestor = (TypeMirror) entry.getValue().getValue();
        } else if (entry.getKey().getSimpleName().contentEquals("includeHighestAncestor")) {
          this.includeHighestAncestor = (Boolean) entry.getValue().getValue();
        }
      }
    }

    /** Whether the given type is the configured upper bound of the traversal. */
    boolean atHighest(TypeMirror mirror, ProcessorContext context) {
      return highestAncestor != null && context.getTypeUtils().isSameType(mirror, highestAncestor);
    }
  }

  // Adapter: runs the traversal with a consumer that never stops the walk.
  private static void traverseFullHierarchy(
      HierarchyScanStrategyOptions hierarchyScanStrategy,
      TypeElement classElement,
      ProcessorContext context,
      Consumer<TypeMirror> typeConsumer) {
    traverseHierarchy(
        hierarchyScanStrategy,
        classElement,
        context,
        (TypeMirror t) -> {
          typeConsumer.accept(t);
          return true;
        });
  }

  /**
   * Walks the hierarchy, feeding each visited type to {@code typeConsumer}; a {@code false} return
   * from the consumer aborts the walk. At each level: the superclass first, then the current
   * class's interfaces, then the parents of interfaces collected at the previous level.
   */
  private static void traverseHierarchy(
      HierarchyScanStrategyOptions hierarchyScanStrategy,
      TypeElement classElement,
      ProcessorContext context,
      Function<TypeMirror, Boolean> typeConsumer) {

    if (!typeConsumer.apply(classElement.asType()) || !hierarchyScanStrategy.scanAncestors) {
      return;
    }

    Set<TypeMirror> interfacesToScan = Collections.emptySet();
    boolean atHighestClass = hierarchyScanStrategy.atHighest(classElement.asType(), context);
    while (!atHighestClass) {
      // add super class
      TypeMirror superClass = classElement.getSuperclass();
      TypeElement superClassElement = null;
      if (superClass.getKind() == TypeKind.DECLARED) {
        superClassElement = (TypeElement) context.getTypeUtils().asElement(superClass);
        atHighestClass = hierarchyScanStrategy.atHighest(superClass, context);
        if (!atHighestClass || hierarchyScanStrategy.includeHighestAncestor) {
          if (!typeConsumer.apply(superClass)) {
            return;
          }
        }
      } else {
        // at highest level (e.g. the superclass of an interface is NONE), no need to proceed.
        atHighestClass = true;
      }

      // as we encounter interfaces, also keep track of their parents.
      Set<TypeMirror> newInterfacesToScan = Sets.newLinkedHashSet();

      // scan parent classes interfaces and add them.
      scanInterfaces(
          hierarchyScanStrategy,
          classElement.getInterfaces(),
          newInterfacesToScan,
          context,
          typeConsumer);
      // and then add interfaces to scan from previous class interfaces parents.
      scanInterfaces(
          hierarchyScanStrategy, interfacesToScan, newInterfacesToScan, context, typeConsumer);

      // navigate up to the superclass and to the class' encountered interfaces' parents.
      classElement = superClassElement;
      interfacesToScan = newInterfacesToScan;
    }

    // if we've exhausted the class hierarchy, we may still need to consume the interface hierarchy.
    while (!interfacesToScan.isEmpty()) {
      Set<TypeMirror> newInterfacesToScan = Sets.newLinkedHashSet();
      scanInterfaces(
          hierarchyScanStrategy, interfacesToScan, newInterfacesToScan, context, typeConsumer);
      interfacesToScan = newInterfacesToScan;
    }
  }

  /**
   * Feeds each declared interface to the consumer (unless it is the excluded highest ancestor) and
   * collects its own super-interfaces into {@code newInterfacesToScan} for the next round.
   */
  private static void scanInterfaces(
      HierarchyScanStrategyOptions hierarchyScanStrategy,
      Collection<? extends TypeMirror> interfacesToScan,
      Set<TypeMirror> newInterfacesToScan,
      ProcessorContext context,
      Function<TypeMirror, Boolean> typeConsumer) {
    for (TypeMirror interfaceType : interfacesToScan) {
      if (interfaceType.getKind() == TypeKind.DECLARED) {
        TypeElement interfaceElement =
            (TypeElement) context.getTypeUtils().asElement(interfaceType);
        // skip if at highest ancestor.
        boolean atHighest = hierarchyScanStrategy.atHighest(interfaceType, context);
        if (!atHighest || hierarchyScanStrategy.includeHighestAncestor) {
          if (!typeConsumer.apply(interfaceType)) {
            return;
          }
        }
        if (!atHighest) {
          newInterfacesToScan.addAll(interfaceElement.getInterfaces());
        }
      }
    }
  }
}
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.util; - -import java.util.HashMap; -import java.util.Map; - -/** Ensures the unicity of the members of a generated class. */ -public class NameIndex { - - private Map fieldIndices = new HashMap<>(); - - /** - * Generates a field or constant name that is unique across this index: if the given name is free, - * it's used as-is, otherwise an incrementing index is appended to it. - */ - public String uniqueField(String baseName) { - Integer index = fieldIndices.compute(baseName, (k, v) -> (v == null) ? 0 : v + 1); - return (index == 0) ? baseName : baseName + index; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/ResolvedAnnotation.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/ResolvedAnnotation.java deleted file mode 100644 index a6441d739bb..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/ResolvedAnnotation.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.util; - -import java.lang.annotation.Annotation; -import javax.lang.model.element.Element; - -public class ResolvedAnnotation
    { - private final A annotation; - private final Element element; - - public ResolvedAnnotation(A annotation, Element typeElement) { - this.annotation = annotation; - this.element = typeElement; - } - - public A getAnnotation() { - return annotation; - } - - public Element getElement() { - return element; - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/BindableHandlingSharedCode.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/BindableHandlingSharedCode.java deleted file mode 100644 index b0d88d11837..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/BindableHandlingSharedCode.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.util.generation; - -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.internal.mapper.processor.MethodGenerator; -import com.datastax.oss.driver.internal.mapper.processor.util.NameIndex; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.TypeName; - -/** - * Shared class-level code for {@link MethodGenerator} components that generate code that - * manipulates {@link SettableByName} or {@link GettableByName} objects. - * - *

    This allows method generators to create fields, ensuring that they will get reused if another - * method generator also needs them. - */ -public interface BindableHandlingSharedCode { - - NameIndex getNameIndex(); - - /** - * Requests the generation of a constant holding the {@link GenericType} for the given type. - * - *

    If this is called multiple times, only a single constant will be created. - * - * @return the name of the constant. - */ - String addGenericTypeConstant(TypeName type); - - /** - * Requests the generation of a field holding the {@link EntityHelper} that was generated for the - * given entity class, along with the initializing code (where appropriate for this class). - * - *

    If this is called multiple times, only a single field will be created. - * - * @return the name of the field. - */ - String addEntityHelperField(ClassName entityClassName); -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/GeneratedCodePatterns.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/GeneratedCodePatterns.java deleted file mode 100644 index bc832008b31..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/GeneratedCodePatterns.java +++ /dev/null @@ -1,603 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.util.generation; - -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.internal.mapper.processor.util.Capitalizer; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.type.TypeVariable; - -/** A collection of recurring patterns in our generated sources. 
*/ -public class GeneratedCodePatterns { - - /** - * The names of the primitive getters/setters on {@link GettableByName} and {@link - * SettableByName}. - */ - public static final Map PRIMITIVE_ACCESSORS = - ImmutableMap.builder() - .put(TypeName.BOOLEAN, "Boolean") - .put(TypeName.BYTE, "Byte") - .put(TypeName.DOUBLE, "Double") - .put(TypeName.FLOAT, "Float") - .put(TypeName.INT, "Int") - .put(TypeName.LONG, "Long") - .put(TypeName.SHORT, "Short") - .build(); - - private static final String NULL_SAVING_STRATEGY = "nullSavingStrategy"; - - /** Starts the generation of a method that overrides an interface method. */ - public static MethodSpec.Builder override(ExecutableElement interfaceMethod) { - return override(interfaceMethod, Collections.emptyMap()); - } - - public static MethodSpec.Builder override( - ExecutableElement interfaceMethod, Map typeParameters) { - MethodSpec.Builder result = - MethodSpec.methodBuilder(interfaceMethod.getSimpleName().toString()) - .addAnnotation(Override.class) - .addModifiers(Modifier.PUBLIC) - .returns(getTypeName(interfaceMethod.getReturnType(), typeParameters)); - for (VariableElement parameterElement : interfaceMethod.getParameters()) { - TypeName type = getTypeName(parameterElement.asType(), typeParameters); - result.addParameter(type, parameterElement.getSimpleName().toString()); - } - for (TypeMirror thrownType : interfaceMethod.getThrownTypes()) { - result.addException(TypeName.get(thrownType)); - } - return result; - } - - public static TypeName getTypeName(TypeMirror mirror, Map typeParameters) { - if (mirror.getKind() == TypeKind.TYPEVAR) { - TypeVariable typeVariable = (TypeVariable) mirror; - Name name = typeVariable.asElement().getSimpleName(); - TypeElement element = typeParameters.get(name); - return ClassName.get(element); - } else if (mirror.getKind() == TypeKind.DECLARED) { - DeclaredType declaredType = (DeclaredType) mirror; - TypeElement element = (TypeElement) declaredType.asElement(); - if 
(declaredType.getTypeArguments().size() == 0) { - return ClassName.get(element); - } else { - // resolve types for each type argument. - TypeName[] types = new TypeName[declaredType.getTypeArguments().size()]; - for (int i = 0; i < declaredType.getTypeArguments().size(); i++) { - TypeMirror typeArgument = declaredType.getTypeArguments().get(i); - types[i] = getTypeName(typeArgument, typeParameters); - } - return ParameterizedTypeName.get(ClassName.get(element), types); - } - } else { - return ClassName.get(mirror); - } - } - - /** Adds a private final field to a class, that gets initialized through its constructor. */ - public static void addFinalFieldAndConstructorArgument( - TypeName fieldType, - String fieldName, - TypeSpec.Builder classBuilder, - MethodSpec.Builder constructorBuilder) { - - classBuilder.addField( - FieldSpec.builder(fieldType, fieldName, Modifier.PRIVATE, Modifier.FINAL).build()); - constructorBuilder.addParameter(fieldType, fieldName).addStatement("this.$1L = $1L", fieldName); - } - - /** - * Treats a list of method parameters as bind variables in a query, assuming that the bind markers - * have the same names as the parameters, unless they are annotated with {@link CqlName}. - * - *

    The generated code assumes that a {@code BoundStatementBuilder boundStatementBuilder} local - * variable already exists. - */ - public static void bindParameters( - @NonNull List parameters, - CodeBlock.Builder methodBuilder, - BindableHandlingSharedCode enclosingClass, - ProcessorContext context, - boolean useNullSavingStrategy) { - List bindMarkerNames = new ArrayList<>(); - for (VariableElement parameter : parameters) { - CqlName cqlName = parameter.getAnnotation(CqlName.class); - String parameterName; - if (cqlName == null) { - parameterName = parameter.getSimpleName().toString(); - } else { - parameterName = cqlName.value(); - } - bindMarkerNames.add(CodeBlock.of("$S", parameterName)); - } - bindParameters( - parameters, bindMarkerNames, methodBuilder, enclosingClass, context, useNullSavingStrategy); - } - - /** - * Treats a list of method parameters as bind variables in a query, using the provided bind - * markers. - * - *

    The generated code assumes that a {@code BoundStatementBuilder boundStatementBuilder} local - * variable already exists. - */ - public static void bindParameters( - @NonNull List parameters, - @NonNull List bindMarkerNames, - CodeBlock.Builder methodBuilder, - BindableHandlingSharedCode enclosingClass, - ProcessorContext context, - boolean useNullSavingStrategy) { - - assert bindMarkerNames.size() == parameters.size(); - for (int i = 0; i < parameters.size(); i++) { - VariableElement parameter = parameters.get(i); - String parameterName = parameter.getSimpleName().toString(); - PropertyType type = PropertyType.parse(parameter.asType(), context); - setValue( - bindMarkerNames.get(i), - type, - CodeBlock.of("$L", parameterName), - "boundStatementBuilder", - methodBuilder, - enclosingClass, - useNullSavingStrategy, - false); - } - } - - /** - * Generates the code to set a value on a {@link SettableByName} instance. - * - *

    Example: - * - *

    {@code
    -   * target = target.set("id", entity.getId(), UUID.class);
    -   * }
    - * - * @param cqlName the CQL name to set ({@code "id"}) - * @param type the type of the value ({@code UUID}) - * @param valueExtractor the code snippet to extract the value ({@code entity.getId()} - * @param targetName the name of the target {@link SettableByName} instance ({@code target}) - * @param methodBuilder where to add the code - * @param enclosingClass a reference to the parent generator (in case type constants or entity - * helpers are needed) - */ - public static void setValue( - CodeBlock cqlName, - PropertyType type, - CodeBlock valueExtractor, - String targetName, - CodeBlock.Builder methodBuilder, - BindableHandlingSharedCode enclosingClass) { - setValue( - cqlName, type, valueExtractor, targetName, methodBuilder, enclosingClass, false, false); - } - - public static void setValue( - CodeBlock cqlName, - PropertyType type, - CodeBlock valueExtractor, - String targetName, - CodeBlock.Builder methodBuilder, - BindableHandlingSharedCode enclosingClass, - boolean useNullSavingStrategy, - boolean useLeniency) { - - if (type instanceof PropertyType.Simple) { - TypeName typeName = ((PropertyType.Simple) type).typeName; - String primitiveAccessor = GeneratedCodePatterns.PRIMITIVE_ACCESSORS.get(typeName); - if (primitiveAccessor != null) { - // Primitive type: use dedicated setter, since it is optimized to avoid boxing. - // target = target.setInt("length", entity.getLength()); - methodBuilder.addStatement( - "$1L = $1L.set$2L($3L, $4L)", - targetName, - primitiveAccessor, - cqlName, - valueExtractor); // null saving strategy for primitiveSet does not apply - } else if (typeName instanceof ClassName) { - // Unparameterized class: use the generic, class-based setter. - // target = target.set("id", entity.getId(), UUID.class); - generateSetWithClass( - cqlName, valueExtractor, targetName, methodBuilder, typeName, useNullSavingStrategy); - } else { - // Parameterized type: create a constant and use the GenericType-based setter. 
- // private static final GenericType> GENERIC_TYPE = - // new GenericType>(){}; - // target = target.set("names", entity.getNames(), GENERIC_TYPE); - // Note that lists, sets and maps of unparameterized classes also fall under that - // category. Their setter creates a GenericType under the hood, so there's no performance - // advantage in calling them instead of the generic set(). - generateParameterizedSet( - cqlName, - valueExtractor, - targetName, - methodBuilder, - typeName, - enclosingClass, - useNullSavingStrategy); - } - } else if (type instanceof PropertyType.SingleEntity) { - ClassName entityClass = ((PropertyType.SingleEntity) type).entityName; - // Other entity class: the CQL column is a mapped UDT. Example of generated code: - // Dimensions value = entity.getDimensions(); - // if (value != null) { - // UserDefinedType udtType = (UserDefinedType) target.getType("dimensions"); - // UdtValue udtValue = udtType.newValue(); - // dimensionsHelper.set(value, udtValue); - // target = target.setUdtValue("dimensions", udtValue); - // } - - // Generate unique names for our temporary variables. 
Note that they are local so we don't - // strictly need class-wide uniqueness, but it's simpler to reuse the NameIndex - String udtTypeName = enclosingClass.getNameIndex().uniqueField("udtType"); - String udtValueName = enclosingClass.getNameIndex().uniqueField("udtValue"); - String valueName = enclosingClass.getNameIndex().uniqueField("value"); - - methodBuilder - .addStatement("$T $L = $L", entityClass, valueName, valueExtractor) - .beginControlFlow("if ($L != null)", valueName) - .addStatement( - "$1T $2L = ($1T) $3L.getType($4L)", - UserDefinedType.class, - udtTypeName, - targetName, - cqlName) - .addStatement("$T $L = $L.newValue()", UdtValue.class, udtValueName, udtTypeName); - String childHelper = enclosingClass.addEntityHelperField(entityClass); - methodBuilder - // driver doesn't have the ability to send partial UDT, unset values values will be - // serialized to null - set NullSavingStrategy.DO_NOT_SET explicitly - .addStatement( - "$L.set($L, $L, $T.$L, $L)", - childHelper, - valueName, - udtValueName, - NullSavingStrategy.class, - NullSavingStrategy.DO_NOT_SET, - useLeniency ? "lenient" : false) - .addStatement("$1L = $1L.setUdtValue($2L, $3L)", targetName, cqlName, udtValueName); - if (useNullSavingStrategy) { - methodBuilder.nextControlFlow( - "else if ($L == $T.$L)", - NULL_SAVING_STRATEGY, - NullSavingStrategy.class, - NullSavingStrategy.SET_TO_NULL); - } else { - methodBuilder.nextControlFlow("else"); - } - methodBuilder - .addStatement("$1L = $1L.setUdtValue($2L, null)", targetName, cqlName) - .endControlFlow(); - } else { - // Collection of other entity class(es): the CQL column is a collection of mapped UDTs - // Build a copy of the value, encoding all entities into UdtValue instances on the fly. 
- String mappedCollectionName = enclosingClass.getNameIndex().uniqueField("mappedCollection"); - String rawCollectionName = enclosingClass.getNameIndex().uniqueField("rawCollection"); - methodBuilder - .addStatement("$T $L = $L", type.asTypeName(), mappedCollectionName, valueExtractor) - .beginControlFlow("if ($L != null)", mappedCollectionName); - - CodeBlock currentCqlType = CodeBlock.of("$L.getType($L)", targetName, cqlName); - CodeBlock.Builder udtTypesBuilder = CodeBlock.builder(); - CodeBlock.Builder conversionCodeBuilder = CodeBlock.builder(); - convertEntitiesIntoUdts( - mappedCollectionName, - rawCollectionName, - type, - currentCqlType, - udtTypesBuilder, - conversionCodeBuilder, - enclosingClass, - useLeniency); - - methodBuilder - .add(udtTypesBuilder.build()) - .add(conversionCodeBuilder.build()) - .addStatement( - "$1L = $1L.set($2L, $3L, $4L)", - targetName, - cqlName, - rawCollectionName, - enclosingClass.addGenericTypeConstant(type.asRawTypeName())); - if (useNullSavingStrategy) { - methodBuilder.nextControlFlow( - "else if ($L == $T.$L)", - NULL_SAVING_STRATEGY, - NullSavingStrategy.class, - NullSavingStrategy.SET_TO_NULL); - } else { - methodBuilder.nextControlFlow("else"); - } - methodBuilder - .addStatement( - "$1L = $1L.set($2L, null, $3L)", - targetName, - cqlName, - enclosingClass.addGenericTypeConstant(type.asRawTypeName())) - .endControlFlow(); - } - } - - private static void generateParameterizedSet( - CodeBlock cqlName, - CodeBlock valueExtractor, - String targetName, - CodeBlock.Builder methodBuilder, - TypeName typeName, - BindableHandlingSharedCode enclosingClass, - boolean useNullSavingStrategy) { - generateSetWithNullSavingStrategy( - valueExtractor, - methodBuilder, - CodeBlock.of( - "$1L = $1L.set($2L, $3L, $4L)", - targetName, - cqlName, - valueExtractor, - enclosingClass.addGenericTypeConstant(typeName)), - useNullSavingStrategy); - } - - private static void generateSetWithClass( - CodeBlock cqlName, - CodeBlock valueExtractor, 
- String targetName, - CodeBlock.Builder methodBuilder, - TypeName typeName, - boolean useNullSavingStrategy) { - - generateSetWithNullSavingStrategy( - valueExtractor, - methodBuilder, - CodeBlock.of( - "$1L = $1L.set($2L, $3L, $4T.class)", targetName, cqlName, valueExtractor, typeName), - useNullSavingStrategy); - } - - /** - * If this method is invoked with useNullSavingStrategy = true it assumes that NullSavingStrategy - * nullSavingStrategy = ...; variable is already defined on the MethodBuilder. - */ - private static void generateSetWithNullSavingStrategy( - CodeBlock valueExtractor, - CodeBlock.Builder methodBuilder, - CodeBlock nonNullStatement, - boolean useNullSavingStrategy) { - if (useNullSavingStrategy) { - methodBuilder.beginControlFlow( - "if ($1L != null || $2L == $3T.$4L)", - valueExtractor, - NULL_SAVING_STRATEGY, - NullSavingStrategy.class, - NullSavingStrategy.SET_TO_NULL); - methodBuilder.addStatement(nonNullStatement); - methodBuilder.endControlFlow(); - } else { - methodBuilder.addStatement(nonNullStatement); - } - } - - /** - * Shortcut for {@link #setValue(CodeBlock, PropertyType, CodeBlock, String, CodeBlock.Builder, - * BindableHandlingSharedCode)} when the cqlName is a string known at compile time. - */ - public static void setValue( - String cqlName, - PropertyType type, - CodeBlock valueExtractor, - String targetName, - CodeBlock.Builder methodBuilder, - BindableHandlingSharedCode enclosingClass, - boolean useNullSavingStrategy) { - setValue( - CodeBlock.of("$S", cqlName), - type, - valueExtractor, - targetName, - methodBuilder, - enclosingClass, - useNullSavingStrategy, - false); - } - - /** - * Generates the code to convert a collection of mapped entities, for example a {@code Map} into a {@code Map}. - * - * @param mappedObjectName the name of the local variable containing the value to convert. 
- * @param rawObjectName the name of the local variable that will hold the converted value (it does - * not exist yet, this method must generate the declaration). - * @param type the type of the value. - * @param currentCqlType a code snippet to extract the CQL type corresponding to {@code type}. - * @param udtTypesBuilder a code block that comes before the conversion code, and creates local - * variables that extract the required {@link UserDefinedType} instances from the target - * container. - * @param conversionBuilder the code block to generate the conversion code into. - * @param useLeniency whether the 'lenient' boolean variable is in scope. - */ - private static void convertEntitiesIntoUdts( - String mappedObjectName, - String rawObjectName, - PropertyType type, - CodeBlock currentCqlType, - CodeBlock.Builder udtTypesBuilder, - CodeBlock.Builder conversionBuilder, - BindableHandlingSharedCode enclosingClass, - boolean useLeniency) { - - if (type instanceof PropertyType.SingleEntity) { - ClassName entityClass = ((PropertyType.SingleEntity) type).entityName; - String udtTypeName = - enclosingClass - .getNameIndex() - .uniqueField(Capitalizer.decapitalize(entityClass.simpleName()) + "UdtType"); - udtTypesBuilder.addStatement( - "$1T $2L = ($1T) $3L", UserDefinedType.class, udtTypeName, currentCqlType); - - String entityHelperName = enclosingClass.addEntityHelperField(entityClass); - conversionBuilder - .addStatement("$T $L = $L.newValue()", UdtValue.class, rawObjectName, udtTypeName) - // driver doesn't have the ability to send partial UDT, unset values values will be - // serialized to null - set NullSavingStrategy.DO_NOT_SET explicitly - .addStatement( - "$L.set($L, $L, $T.$L, $L)", - entityHelperName, - mappedObjectName, - rawObjectName, - NullSavingStrategy.class, - NullSavingStrategy.DO_NOT_SET, - useLeniency ? 
"lenient" : false); - } else if (type instanceof PropertyType.EntityList) { - TypeName rawCollectionType = type.asRawTypeName(); - conversionBuilder.addStatement( - "$T $L = $T.newArrayListWithExpectedSize($L.size())", - rawCollectionType, - rawObjectName, - Lists.class, - mappedObjectName); - PropertyType mappedElementType = ((PropertyType.EntityList) type).elementType; - String mappedElementName = enclosingClass.getNameIndex().uniqueField("mappedElement"); - conversionBuilder.beginControlFlow( - "for ($T $L: $L)", mappedElementType.asTypeName(), mappedElementName, mappedObjectName); - String rawElementName = enclosingClass.getNameIndex().uniqueField("rawElement"); - convertEntitiesIntoUdts( - mappedElementName, - rawElementName, - mappedElementType, - CodeBlock.of("(($T) $L).getElementType()", ListType.class, currentCqlType), - udtTypesBuilder, - conversionBuilder, - enclosingClass, - useLeniency); - conversionBuilder.addStatement("$L.add($L)", rawObjectName, rawElementName).endControlFlow(); - } else if (type instanceof PropertyType.EntitySet) { - TypeName rawCollectionType = type.asRawTypeName(); - conversionBuilder.addStatement( - "$T $L = $T.newLinkedHashSetWithExpectedSize($L.size())", - rawCollectionType, - rawObjectName, - Sets.class, - mappedObjectName); - PropertyType mappedElementType = ((PropertyType.EntitySet) type).elementType; - String mappedElementName = enclosingClass.getNameIndex().uniqueField("mappedElement"); - conversionBuilder.beginControlFlow( - "for ($T $L: $L)", mappedElementType.asTypeName(), mappedElementName, mappedObjectName); - String rawElementName = enclosingClass.getNameIndex().uniqueField("rawElement"); - convertEntitiesIntoUdts( - mappedElementName, - rawElementName, - mappedElementType, - CodeBlock.of("(($T) $L).getElementType()", SetType.class, currentCqlType), - udtTypesBuilder, - conversionBuilder, - enclosingClass, - useLeniency); - conversionBuilder.addStatement("$L.add($L)", rawObjectName, rawElementName).endControlFlow(); 
- } else if (type instanceof PropertyType.EntityMap) { - TypeName rawCollectionType = type.asRawTypeName(); - conversionBuilder.addStatement( - "$T $L = $T.newLinkedHashMapWithExpectedSize($L.size())", - rawCollectionType, - rawObjectName, - Maps.class, - mappedObjectName); - PropertyType mappedKeyType = ((PropertyType.EntityMap) type).keyType; - PropertyType mappedValueType = ((PropertyType.EntityMap) type).valueType; - String mappedEntryName = enclosingClass.getNameIndex().uniqueField("mappedEntry"); - conversionBuilder.beginControlFlow( - "for ($T $L: $L.entrySet())", - ParameterizedTypeName.get( - ClassName.get(Map.Entry.class), - mappedKeyType.asTypeName(), - mappedValueType.asTypeName()), - mappedEntryName, - mappedObjectName); - String mappedKeyName = CodeBlock.of("$L.getKey()", mappedEntryName).toString(); - String rawKeyName; - if (mappedKeyType instanceof PropertyType.Simple) { - rawKeyName = mappedKeyName; // no conversion, use the instance as-is - } else { - rawKeyName = enclosingClass.getNameIndex().uniqueField("rawKey"); - convertEntitiesIntoUdts( - mappedKeyName, - rawKeyName, - mappedKeyType, - CodeBlock.of("(($T) $L).getKeyType()", MapType.class, currentCqlType), - udtTypesBuilder, - conversionBuilder, - enclosingClass, - useLeniency); - } - String mappedValueName = CodeBlock.of("$L.getValue()", mappedEntryName).toString(); - String rawValueName; - if (mappedValueType instanceof PropertyType.Simple) { - rawValueName = mappedValueName; - } else { - rawValueName = enclosingClass.getNameIndex().uniqueField("rawValue"); - convertEntitiesIntoUdts( - mappedValueName, - rawValueName, - mappedValueType, - CodeBlock.of("(($T) $L).getValueType()", MapType.class, currentCqlType), - udtTypesBuilder, - conversionBuilder, - enclosingClass, - useLeniency); - } - conversionBuilder - .addStatement("$L.put($L, $L)", rawObjectName, rawKeyName, rawValueName) - .endControlFlow(); - } else { - throw new AssertionError("Unsupported type " + type.asTypeName()); - } - } -} 
diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/GenericTypeConstantGenerator.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/GenericTypeConstantGenerator.java deleted file mode 100644 index a9bf077f787..00000000000 --- a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/GenericTypeConstantGenerator.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.util.generation; - -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.mapper.processor.GeneratedNames; -import com.datastax.oss.driver.internal.mapper.processor.util.NameIndex; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeSpec; -import java.util.LinkedHashMap; -import java.util.Map; -import javax.lang.model.element.Modifier; - -/** Helper class for {@link BindableHandlingSharedCode#addGenericTypeConstant(TypeName)}. */ -public class GenericTypeConstantGenerator { - - private final NameIndex nameIndex; - - public GenericTypeConstantGenerator(NameIndex nameIndex) { - this.nameIndex = nameIndex; - } - - private final Map typeConstantNames = new LinkedHashMap<>(); - - public String add(TypeName type) { - return typeConstantNames.computeIfAbsent( - type, k -> nameIndex.uniqueField(GeneratedNames.GENERIC_TYPE_CONSTANT)); - } - - public void generate(TypeSpec.Builder classBuilder) { - for (Map.Entry entry : typeConstantNames.entrySet()) { - TypeName typeParameter = entry.getKey(); - String name = entry.getValue(); - ParameterizedTypeName type = - ParameterizedTypeName.get(ClassName.get(GenericType.class), typeParameter); - classBuilder.addField( - FieldSpec.builder(type, name, Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL) - .initializer("new $T(){}", type) - .build()); - } - } -} diff --git a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/PropertyType.java b/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/PropertyType.java deleted file mode 100644 index 0ae05544d46..00000000000 --- 
a/mapper-processor/src/main/java/com/datastax/oss/driver/internal/mapper/processor/util/generation/PropertyType.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.util.generation; - -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import java.util.List; -import java.util.Map; -import java.util.Set; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -/** - * Wraps the declared type of an entity property (or DAO method parameter) that will be injected - * into a {@link SettableByName}, or extracted from a {@link GettableByName}. - * - *

    The goal is to detect if the type contains other mapped entities, that must be translated into - * UDT values. - */ -public abstract class PropertyType { - - private static final ClassName UDT_VALUE_CLASS_NAME = ClassName.get(UdtValue.class); - public static final ClassName LIST_CLASS_NAME = ClassName.get(List.class); - public static final ClassName SET_CLASS_NAME = ClassName.get(Set.class); - public static final ClassName MAP_CLASS_NAME = ClassName.get(Map.class); - - public static PropertyType parse(TypeMirror typeMirror, ProcessorContext context) { - if (typeMirror.getKind() == TypeKind.DECLARED) { - DeclaredType declaredType = (DeclaredType) typeMirror; - if (declaredType.asElement().getAnnotation(Entity.class) != null) { - return new SingleEntity(declaredType); - } else if (context.getClassUtils().isList(declaredType)) { - PropertyType elementType = parse(declaredType.getTypeArguments().get(0), context); - return (elementType instanceof Simple) - ? new Simple(typeMirror) - : new EntityList(typeMirror, elementType); - } else if (context.getClassUtils().isSet(declaredType)) { - PropertyType elementType = parse(declaredType.getTypeArguments().get(0), context); - return (elementType instanceof Simple) - ? new Simple(typeMirror) - : new EntitySet(typeMirror, elementType); - } else if (context.getClassUtils().isMap(declaredType)) { - PropertyType keyType = parse(declaredType.getTypeArguments().get(0), context); - PropertyType valueType = parse(declaredType.getTypeArguments().get(1), context); - return (keyType instanceof Simple && valueType instanceof Simple) - ? 
new Simple(typeMirror) - : new EntityMap(typeMirror, keyType, valueType); - } - } - return new Simple(typeMirror); - } - - private final TypeMirror typeMirror; - - protected PropertyType(TypeMirror typeMirror) { - this.typeMirror = typeMirror; - } - - public TypeMirror asTypeMirror() { - return typeMirror; - } - - public abstract TypeName asTypeName(); - - /** - * Returns the name of the type we will convert to before saving to the database; that is, - * replacing every entity class by {@code UdtValue}. - */ - public abstract TypeName asRawTypeName(); - - /** - * A type that does not contain any mapped entity. - * - *

    Note that it can still be a collection, for example {@code Map>}. - */ - public static class Simple extends PropertyType { - public final TypeName typeName; - - public Simple(TypeMirror typeMirror) { - super(typeMirror); - this.typeName = ClassName.get(typeMirror); - } - - @Override - public TypeName asTypeName() { - return typeName; - } - - @Override - public TypeName asRawTypeName() { - return typeName; - } - } - - /** A mapped entity. */ - public static class SingleEntity extends PropertyType { - public final ClassName entityName; - - public SingleEntity(DeclaredType declaredType) { - super(declaredType); - this.entityName = (ClassName) TypeName.get(declaredType); - } - - @Override - public TypeName asTypeName() { - return entityName; - } - - @Override - public TypeName asRawTypeName() { - return UDT_VALUE_CLASS_NAME; - } - } - - /** A list of another non-simple type. */ - public static class EntityList extends PropertyType { - public final PropertyType elementType; - - public EntityList(TypeMirror typeMirror, PropertyType elementType) { - super(typeMirror); - this.elementType = elementType; - } - - @Override - public TypeName asTypeName() { - return ParameterizedTypeName.get(LIST_CLASS_NAME, elementType.asTypeName()); - } - - @Override - public TypeName asRawTypeName() { - return ParameterizedTypeName.get(LIST_CLASS_NAME, elementType.asRawTypeName()); - } - } - - /** A set of another non-simple type. 
*/ - public static class EntitySet extends PropertyType { - public final PropertyType elementType; - - public EntitySet(TypeMirror typeMirror, PropertyType elementType) { - super(typeMirror); - this.elementType = elementType; - } - - @Override - public TypeName asTypeName() { - return ParameterizedTypeName.get(SET_CLASS_NAME, elementType.asTypeName()); - } - - @Override - public TypeName asRawTypeName() { - return ParameterizedTypeName.get(SET_CLASS_NAME, elementType.asRawTypeName()); - } - } - - /** A map where either the key type, the value type, or both, are non-simple types. */ - public static class EntityMap extends PropertyType { - public final PropertyType keyType; - public final PropertyType valueType; - - public EntityMap(TypeMirror typeMirror, PropertyType keyType, PropertyType valueType) { - super(typeMirror); - this.keyType = keyType; - this.valueType = valueType; - } - - @Override - public TypeName asTypeName() { - return ParameterizedTypeName.get( - MAP_CLASS_NAME, keyType.asTypeName(), valueType.asTypeName()); - } - - @Override - public TypeName asRawTypeName() { - return ParameterizedTypeName.get( - MAP_CLASS_NAME, keyType.asRawTypeName(), valueType.asRawTypeName()); - } - } -} diff --git a/mapper-processor/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/mapper-processor/src/main/resources/META-INF/services/javax.annotation.processing.Processor deleted file mode 100644 index a7ff54415a6..00000000000 --- a/mapper-processor/src/main/resources/META-INF/services/javax.annotation.processing.Processor +++ /dev/null @@ -1 +0,0 @@ -com.datastax.oss.driver.internal.mapper.processor.MapperProcessor diff --git a/mapper-processor/src/test/java/com/datastax/dse/driver/internal/mapper/processor/DependencyCheckTest.java b/mapper-processor/src/test/java/com/datastax/dse/driver/internal/mapper/processor/DependencyCheckTest.java deleted file mode 100644 index 7aa2accb1fe..00000000000 --- 
a/mapper-processor/src/test/java/com/datastax/dse/driver/internal/mapper/processor/DependencyCheckTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.mapper.processor; - -import com.datastax.dse.driver.internal.DependencyCheckTestBase; -import java.nio.file.Path; -import java.nio.file.Paths; - -public class DependencyCheckTest extends DependencyCheckTestBase { - - @Override - protected Path getDepsTxtPath() { - return Paths.get( - getBaseResourcePathString(), - "target", - "classes", - "com", - "datastax", - "dse", - "driver", - "internal", - "mapper", - "processor", - "deps.txt"); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBaseTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBaseTest.java deleted file mode 100644 index a2660a5ae2d..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBaseTest.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.entity; - -import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; -import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class EntityHelperBaseTest { - - @Test - @UseDataProvider("typesProvider") - public void should_find_not_matching_types( - Map> entityColumns, - Map 
cqlColumns, - List expected) { - // when - List missingTypes = - EntityHelperBase.findTypeMismatches(entityColumns, cqlColumns, CodecRegistry.DEFAULT); - - // then - assertThat(missingTypes).isEqualTo(expected); - } - - @Test - public void should_throw_if_there_is_not_matching_cql_column() { - // given - ImmutableMap> entityColumns = - ImmutableMap.of(CqlIdentifier.fromCql("c1"), GenericType.of(Integer.class)); - ColumnMetadata columnMetadataInt = mock(ColumnMetadata.class); - when(columnMetadataInt.getType()).thenReturn(DataTypes.INT); - ImmutableMap cqlColumns = - ImmutableMap.of(CqlIdentifier.fromCql("c2"), columnMetadataInt); - - // when, then - assertThatThrownBy( - () -> - EntityHelperBase.findTypeMismatches( - entityColumns, cqlColumns, CodecRegistry.DEFAULT)) - .isInstanceOf(AssertionError.class) - .hasMessageContaining("There is no cql column for entity column: c1"); - } - - @DataProvider - public static Object[][] typesProvider() { - ColumnMetadata columnMetadataText = mock(ColumnMetadata.class); - when(columnMetadataText.getType()).thenReturn(DataTypes.TEXT); - ColumnMetadata columnMetadataInt = mock(ColumnMetadata.class); - when(columnMetadataInt.getType()).thenReturn(DataTypes.INT); - - CqlIdentifier c1 = CqlIdentifier.fromCql("c1"); - CqlIdentifier c2 = CqlIdentifier.fromCql("c2"); - return new Object[][] { - { - ImmutableMap.of(c1, GenericType.of(String.class)), - ImmutableMap.of(c1, columnMetadataText), - Collections.emptyList() - }, - { - ImmutableMap.of(c1, GenericType.of(Integer.class)), - ImmutableMap.of(c1, columnMetadataText), - ImmutableList.of("Field: c1, Entity Type: java.lang.Integer, CQL type: TEXT") - }, - { - ImmutableMap.of(c1, GenericType.of(String.class), c2, GenericType.of(Integer.class)), - ImmutableMap.of(c1, columnMetadataText, c2, columnMetadataInt), - Collections.emptyList() - }, - { - ImmutableMap.of(c1, GenericType.of(String.class), c2, GenericType.of(Integer.class)), - ImmutableMap.of(c1, columnMetadataInt, c2, 
columnMetadataText), - ImmutableList.of( - "Field: c1, Entity Type: java.lang.String, CQL type: INT", - "Field: c2, Entity Type: java.lang.Integer, CQL type: TEXT") - } - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/MapperProcessorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/MapperProcessorTest.java deleted file mode 100644 index 3da206f09a2..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/MapperProcessorTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor; - -import static com.google.testing.compile.CompilationSubject.assertThat; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.google.testing.compile.Compilation; -import com.google.testing.compile.Compiler; -import com.squareup.javapoet.JavaFile; -import com.squareup.javapoet.TypeSpec; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import javax.tools.JavaFileObject; - -public abstract class MapperProcessorTest { - - /** - * Launches an in-process execution of javac with {@link MapperProcessor} enabled. - * - * @param packageName the package of the types to process. Note that it is currently not possible - * to process multiple packages (and it's unlikely to be needed in unit tests). - * @param options the compiler options (use to pass -A options to the processor). - * @param typeSpecs the contents of the classes or interfaces to process. - */ - protected Compilation compileWithMapperProcessor( - String packageName, Iterable options, TypeSpec... typeSpecs) { - List files = new ArrayList<>(); - for (TypeSpec typeSpec : typeSpecs) { - files.add(JavaFile.builder(packageName, typeSpec).build().toJavaFileObject()); - } - return Compiler.javac() - .withProcessors(getMapperProcessor()) - .withOptions(options) - .compile(files); - } - - @NonNull - protected MapperProcessor getMapperProcessor() { - return new MapperProcessor(); - } - - /** - * Launches an in-process execution of javac with {@link MapperProcessor} enabled, and custom - * result types disabled. - * - * @param packageName the package of the types to process. Note that it is currently not possible - * to process multiple packages (and it's unlikely to be needed in unit tests). - * @param typeSpecs the contents of the classes or interfaces to process. 
- */ - protected Compilation compileWithMapperProcessor(String packageName, TypeSpec... typeSpecs) { - return compileWithMapperProcessor( - packageName, - ImmutableList.of("-Acom.datastax.oss.driver.mapper.customResults.enabled=false"), - typeSpecs); - } - - protected void should_fail_with_expected_error( - String expectedError, String packageName, TypeSpec... typeSpecs) { - Compilation compilation = compileWithMapperProcessor(packageName, typeSpecs); - assertThat(compilation).hadErrorContaining(expectedError); - } - - protected void should_succeed_with_expected_warning( - String expectedWarning, String packageName, TypeSpec... typeSpecs) { - Compilation compilation = compileWithMapperProcessor(packageName, typeSpecs); - assertThat(compilation).hadWarningContaining(expectedWarning); - } - - protected void should_succeed_without_warnings(String packageName, TypeSpec... typeSpecs) { - Compilation compilation = - compileWithMapperProcessor(packageName, Collections.emptyList(), typeSpecs); - assertThat(compilation).succeededWithoutWarnings(); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoAnnotationTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoAnnotationTest.java deleted file mode 100644 index f8a50cae851..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoAnnotationTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.google.testing.compile.CompilationSubject.assertThat; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.google.testing.compile.Compilation; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; -import javax.tools.StandardLocation; -import org.junit.Test; - -public class DaoAnnotationTest extends MapperProcessorTest { - - @Test - public void should_work_on_nested_interface() { - Compilation compilation = - compileWithMapperProcessor( - "test", - TypeSpec.classBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addType( - TypeSpec.interfaceBuilder("Bar") - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .addAnnotation(Dao.class) - .build()) - .build()); - - assertThat(compilation).succeededWithoutWarnings(); - assertThat(compilation) - .generatedFile(StandardLocation.SOURCE_OUTPUT, "test", "Foo_BarImpl__MapperGenerated.java") - .contentsAsUtf8String() - .contains("class Foo_BarImpl__MapperGenerated extends DaoBase implements Foo.Bar"); - } - - @Test - public void should_fail_on_class() { - should_fail_with_expected_error( - "Only INTERFACE elements can be annotated with Dao", - "test", - TypeSpec.classBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .build()); - } -} diff --git 
a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoDeleteMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoDeleteMethodGeneratorTest.java deleted file mode 100644 index f965a6993ec..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoDeleteMethodGeneratorTest.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.google.testing.compile.CompilationSubject.assertThat; - -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.google.testing.compile.Compilation; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.TypeSpec; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoDeleteMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error( - String expectedError, MethodSpec method, TypeSpec entitySpec) { - super.should_fail_with_expected_error(expectedError, method, entitySpec); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Invalid annotation parameters: Delete cannot have both ifExists and customIfClause", - MethodSpec.methodBuilder("delete") - .addAnnotation( - AnnotationSpec.builder(Delete.class) - .addMember("ifExists", "true") - .addMember("customIfClause", "$S", "whatever") - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build(), - ENTITY_SPEC - }, - { - "Wrong number of parameters: Delete methods with no custom clause " - + "must take either an entity instance, or the primary key 
components", - MethodSpec.methodBuilder("delete") - .addAnnotation(Delete.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build(), - ENTITY_SPEC - }, - { - "Missing entity class: Delete methods that do not operate on an entity " - + "instance must have an 'entityClass' argument", - MethodSpec.methodBuilder("delete") - .addAnnotation(Delete.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(UUID.class, "id") - .build(), - ENTITY_SPEC - }, - { - "Invalid parameter list: Delete methods that do not operate on an entity instance " - + "and lack a custom where clause must match the primary key components in the exact " - + "order (expected primary key of Product: [java.util.UUID]). Mismatch at index 0: java.lang" - + ".Integer should be java.util.UUID", - MethodSpec.methodBuilder("delete") - .addAnnotation( - AnnotationSpec.builder(Delete.class) - .addMember("entityClass", "$T.class", ENTITY_CLASS_NAME) - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(Integer.class, "id") - .build(), - ENTITY_SPEC - }, - { - "Invalid parameter list: Delete methods that do not operate on an entity instance " - + "and lack a custom where clause must at least specify partition key components " - + "(expected partition key of ProductSale: [java.util.UUID, java.lang.String])", - MethodSpec.methodBuilder("delete") - .addAnnotation( - AnnotationSpec.builder(Delete.class) - .addMember("entityClass", "$T.class", SALE_ENTITY_CLASS_NAME) - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(Integer.class, "id") - .build(), - SALE_ENTITY_SPEC - }, - { - "Delete methods must return one of [VOID, FUTURE_OF_VOID, BOOLEAN, FUTURE_OF_BOOLEAN, " - + "RESULT_SET, BOUND_STATEMENT, FUTURE_OF_ASYNC_RESULT_SET, REACTIVE_RESULT_SET]", - MethodSpec.methodBuilder("delete") - .addAnnotation(Delete.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - 
.returns(Integer.class) - .build(), - ENTITY_SPEC - }, - { - "Wrong number of parameters: Delete methods can only have additional parameters " - + "if they specify a custom WHERE or IF clause", - MethodSpec.methodBuilder("delete") - .addAnnotation(Delete.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .addParameter(Integer.class, "extra") - .build(), - ENTITY_SPEC - }, - { - "Delete methods that have a custom where clause must not take an Entity (Product) as a " - + "parameter", - MethodSpec.methodBuilder("delete") - .addAnnotation( - AnnotationSpec.builder(Delete.class) - .addMember("customWhereClause", "$S", "hello") - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .returns(Integer.class) - .build(), - ENTITY_SPEC - }, - }; - } - - @Test - public void should_warn_when_non_bind_marker_has_cql_name() { - should_succeed_with_expected_warning( - "Parameter id does not refer to a bind marker, @CqlName annotation will be ignored", - MethodSpec.methodBuilder("delete") - .addAnnotation( - AnnotationSpec.builder(Delete.class) - .addMember("entityClass", ENTITY_CLASS_NAME + ".class") - .addMember("customIfClause", "$S", "description = :description") - .build()) - .addParameter( - ParameterSpec.builder(UUID.class, "id") - .addAnnotation( - AnnotationSpec.builder(CqlName.class) - .addMember("value", "$S", "irrelevant") - .build()) - .build()) - .addParameter(String.class, "description") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()); - } - - @Test - public void should_not_fail_on_unsupported_result_when_custom_results_enabled() { - - MethodSpec methodSpec = - MethodSpec.methodBuilder("delete") - .addAnnotation(Delete.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .returns(Integer.class) // not a built-in return type - .build(); - TypeSpec daoSpec = - 
TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod(methodSpec) - .build(); - - for (List compilerOptions : - ImmutableList.of( - ImmutableList.of("-Acom.datastax.oss.driver.mapper.customResults.enabled=true"), - // The option defaults to true, so it should also work without explicit options: - Collections.emptyList())) { - Compilation compilation = - compileWithMapperProcessor("test", compilerOptions, ENTITY_SPEC, daoSpec); - assertThat(compilation).succeededWithoutWarnings(); - } - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoGetEntityMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoGetEntityMethodGeneratorTest.java deleted file mode 100644 index 467de1d9a5a..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoGetEntityMethodGeneratorTest.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.mapper.annotations.GetEntity; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.stream.Stream; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoGetEntityMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Wrong number of parameters: GetEntity methods must have exactly one", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build(), - }, - { - "Wrong number of parameters: GetEntity methods must have exactly one", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, "a").build()) - .addParameter(ParameterSpec.builder(String.class, "b").build()) - .build(), - }, - { - "Invalid parameter type: GetEntity methods must take a GettableByName, ResultSet or AsyncResultSet", - MethodSpec.methodBuilder("get") - 
.addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, "a").build()) - .build(), - }, - { - "Invalid return type: GetEntity methods must return a Entity-annotated class, or a PagingIterable, a Stream or MappedAsyncPagingIterable thereof", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(Row.class, "source").build()) - .build(), - }, - { - "Invalid return type: GetEntity methods must return a Entity-annotated class, or a PagingIterable, a Stream or MappedAsyncPagingIterable thereof", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ResultSet.class, "source").build()) - .returns(ParameterizedTypeName.get(PagingIterable.class, Integer.class)) - .build(), - }, - { - "Invalid return type: GetEntity methods returning PagingIterable must have an argument of type ResultSet", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(Row.class, "source").build()) - .returns( - ParameterizedTypeName.get(ClassName.get(PagingIterable.class), ENTITY_CLASS_NAME)) - .build(), - }, - { - "Invalid return type: GetEntity methods returning Stream must have an argument of type ResultSet", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(Row.class, "source").build()) - .returns(ParameterizedTypeName.get(ClassName.get(Stream.class), ENTITY_CLASS_NAME)) - .build(), - }, - { - "Invalid return type: GetEntity methods returning MappedAsyncPagingIterable must have an argument of type AsyncResultSet", - MethodSpec.methodBuilder("get") - .addAnnotation(GetEntity.class) - 
.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(Row.class, "source").build()) - .returns( - ParameterizedTypeName.get( - ClassName.get(MappedAsyncPagingIterable.class), ENTITY_CLASS_NAME)) - .build(), - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationGeneratorTest.java deleted file mode 100644 index 95042307709..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoImplementationGeneratorTest.java +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.google.testing.compile.CompilationSubject.assertThat; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Delete; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.google.testing.compile.Compilation; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeSpec; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collections; -import java.util.UUID; -import javax.lang.model.element.Modifier; -import javax.tools.StandardLocation; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoImplementationGeneratorTest extends DaoMethodGeneratorTest { - - private static final ClassName REACTIVE_RESULT_CLASS_NAME = - ClassName.get(ReactiveResultSet.class); - - private static final ClassName MAPPED_REACTIVE_RESULT_CLASS_NAME = - ClassName.get(MappedReactiveResultSet.class); - - private static final ParameterizedTypeName ENTITY_MAPPED_REACTIVE_RESULT_SET = - ParameterizedTypeName.get(MAPPED_REACTIVE_RESULT_CLASS_NAME, ENTITY_CLASS_NAME); - - @Test - public void should_fail_if_method_is_not_annotated() { - 
should_fail_with_expected_error( - "Unrecognized method signature: no implementation will be generated", - MethodSpec.methodBuilder("get") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(Row.class, "source").build()) - .returns(ENTITY_CLASS_NAME) - .build()); - } - - @Test - public void should_ignore_static_methods() { - should_succeed_without_warnings( - MethodSpec.methodBuilder("doNothing") - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .build()); - } - - @Test - public void should_ignore_default_methods() { - should_succeed_without_warnings( - MethodSpec.methodBuilder("doNothing") - .addModifiers(Modifier.PUBLIC, Modifier.DEFAULT) - .build()); - } - - @Test - public void should_compile_with_logging_enabled() { - Compilation compilation = - compileWithMapperProcessor( - "test", - Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("update") - .addAnnotation(Update.class) - .addParameter(ENTITY_CLASS_NAME, "product") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains( - compilation, - "private static final Logger LOG = LoggerFactory.getLogger(ProductDaoImpl__MapperGenerated.class);"); - assertGeneratedFileContains(compilation, "LOG.debug(\"[{}] Initializing new instance"); - assertGeneratedFileContains(compilation, "LOG.debug(\"[{}] Preparing query `{}` for method"); - } - - @Test - @UseDataProvider("disabledLoggingOptions") - public void should_compile_with_logging_disabled(Iterable options) { - Compilation compilation = - compileWithMapperProcessor( - "test", - options, - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - 
MethodSpec.methodBuilder("update") - .addAnnotation(Update.class) - .addParameter(ENTITY_CLASS_NAME, "product") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileDoesNotContain(compilation, "LoggerFactory.getLogger"); - assertGeneratedFileDoesNotContain(compilation, "LOG.debug"); - } - - @Test - public void should_generate_findById_method_returning_MappedReactiveResultSet() { - Compilation compilation = - compileWithMapperProcessor( - "test", - Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("findById") - .addAnnotation(Select.class) - .addParameter(ParameterSpec.builder(UUID.class, "pk").build()) - .returns(ENTITY_MAPPED_REACTIVE_RESULT_SET) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains( - compilation, "public MappedReactiveResultSet findById(UUID pk)"); - assertGeneratedFileContains( - compilation, "return executeReactiveAndMap(boundStatement, productHelper);"); - } - - @Test - public void should_generate_insert_method_returning_ReactiveResultSet() { - Compilation compilation = - compileWithMapperProcessor( - "test", - Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("insertIfNotExists") - .addAnnotation(Insert.class) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "product").build()) - .returns(REACTIVE_RESULT_CLASS_NAME) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains( - compilation, "public ReactiveResultSet 
insertIfNotExists(Product product)"); - assertGeneratedFileContains(compilation, "return executeReactive(boundStatement);"); - } - - @Test - public void should_generate_update_method_returning_ReactiveResultSet() { - Compilation compilation = - compileWithMapperProcessor( - "test", - Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("updateIfExists") - .addAnnotation(Update.class) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "product").build()) - .returns(REACTIVE_RESULT_CLASS_NAME) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains( - compilation, "public ReactiveResultSet updateIfExists(Product product)"); - assertGeneratedFileContains(compilation, "return executeReactive(boundStatement);"); - } - - @Test - public void should_generate_delete_method_returning_ReactiveResultSet() { - Compilation compilation = - compileWithMapperProcessor( - "test", - Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("delete") - .addAnnotation(Delete.class) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "product").build()) - .returns(REACTIVE_RESULT_CLASS_NAME) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains(compilation, "public ReactiveResultSet delete(Product product)"); - assertGeneratedFileContains(compilation, "return executeReactive(boundStatement);"); - } - - @Test - public void should_generate_query_method_returning_ReactiveResultSet() { - Compilation compilation = - compileWithMapperProcessor( - "test", - 
Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("queryReactive") - .addAnnotation( - AnnotationSpec.builder(Query.class) - .addMember("value", "$S", "SELECT * FROM whatever") - .build()) - .returns(REACTIVE_RESULT_CLASS_NAME) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains(compilation, "public ReactiveResultSet queryReactive()"); - assertGeneratedFileContains(compilation, "return executeReactive(boundStatement);"); - } - - @Test - public void should_generate_query_method_returning_MappedReactiveResultSet() { - Compilation compilation = - compileWithMapperProcessor( - "test", - Collections.emptyList(), - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod( - MethodSpec.methodBuilder("queryReactiveMapped") - .addAnnotation( - AnnotationSpec.builder(Query.class) - .addMember("value", "$S", "SELECT * FROM whatever") - .build()) - .returns(ENTITY_MAPPED_REACTIVE_RESULT_SET) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - assertGeneratedFileContains( - compilation, "public MappedReactiveResultSet queryReactiveMapped()"); - assertGeneratedFileContains( - compilation, "return executeReactiveAndMap(boundStatement, productHelper);"); - } - - protected void assertGeneratedFileDoesNotContain(Compilation compilation, String string) { - assertThat(compilation) - .generatedFile( - StandardLocation.SOURCE_OUTPUT, "test", "ProductDaoImpl__MapperGenerated.java") - .contentsAsUtf8String() - .doesNotContain(string); - } - - protected void assertGeneratedFileContains(Compilation compilation, String string) { - assertThat(compilation) - 
.generatedFile( - StandardLocation.SOURCE_OUTPUT, "test", "ProductDaoImpl__MapperGenerated.java") - .contentsAsUtf8String() - .contains(string); - } - - @DataProvider - public static Object[][] disabledLoggingOptions() { - return new Object[][] { - {Collections.singletonList("-Acom.datastax.oss.driver.mapper.logs.enabled=false")}, - {Collections.singletonList("-Acom.datastax.oss.driver.mapper.logs.enabled=malformed")} - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoInsertMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoInsertMethodGeneratorTest.java deleted file mode 100644 index 5b2e3220463..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoInsertMethodGeneratorTest.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.TypeName; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoInsertMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Insert methods must take the entity to insert as the first parameter", - MethodSpec.methodBuilder("insert") - .addAnnotation(Insert.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build(), - }, - { - "Insert methods must take the entity to insert as the first parameter", - MethodSpec.methodBuilder("insert") - .addAnnotation(Insert.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, "a").build()) - .build(), - }, - { - "Insert methods must return one of [VOID, FUTURE_OF_VOID, ENTITY, FUTURE_OF_ENTITY, " - + "OPTIONAL_ENTITY, FUTURE_OF_OPTIONAL_ENTITY, BOOLEAN, FUTURE_OF_BOOLEAN, RESULT_SET, " - + "BOUND_STATEMENT, FUTURE_OF_ASYNC_RESULT_SET, REACTIVE_RESULT_SET]", - MethodSpec.methodBuilder("insert") - .addAnnotation(Insert.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - .returns(TypeName.INT) - 
.build(), - }, - }; - } - - @Test - @Override - @UseDataProvider("warningSignatures") - public void should_succeed_with_expected_warning(String expectedWarning, MethodSpec method) { - super.should_succeed_with_expected_warning(expectedWarning, method); - } - - @DataProvider - public static Object[][] warningSignatures() { - return new Object[][] { - { - "Invalid ttl value: " - + "':foo bar' is not a valid placeholder, the generated query will probably fail", - MethodSpec.methodBuilder("insert") - .addAnnotation( - AnnotationSpec.builder(Insert.class).addMember("ttl", "$S", ":foo bar").build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .build(), - }, - { - "Invalid ttl value: " - + "'foo' is not a bind marker name and can't be parsed as a number literal either, " - + "the generated query will probably fail", - MethodSpec.methodBuilder("insert") - .addAnnotation( - AnnotationSpec.builder(Insert.class).addMember("ttl", "$S", "foo").build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .build(), - }, - { - "Invalid timestamp value: " - + "':foo bar' is not a valid placeholder, the generated query will probably fail", - MethodSpec.methodBuilder("insert") - .addAnnotation( - AnnotationSpec.builder(Insert.class) - .addMember("timestamp", "$S", ":foo bar") - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .build(), - }, - { - "Invalid timestamp value: " - + "'foo' is not a bind marker name and can't be parsed as a number literal either, " - + "the generated query will probably fail", - MethodSpec.methodBuilder("insert") - .addAnnotation( - AnnotationSpec.builder(Insert.class).addMember("timestamp", "$S", "foo").build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ENTITY_CLASS_NAME, "entity") - .build(), - }, - }; - } -} diff --git 
a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoMethodGeneratorTest.java deleted file mode 100644 index 3f3431eaf1f..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoMethodGeneratorTest.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeSpec; -import java.util.UUID; -import javax.lang.model.element.Modifier; - -public abstract class DaoMethodGeneratorTest extends MapperProcessorTest { - - // Dummy entity class that can be reused across tests - protected static final ClassName ENTITY_CLASS_NAME = ClassName.get("test", "Product"); - protected static final TypeSpec ENTITY_SPEC = - TypeSpec.classBuilder(ENTITY_CLASS_NAME) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Entity.class) - .addField(UUID.class, "id", Modifier.PRIVATE) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .addAnnotation(PartitionKey.class) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .build(); - protected static final ClassName SALE_ENTITY_CLASS_NAME = ClassName.get("test", "ProductSale"); - protected static final TypeSpec SALE_ENTITY_SPEC = - TypeSpec.classBuilder(SALE_ENTITY_CLASS_NAME) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Entity.class) - .addField(UUID.class, "id", Modifier.PRIVATE) - .addField(String.class, "day", Modifier.PRIVATE) - .addField(UUID.class, "customerId", Modifier.PRIVATE) - .addField(UUID.class, "ts", Modifier.PRIVATE) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - 
.addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .addAnnotation(PartitionKey.class) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setDay") - .addParameter(String.class, "day") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.day = day") - .build()) - .addMethod( - MethodSpec.methodBuilder("getDay") - .addAnnotation( - AnnotationSpec.builder(PartitionKey.class) - .addMember("value", "$L", 1) - .build()) - .returns(String.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return day") - .build()) - .addMethod( - MethodSpec.methodBuilder("setCustomerId") - .addParameter(UUID.class, "customerId") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.customerId = customerId") - .build()) - .addMethod( - MethodSpec.methodBuilder("getCustomerId") - .addAnnotation( - AnnotationSpec.builder(ClusteringColumn.class) - .addMember("value", "$L", 0) - .build()) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return customerId") - .build()) - .addMethod( - MethodSpec.methodBuilder("setTs") - .addParameter(UUID.class, "ts") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.ts = ts") - .build()) - .addMethod( - MethodSpec.methodBuilder("getTs") - .addAnnotation( - AnnotationSpec.builder(ClusteringColumn.class) - .addMember("value", "$L", 1) - .build()) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return ts") - .build()) - .build(); - - protected void should_fail_with_expected_error(String expectedError, MethodSpec method) { - should_fail_with_expected_error(expectedError, method, ENTITY_SPEC); - } - - protected void should_fail_with_expected_error( - String expectedError, MethodSpec method, TypeSpec entitySpec) { - should_fail_with_expected_error( - expectedError, - "test", - entitySpec, - TypeSpec.interfaceBuilder(ClassName.get("test", 
"ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod(method) - .build()); - } - - protected void should_succeed_with_expected_warning(String expectedWarning, MethodSpec method) { - should_succeed_with_expected_warning( - expectedWarning, - "test", - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod(method) - .build()); - } - - protected void should_succeed_without_warnings(MethodSpec method) { - should_succeed_without_warnings( - "test", - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .addMethod(method) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryMethodGeneratorTest.java deleted file mode 100644 index 45377c3e2b7..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryMethodGeneratorTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.MethodSpec; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.UUID; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoQueryMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - // Not many error cases to cover, the return type/parameters are pretty open - return new Object[][] { - { - "Invalid return type: Query methods must return one of [VOID, BOOLEAN, LONG, ROW, " - + "ENTITY, OPTIONAL_ENTITY, RESULT_SET, BOUND_STATEMENT, PAGING_ITERABLE, FUTURE_OF_VOID, " - + "FUTURE_OF_BOOLEAN, FUTURE_OF_LONG, FUTURE_OF_ROW, " - + "FUTURE_OF_ENTITY, FUTURE_OF_OPTIONAL_ENTITY, " - + "FUTURE_OF_ASYNC_RESULT_SET, FUTURE_OF_ASYNC_PAGING_ITERABLE, " - + "REACTIVE_RESULT_SET, MAPPED_REACTIVE_RESULT_SET, " - + "STREAM, FUTURE_OF_STREAM]", - MethodSpec.methodBuilder("select") - .addAnnotation( - AnnotationSpec.builder(Query.class) - .addMember("value", "$S", "SELECT * FROM whatever") - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(UUID.class) - .build(), - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryProviderMethodGeneratorTest.java 
b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryProviderMethodGeneratorTest.java deleted file mode 100644 index 765b39b25c8..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoQueryProviderMethodGeneratorTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.MethodSpec; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoQueryProviderMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Invalid annotation configuration: the elements in QueryProvider.entityHelpers " - + "must be Entity-annotated classes (offending element: java.lang.String)", - MethodSpec.methodBuilder("select") - .addAnnotation( - AnnotationSpec.builder(QueryProvider.class) - // We don't go until instantiation, any class will do - .addMember("providerClass", "$T.class", String.class) - .addMember( - "entityHelpers", "{ $T.class, $T.class }", ENTITY_CLASS_NAME, String.class) - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build(), - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSelectMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSelectMethodGeneratorTest.java deleted file mode 100644 index 285c78e5a57..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSelectMethodGeneratorTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.concurrent.CompletionStage; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoSelectMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Invalid return type: Select methods must return one of [ENTITY, OPTIONAL_ENTITY, " - + "FUTURE_OF_ENTITY, FUTURE_OF_OPTIONAL_ENTITY, PAGING_ITERABLE, STREAM, " - + "FUTURE_OF_ASYNC_PAGING_ITERABLE, FUTURE_OF_STREAM, 
MAPPED_REACTIVE_RESULT_SET]", - MethodSpec.methodBuilder("select") - .addAnnotation(Select.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(Integer.class) - .build(), - }, - { - "Invalid return type: Select methods must return one of [ENTITY, OPTIONAL_ENTITY, " - + "FUTURE_OF_ENTITY, FUTURE_OF_OPTIONAL_ENTITY, PAGING_ITERABLE, STREAM, " - + "FUTURE_OF_ASYNC_PAGING_ITERABLE, FUTURE_OF_STREAM, MAPPED_REACTIVE_RESULT_SET]", - MethodSpec.methodBuilder("select") - .addAnnotation(Select.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(ParameterizedTypeName.get(CompletionStage.class, Integer.class)) - .build(), - }, - { - "Select methods that don't use a custom clause must match the primary key components " - + "in the exact order (expected primary key of Product: [java.util.UUID]). Mismatch " - + "at index 0: java.lang.String should be java.util.UUID", - MethodSpec.methodBuilder("select") - .addAnnotation(Select.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(String.class, "id") - .returns(ENTITY_CLASS_NAME) - .build(), - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSetEntityMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSetEntityMethodGeneratorTest.java deleted file mode 100644 index bc4e235ff2c..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoSetEntityMethodGeneratorTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.mapper.annotations.SetEntity; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoSetEntityMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Wrong number of parameters: SetEntity methods must have two", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, "a").build()) - .build(), - }, - { - "Wrong number of parameters: SetEntity methods must have two", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, 
"a").build()) - .addParameter(ParameterSpec.builder(String.class, "b").build()) - .addParameter(ParameterSpec.builder(String.class, "c").build()) - .build(), - }, - { - "Wrong parameter types: SetEntity methods must take a SettableByName and an annotated entity (in any order)", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - .addParameter(ParameterSpec.builder(Integer.class, "target").build()) - .build(), - }, - { - "Wrong parameter types: SetEntity methods must take a SettableByName and an annotated entity (in any order)", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, "entity").build()) - .addParameter(ParameterSpec.builder(BoundStatement.class, "target").build()) - .build(), - }, - { - "Invalid return type: SetEntity methods must either be void, or return the same type as their settable parameter " - + "(in this case, com.datastax.oss.driver.api.core.cql.BoundStatement to match 'target')", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - .addParameter(ParameterSpec.builder(BoundStatement.class, "target").build()) - .returns(Integer.class) - .build(), - }, - // Return type is a SettableByName, but not the same subtype as the parameter: - { - "Invalid return type: SetEntity methods must either be void, or return the same type as their settable parameter " - + "(in this case, com.datastax.oss.driver.api.core.cql.BoundStatement to match 'target')", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - 
.addParameter(ParameterSpec.builder(BoundStatement.class, "target").build()) - .returns(UdtValue.class) - .build(), - }, - }; - } - - @Test - public void should_warn_when_void_and_target_is_bound_statement() { - super.should_succeed_with_expected_warning( - "BoundStatement is immutable, " - + "this method will not modify 'target' in place. " - + "It should probably return BoundStatement rather than void", - MethodSpec.methodBuilder("set") - .addAnnotation(SetEntity.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - .addParameter(ParameterSpec.builder(BoundStatement.class, "target").build()) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoUpdateMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoUpdateMethodGeneratorTest.java deleted file mode 100644 index 1bbcb6d8596..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/DaoUpdateMethodGeneratorTest.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.dao; - -import static com.google.common.truth.Truth.assertThat; -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.CodeBlock; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.TypeName; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoUpdateMethodGeneratorTest extends DaoMethodGeneratorTest { - - private static final AnnotationSpec UPDATE_ANNOTATION = - AnnotationSpec.builder(Update.class).build(); - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Update methods must take the entity to update as the first parameter", - MethodSpec.methodBuilder("update") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addAnnotation(UPDATE_ANNOTATION) - .build(), - }, - { - "Update methods must take the entity to update as the first parameter", - MethodSpec.methodBuilder("update") - .addAnnotation(UPDATE_ANNOTATION) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(String.class, "a").build()) - .build(), - }, - { - "Invalid return 
type: Update methods must return one of [VOID, FUTURE_OF_VOID, " - + "RESULT_SET, BOUND_STATEMENT, FUTURE_OF_ASYNC_RESULT_SET, BOOLEAN, " - + "FUTURE_OF_BOOLEAN, REACTIVE_RESULT_SET]", - MethodSpec.methodBuilder("update") - .addAnnotation(UPDATE_ANNOTATION) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - .returns(TypeName.INT) - .build(), - }, - { - "Invalid annotation parameters: Update cannot have both ifExists and customIfClause", - MethodSpec.methodBuilder("update") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .addAnnotation( - AnnotationSpec.builder(Update.class) - .addMember("ifExists", "true") - .addMember("customIfClause", "$S", "1 = 1") - .build()) - .addParameter(ParameterSpec.builder(ENTITY_CLASS_NAME, "entity").build()) - .returns(TypeName.VOID) - .build(), - }, - }; - } - - @Test - public void should_warn_when_non_bind_marker_has_cql_name() { - should_succeed_with_expected_warning( - "Parameter entity does not refer " - + "to a bind marker, @CqlName annotation will be ignored", - MethodSpec.methodBuilder("update") - .addAnnotation( - AnnotationSpec.builder(Update.class) - .addMember("customIfClause", "$S", "description LIKE :searchString") - .build()) - .addParameter( - ParameterSpec.builder(ENTITY_CLASS_NAME, "entity") - .addAnnotation( - AnnotationSpec.builder(CqlName.class) - .addMember("value", "$S", "irrelevant") - .build()) - .build()) - .addParameter( - ParameterSpec.builder(String.class, "searchString") - .addAnnotation( - AnnotationSpec.builder(CqlName.class) - .addMember("value", "$S", "irrelevant") - .build()) - .build()) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .build()); - } - - @Test - @UseDataProvider("usingTimestampProvider") - public void should_process_timestamp(String timestamp, CodeBlock expected) { - // given - ProcessorContext processorContext = mock(ProcessorContext.class); - DaoUpdateMethodGenerator daoUpdateMethodGenerator = 
- new DaoUpdateMethodGenerator(null, null, null, null, processorContext); - MethodSpec.Builder builder = MethodSpec.constructorBuilder(); - - // when - daoUpdateMethodGenerator.maybeAddTimestamp(timestamp, builder); - - // then - assertThat(builder.build().code).isEqualTo(expected); - } - - @Test - @UseDataProvider("usingTtlProvider") - public void should_process_ttl(String ttl, CodeBlock expected) { - // given - ProcessorContext processorContext = mock(ProcessorContext.class); - DaoUpdateMethodGenerator daoUpdateMethodGenerator = - new DaoUpdateMethodGenerator(null, null, null, null, processorContext); - MethodSpec.Builder builder = MethodSpec.constructorBuilder(); - - // when - daoUpdateMethodGenerator.maybeAddTtl(ttl, builder); - - // then - assertThat(builder.build().code).isEqualTo(expected); - } - - @DataProvider - public static Object[][] usingTimestampProvider() { - return new Object[][] { - {"1", CodeBlock.of(".usingTimestamp(1)")}, - { - ":ts", CodeBlock.of(".usingTimestamp($T.bindMarker($S))", QueryBuilder.class, "ts"), - }, - {"1", CodeBlock.of(".usingTimestamp(1)")}, - { - ":TS", CodeBlock.of(".usingTimestamp($T.bindMarker($S))", QueryBuilder.class, "TS"), - }, - }; - } - - @DataProvider - public static Object[][] usingTtlProvider() { - return new Object[][] { - {"1", CodeBlock.of(".usingTtl(1)")}, - { - ":ttl", CodeBlock.of(".usingTtl($T.bindMarker($S))", QueryBuilder.class, "ttl"), - }, - {"1", CodeBlock.of(".usingTtl(1)")}, - { - ":TTL", CodeBlock.of(".usingTtl($T.bindMarker($S))", QueryBuilder.class, "TTL"), - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/CompiledProduct.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/CompiledProduct.java deleted file mode 100644 index 2eb5d516e2f..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/CompiledProduct.java +++ 
/dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao.compiled; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import java.util.UUID; - -@Entity -public class CompiledProduct { - - @PartitionKey private UUID id; - private String description; - - public CompiledProduct() {} - - public CompiledProduct(UUID id, String description) { - this.id = id; - this.description = description; - } - - public UUID getId() { - return id; - } - - public void setId(UUID id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/CompiledProductDao.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/CompiledProductDao.java deleted file mode 100644 index b10d5047f18..00000000000 --- 
a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/CompiledProductDao.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao.compiled; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Select; - -@Dao -public interface CompiledProductDao { - - @Select(customWhereClause = "description LIKE :searchString") - PagingIterable findByDescriptionCompiled( - @CqlName("searchString") String searchString); - - @Select(customWhereClause = "description LIKE :searchString") - PagingIterable findByDescriptionCompiledWrong(String searchString); -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/DaoCompiledMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/DaoCompiledMethodGeneratorTest.java deleted file mode 100644 index a0ed703ccc5..00000000000 --- 
a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/dao/compiled/DaoCompiledMethodGeneratorTest.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.dao.compiled; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.internal.mapper.processor.dao.DaoMethodGeneratorTest; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.TypeSpec; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class DaoCompiledMethodGeneratorTest extends DaoMethodGeneratorTest { - - @Test - public void should_fail_with_expected_error() { - should_fail_with_expected_error( - "[CompiledProductDao.findByDescriptionCompiledWrong] " - + "Parameter arg0 is declared in a compiled method " - + "and refers to a bind marker " - + "and thus must be annotated with @CqlName", - "test", - ENTITY_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "ProductDao")) - .addModifiers(Modifier.PUBLIC) - 
.addSuperinterface(CompiledProductDao.class) - .addAnnotation(Dao.class) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/BuiltInNameConversionsTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/BuiltInNameConversionsTest.java deleted file mode 100644 index cc5222c4f9a..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/BuiltInNameConversionsTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.CASE_INSENSITIVE; -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.EXACT_CASE; -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.LOWER_CAMEL_CASE; -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.SNAKE_CASE_INSENSITIVE; -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.UPPER_CAMEL_CASE; -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.UPPER_CASE; -import static com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention.UPPER_SNAKE_CASE; -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import org.junit.Test; - -public class BuiltInNameConversionsTest { - - @Test - public void should_convert_to_cql() { - should_convert_to_cql("Product", CASE_INSENSITIVE, "Product"); - should_convert_to_cql("productId", CASE_INSENSITIVE, "productId"); - - should_convert_to_cql("Product", EXACT_CASE, "\"Product\""); - should_convert_to_cql("productId", EXACT_CASE, "\"productId\""); - - should_convert_to_cql("Product", LOWER_CAMEL_CASE, "\"product\""); - should_convert_to_cql("productId", LOWER_CAMEL_CASE, "\"productId\""); - - should_convert_to_cql("Product", UPPER_CAMEL_CASE, "\"Product\""); - should_convert_to_cql("productId", UPPER_CAMEL_CASE, "\"ProductId\""); - - should_convert_to_cql("Product", SNAKE_CASE_INSENSITIVE, "product"); - should_convert_to_cql("productId", SNAKE_CASE_INSENSITIVE, "product_id"); - - should_convert_to_cql("Product", UPPER_SNAKE_CASE, "\"PRODUCT\""); - should_convert_to_cql("productId", UPPER_SNAKE_CASE, "\"PRODUCT_ID\""); - - should_convert_to_cql("Product", UPPER_CASE, "\"PRODUCT\""); - should_convert_to_cql("productId", UPPER_CASE, "\"PRODUCTID\""); - } 
- - private void should_convert_to_cql( - String javaName, NamingConvention convention, String expectedCqlName) { - String actualCqlName = BuiltInNameConversions.toCassandraName(javaName, convention); - assertThat(actualCqlName).isEqualTo(expectedCqlName); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityAnnotationTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityAnnotationTest.java deleted file mode 100644 index 8e9fa723436..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityAnnotationTest.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import static com.google.testing.compile.CompilationSubject.assertThat; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.google.common.truth.StringSubject; -import com.google.testing.compile.Compilation; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; -import javax.tools.StandardLocation; -import org.junit.Test; - -public class EntityAnnotationTest extends MapperProcessorTest { - - @Test - public void should_work_on_nested_class() { - Compilation compilation = - compileWithMapperProcessor( - "test", - TypeSpec.classBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addType( - TypeSpec.classBuilder("Bar") - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .addAnnotation(Entity.class) - // Dummy getter and setter to have at least one mapped property - .addMethod( - MethodSpec.methodBuilder("setI") - .addParameter(TypeName.INT, "i") - .addModifiers(Modifier.PUBLIC) - .build()) - .addMethod( - MethodSpec.methodBuilder("getI") - .returns(TypeName.INT) - .addModifiers(Modifier.PUBLIC) - .addStatement("return 0") - .build()) - .build()) - .build()); - - assertThat(compilation).succeededWithoutWarnings(); - assertThat(compilation) - .generatedFile( - StandardLocation.SOURCE_OUTPUT, "test", "Foo_BarHelper__MapperGenerated.java") - .contentsAsUtf8String() - .contains("class Foo_BarHelper__MapperGenerated extends EntityHelperBase"); - } - - @Test - public void should_detect_boolean_getter() { - Compilation compilation = - compileWithMapperProcessor( - "test", - TypeSpec.classBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addType( - TypeSpec.classBuilder("Bar") - .addModifiers(Modifier.PUBLIC, 
Modifier.STATIC) - .addAnnotation(Entity.class) - // Dummy getter and setter to have at least one mapped property - .addMethod( - MethodSpec.methodBuilder("setBool") - .addParameter(TypeName.BOOLEAN, "bool") - .addModifiers(Modifier.PUBLIC) - .build()) - .addMethod( - MethodSpec.methodBuilder("isBool") - .returns(TypeName.BOOLEAN) - .addModifiers(Modifier.PUBLIC) - .addStatement("return true") - .build()) - .build()) - .build()); - assertThat(compilation).succeededWithoutWarnings(); - StringSubject contents = - assertThat(compilation) - .generatedFile( - StandardLocation.SOURCE_OUTPUT, "test", "Foo_BarHelper__MapperGenerated.java") - .contentsAsUtf8String(); - contents.contains("target = target.setBoolean(\"bool\", entity.isBool())"); - contents.contains("boolean propertyValue = source.getBoolean(\"bool\");"); - } - - @Test - public void should_fail_on_interface() { - should_fail_with_expected_error( - "Only CLASS elements can be annotated with Entity", - "test", - TypeSpec.interfaceBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Entity.class) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityNamingStrategyTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityNamingStrategyTest.java deleted file mode 100644 index b6e63d50616..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityNamingStrategyTest.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; -import org.junit.Test; - -public class EntityNamingStrategyTest extends MapperProcessorTest { - - private static final ClassName CUSTOM_CONVERTER_CLASS_NAME = - ClassName.get("test", "CustomConverter"); - - private static final TypeSpec CUSTOM_CONVERTER_CLASS = - TypeSpec.classBuilder(CUSTOM_CONVERTER_CLASS_NAME) - .addSuperinterface(NameConverter.class) - .addMethod( - MethodSpec.methodBuilder("toCassandraName") - .addModifiers(Modifier.PUBLIC) - .returns(String.class) - .addParameter(String.class, "javaName") - .addStatement("return null;") // doesn't matter, converter won't be invoked - .build()) - .build(); - - // Common code for the entity class, to avoid repeating it in every test - private TypeSpec.Builder entityTemplate() { - return TypeSpec.classBuilder("TestEntity") - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Entity.class) - .addField(TypeName.INT, "i", 
Modifier.PRIVATE) - .addMethod( - MethodSpec.methodBuilder("setI") - .addParameter(TypeName.INT, "i") - .addModifiers(Modifier.PUBLIC) - .build()) - .addMethod( - MethodSpec.methodBuilder("getI") - .returns(TypeName.INT) - .addModifiers(Modifier.PUBLIC) - .addStatement("return 0") - .build()); - } - - @Test - public void should_fail_if_both_convention_and_converter_specified() { - should_fail_with_expected_error( - "Invalid annotation configuration: " - + "NamingStrategy must have either a 'convention' or 'customConverterClass' argument, " - + "but not both", - "test", - CUSTOM_CONVERTER_CLASS, - entityTemplate() - .addAnnotation( - AnnotationSpec.builder(NamingStrategy.class) - .addMember("convention", "$T.CASE_INSENSITIVE", NamingConvention.class) - .addMember("customConverterClass", "$T.class", CUSTOM_CONVERTER_CLASS_NAME) - .build()) - .build()); - } - - @Test - public void should_fail_if_neither_convention_nor_converter_specified() { - should_fail_with_expected_error( - "Invalid annotation configuration: " - + "NamingStrategy must have either a 'convention' or 'customConverterClass' argument", - "test", - CUSTOM_CONVERTER_CLASS, - entityTemplate().addAnnotation(NamingStrategy.class).build()); - } - - @Test - public void should_warn_if_multiple_conventions_specified() { - should_succeed_with_expected_warning( - "Too many naming conventions: " - + "NamingStrategy must have at most one 'convention' argument " - + "(will use the first one: CASE_INSENSITIVE)", - "test", - CUSTOM_CONVERTER_CLASS, - entityTemplate() - .addAnnotation( - AnnotationSpec.builder(NamingStrategy.class) - .addMember( - "convention", - "{ $1T.CASE_INSENSITIVE, $1T.EXACT_CASE }", - NamingConvention.class) - .build()) - .build()); - } - - @Test - public void should_warn_if_multiple_converters_specified() { - should_succeed_with_expected_warning( - "Too many custom converters: " - + "NamingStrategy must have at most one 'customConverterClass' argument " - + "(will use the first one: 
test.CustomConverter)", - "test", - CUSTOM_CONVERTER_CLASS, - entityTemplate() - .addAnnotation( - AnnotationSpec.builder(NamingStrategy.class) - .addMember( - "customConverterClass", - "{ $1T.class, $1T.class }", - CUSTOM_CONVERTER_CLASS_NAME) - .build()) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityPropertyAnnotationsTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityPropertyAnnotationsTest.java deleted file mode 100644 index 015df9f1bef..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/entity/EntityPropertyAnnotationsTest.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.entity; - -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.Computed; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Transient; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.squareup.javapoet.AnnotationSpec; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.TypeSpec; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.UUID; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class EntityPropertyAnnotationsTest extends MapperProcessorTest { - - @Test - @UseDataProvider("entitiesWithWarnings") - public void should_succeed_with_expected_warning(String expectedWarning, TypeSpec entitySpec) { - super.should_succeed_with_expected_warning(expectedWarning, "test", entitySpec); - } - - @DataProvider - public static Object[][] entitiesWithWarnings() { - return new Object[][] { - { - "@PartitionKey should be used either on the field or the getter, but not both. 
" - + "The annotation on this field will be ignored.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addAnnotation(PartitionKey.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .addAnnotation(PartitionKey.class) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "@ClusteringColumn should be used either on the field or the getter, but not both. " - + "The annotation on this field will be ignored.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addAnnotation(ClusteringColumn.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .addAnnotation(ClusteringColumn.class) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - }; - } - - @Test - @UseDataProvider("entitiesWithErrors") - public void should_fail_with_expected_error(String expectedError, TypeSpec entitySpec) { - super.should_fail_with_expected_error(expectedError, "test", entitySpec); - } - - @DataProvider - public static Object[][] entitiesWithErrors() { - return new Object[][] { - { - "Properties can't be annotated with both @ClusteringColumn and @PartitionKey.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addAnnotation(PartitionKey.class) - .addAnnotation(ClusteringColumn.class) - 
.build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "Properties can't be annotated with both @ClusteringColumn and @PartitionKey.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addAnnotation(PartitionKey.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .addAnnotation(ClusteringColumn.class) - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "Duplicate partition key index: if multiple properties are annotated with @PartitionKey, " - + "the annotation must be parameterized with an integer indicating the position. 
" - + "Found duplicate index 0 for getId1 and getId2.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id1", Modifier.PRIVATE) - .addAnnotation(PartitionKey.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId1") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id1") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId1") - .addParameter(UUID.class, "id1") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id1 = id1") - .build()) - .addField( - FieldSpec.builder(UUID.class, "id2", Modifier.PRIVATE) - .addAnnotation(PartitionKey.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId2") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id2") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId2") - .addParameter(UUID.class, "id2") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id2 = id2") - .build()) - .build(), - }, - { - "Duplicate clustering column index: if multiple properties are annotated with @ClusteringColumn, " - + "the annotation must be parameterized with an integer indicating the position. 
" - + "Found duplicate index 1 for getId1 and getId2.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id1", Modifier.PRIVATE) - .addAnnotation( - AnnotationSpec.builder(ClusteringColumn.class) - .addMember("value", "1") - .build()) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId1") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id1") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId1") - .addParameter(UUID.class, "id1") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id1 = id1") - .build()) - .addField( - FieldSpec.builder(UUID.class, "id2", Modifier.PRIVATE) - .addAnnotation( - AnnotationSpec.builder(ClusteringColumn.class) - .addMember("value", "1") - .build()) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId2") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id2") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId2") - .addParameter(UUID.class, "id2") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id2 = id2") - .build()) - .build(), - }, - { - "Properties can't be annotated with both @ClusteringColumn and @Transient.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addAnnotation(ClusteringColumn.class) - .addAnnotation(Transient.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "Property that is considered transient cannot be annotated with @PartitionKey.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - 
.addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addModifiers(Modifier.TRANSIENT) - .addAnnotation(PartitionKey.class) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "@Computed value should be non-empty.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addAnnotation( - AnnotationSpec.builder(Computed.class).addMember("value", "$S", "").build()) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "@Entity-annotated class must have at least one property defined.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField(FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE).build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .build(), - }, - { - "Mutable @Entity-annotated class must have a no-arg constructor.", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addModifiers(Modifier.FINAL) - .addAnnotation(PartitionKey.class) - .build()) - .addMethod( - MethodSpec.constructorBuilder() - .addParameter(ParameterSpec.builder(UUID.class, "id").build()) - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id 
= id") - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("setId") - .addParameter(UUID.class, "id") - .addModifiers(Modifier.PUBLIC) - .addStatement("this.id = id") - .build()) - .build(), - }, - { - "Immutable @Entity-annotated class must have an \"all values\" constructor. " - + "Expected signature: Product(java.util.UUID id, java.lang.String name, long writetime).", - TypeSpec.classBuilder(ClassName.get("test", "Product")) - .addAnnotation(Entity.class) - .addAnnotation( - AnnotationSpec.builder(PropertyStrategy.class) - .addMember("mutable", "false") - .build()) - .addField( - FieldSpec.builder(UUID.class, "id", Modifier.PRIVATE) - .addModifiers(Modifier.FINAL) - .addAnnotation(PartitionKey.class) - .build()) - .addField( - FieldSpec.builder(String.class, "name", Modifier.PRIVATE) - .addModifiers(Modifier.FINAL) - .build()) - .addField( - FieldSpec.builder(String.class, "writetime", Modifier.PRIVATE) - .addModifiers(Modifier.FINAL) - .addAnnotation( - AnnotationSpec.builder(Computed.class).addMember("value", "$S", "").build()) - .build()) - .addMethod( - MethodSpec.methodBuilder("getId") - .returns(UUID.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return id") - .build()) - .addMethod( - MethodSpec.methodBuilder("getName") - .returns(String.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("return name") - .build()) - .addMethod( - MethodSpec.methodBuilder("getWritetime") - .returns(Long.TYPE) - .addModifiers(Modifier.PUBLIC) - .addStatement("return writetime") - .build()) - .build(), - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperAnnotationTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperAnnotationTest.java deleted file mode 100644 index 8618827df54..00000000000 
--- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperAnnotationTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import static com.google.testing.compile.CompilationSubject.assertThat; - -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.google.testing.compile.Compilation; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; -import javax.tools.StandardLocation; -import org.junit.Test; - -/** Checks that the mapper correctly processes nested annotated types. 
*/ -public class MapperAnnotationTest extends MapperProcessorTest { - - @Test - public void should_work_on_nested_interface() { - Compilation compilation = - compileWithMapperProcessor( - "test", - TypeSpec.classBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addType( - TypeSpec.interfaceBuilder("Bar") - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .addAnnotation(Mapper.class) - .build()) - .build()); - - assertThat(compilation).succeededWithoutWarnings(); - assertThat(compilation) - .generatedFile(StandardLocation.SOURCE_OUTPUT, "test", "Foo_BarImpl__MapperGenerated.java") - .contentsAsUtf8String() - .contains("class Foo_BarImpl__MapperGenerated implements Foo.Bar"); - assertThat(compilation) - .generatedFile(StandardLocation.SOURCE_OUTPUT, "test", "Foo_BarBuilder.java"); - } - - @Test - public void should_fail_on_class() { - should_fail_with_expected_error( - "Only INTERFACE elements can be annotated with Mapper", - "test", - TypeSpec.classBuilder(ClassName.get("test", "Foo")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Mapper.class) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperDaoFactoryMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperDaoFactoryMethodGeneratorTest.java deleted file mode 100644 index be1b5d384f8..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperDaoFactoryMethodGeneratorTest.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.ParameterizedTypeName; -import com.squareup.javapoet.TypeName; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import javax.lang.model.element.Modifier; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class MapperDaoFactoryMethodGeneratorTest extends MapperMethodGeneratorTest { - - @Test - @Override - @UseDataProvider("invalidSignatures") - public void should_fail_with_expected_error(String expectedError, MethodSpec method) { - super.should_fail_with_expected_error(expectedError, method); - } - - @DataProvider - public static Object[][] invalidSignatures() { - return new Object[][] { - { - "Invalid return type: DaoFactory methods must return a Dao-annotated interface, or future thereof", - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - 
.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(TypeName.INT) - .build(), - }, - { - "Invalid parameter annotations: DaoFactory method parameters must be annotated with @DaoKeyspace, @DaoTable or @DaoProfile", - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter(String.class, "table") - .build(), - }, - { - "Invalid parameter annotations: only one DaoFactory method parameter can be annotated with @DaoTable", - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(String.class, "table1").addAnnotation(DaoTable.class).build()) - .addParameter( - ParameterSpec.builder(String.class, "table2").addAnnotation(DaoTable.class).build()) - .build(), - }, - { - "Invalid parameter annotations: only one DaoFactory method parameter can be annotated with @DaoKeyspace", - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(String.class, "keyspace1") - .addAnnotation(DaoKeyspace.class) - .build()) - .addParameter( - ParameterSpec.builder(String.class, "table").addAnnotation(DaoTable.class).build()) - .addParameter( - ParameterSpec.builder(String.class, "keyspace2") - .addAnnotation(DaoKeyspace.class) - .build()) - .build(), - }, - { - "Invalid parameter type: @DaoTable-annotated parameter of DaoFactory methods must be of type String or CqlIdentifier", - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(Integer.class, "table").addAnnotation(DaoTable.class).build()) - .build(), - }, - { - "Invalid parameter type: @DaoKeyspace-annotated 
parameter of DaoFactory methods must be of type String or CqlIdentifier", - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(Integer.class, "keyspace") - .addAnnotation(DaoKeyspace.class) - .build()) - .build(), - }, - }; - } - - @Test - @Override - @UseDataProvider("validSignatures") - public void should_succeed_without_warnings(MethodSpec method) { - super.should_succeed_without_warnings(method); - } - - @DataProvider - public static Object[][] validSignatures() { - return new Object[][] { - { - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .build() - }, - { - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns( - ParameterizedTypeName.get(ClassName.get(CompletionStage.class), DAO_CLASS_NAME)) - .build() - }, - { - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns( - ParameterizedTypeName.get(ClassName.get(CompletableFuture.class), DAO_CLASS_NAME)) - .build() - }, - { - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(String.class, "keyspace") - .addAnnotation(DaoKeyspace.class) - .build()) - .addParameter( - ParameterSpec.builder(String.class, "table").addAnnotation(DaoTable.class).build()) - .build() - }, - { - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(CqlIdentifier.class, "keyspace") - .addAnnotation(DaoKeyspace.class) - .build()) - .addParameter( - 
ParameterSpec.builder(CqlIdentifier.class, "table") - .addAnnotation(DaoTable.class) - .build()) - .build() - }, - { - MethodSpec.methodBuilder("productDao") - .addAnnotation(DaoFactory.class) - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .addParameter( - ParameterSpec.builder(String.class, "table").addAnnotation(DaoTable.class).build()) - .addParameter( - ParameterSpec.builder(CqlIdentifier.class, "keyspace") - .addAnnotation(DaoKeyspace.class) - .build()) - .build() - }, - }; - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperImplementationGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperImplementationGeneratorTest.java deleted file mode 100644 index eda57fbd147..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperImplementationGeneratorTest.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import com.squareup.javapoet.MethodSpec; -import javax.lang.model.element.Modifier; -import org.junit.Test; - -public class MapperImplementationGeneratorTest extends MapperMethodGeneratorTest { - - @Test - public void should_fail_if_method_is_not_annotated() { - should_fail_with_expected_error( - "Unrecognized method signature: no implementation will be generated", - MethodSpec.methodBuilder("productDao") - .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) - .returns(DAO_CLASS_NAME) - .build()); - } - - @Test - public void should_ignore_static_methods() { - should_succeed_without_warnings( - MethodSpec.methodBuilder("doNothing") - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .build()); - } - - @Test - public void should_ignore_default_methods() { - should_succeed_without_warnings( - MethodSpec.methodBuilder("doNothing") - .addModifiers(Modifier.PUBLIC, Modifier.DEFAULT) - .build()); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperMethodGeneratorTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperMethodGeneratorTest.java deleted file mode 100644 index 5557f865bae..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/mapper/MapperMethodGeneratorTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.mapper; - -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.internal.mapper.processor.MapperProcessorTest; -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeSpec; -import javax.lang.model.element.Modifier; - -public class MapperMethodGeneratorTest extends MapperProcessorTest { - - // Dummy DAO interface that is reused across tests - protected static final ClassName DAO_CLASS_NAME = ClassName.get("test", "ProductDao"); - protected static final TypeSpec DAO_SPEC = - TypeSpec.interfaceBuilder(DAO_CLASS_NAME) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Dao.class) - .build(); - - protected void should_fail_with_expected_error(String expectedError, MethodSpec method) { - should_fail_with_expected_error( - expectedError, - "test", - DAO_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "InventoryMapper")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Mapper.class) - .addMethod(method) - .build()); - } - - protected void should_succeed_without_warnings(MethodSpec method) { - should_succeed_without_warnings( - "test", - DAO_SPEC, - TypeSpec.interfaceBuilder(ClassName.get("test", "InventoryMapper")) - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Mapper.class) - .addMethod(method) - .build()); - } -} diff --git 
a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/util/CapitalizerTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/util/CapitalizerTest.java deleted file mode 100644 index 7f2055eba5c..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/util/CapitalizerTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper.processor.util; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import org.junit.Test; - -public class CapitalizerTest { - - @Test - public void should_decapitalize_regular_strings() { - assertThat(Capitalizer.decapitalize("foo")).isEqualTo("foo"); - assertThat(Capitalizer.decapitalize("Foo")).isEqualTo("foo"); - assertThat(Capitalizer.decapitalize("FooBar")).isEqualTo("fooBar"); - } - - @Test - public void should_not_decapitalize_when_second_char_is_uppercase() { - assertThat(Capitalizer.decapitalize("ID")).isEqualTo("ID"); - assertThat(Capitalizer.decapitalize("XML")).isEqualTo("XML"); - assertThat(Capitalizer.decapitalize("XMLRequest")).isEqualTo("XMLRequest"); - } - - @Test - public void should_capitalize_regular_strings() { - assertThat(Capitalizer.capitalize("foo")).isEqualTo("Foo"); - assertThat(Capitalizer.capitalize("fooBar")).isEqualTo("FooBar"); - } - - @Test - public void should_not_capitalize_when_second_char_is_uppercase() { - assertThat(Capitalizer.capitalize("cId")).isEqualTo("cId"); - } - - @Test - public void should_infer_field_name_and_setter_from_getter() { - // This is the sequence in which the processor uses those methods - String getterName = "getcId"; - String fieldName = Capitalizer.decapitalize(getterName.substring(3)); - String setterName = "set" + Capitalizer.capitalize(fieldName); - assertThat(fieldName).isEqualTo("cId"); - assertThat(setterName).isEqualTo("setcId"); - } -} diff --git a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/util/HierarchyScannerTest.java b/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/util/HierarchyScannerTest.java deleted file mode 100644 index e5dd5c1b527..00000000000 --- a/mapper-processor/src/test/java/com/datastax/oss/driver/internal/mapper/processor/util/HierarchyScannerTest.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to the Apache Software Foundation 
(ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.processor.util; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.mapper.annotations.HierarchyScanStrategy; -import com.datastax.oss.driver.internal.mapper.processor.ProcessorContext; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.Element; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; -import org.junit.Test; -import org.mockito.Mockito; - -public class HierarchyScannerTest { - - private final ProcessorContext context; - - public HierarchyScannerTest() { - this.context = Mockito.mock(ProcessorContext.class); - Types types = 
Mockito.mock(Types.class); - Classes classUtils = Mockito.mock(Classes.class); - - // used for resolving TypeMirror for default HierarchyScanStrategy highestAncestor - // (Object.class), in this case just return a mocked TypeElement. - Elements elements = Mockito.mock(Elements.class); - Mockito.when(elements.getTypeElement(Mockito.anyString())) - .thenReturn(Mockito.mock(TypeElement.class)); - - Mockito.when(context.getTypeUtils()).thenReturn(types); - Mockito.when(context.getClassUtils()).thenReturn(classUtils); - Mockito.when(context.getElementUtils()).thenReturn(elements); - - Mockito.when(classUtils.isSame(Mockito.any(Element.class), Mockito.any(Class.class))) - .thenReturn(false); - Mockito.when(types.isSameType(Mockito.any(TypeMirror.class), Mockito.any(TypeMirror.class))) - .thenAnswer(invocation -> invocation.getArgument(0) == invocation.getArgument(1)); - } - - @Test - public void should_build_proper_hierarchy_with_default_strategy() { - /* - * given the following hierarchy - * - * a - * / - * z - * / - * a y - * / / - * b x w - * \ / / - * c - */ - MockInterface a = i("a"); - MockInterface z = i("z", a); - MockInterface y = i("y", z); - MockInterface x = i("x", y); - MockInterface w = i("w"); - MockClass b = c("b", null, a); - MockClass c = c("c", b, x, w); - - // when no HierarchyScanStrategy is defined, then the default behavior should be used - // which is to scan the entire tree and return it in the correct (bottom up) order. - assertThat(HierarchyScanner.resolveTypeHierarchy(c.classElement, context).toString()) - .isEqualTo("[c, b, x, w, a, y, z]"); - } - - @Test - public void should_not_scan_hierarchy_if_scanAncestors_is_false() { - /* - * given the following hierarchy - * - * a - * / - * z - * / - * a @y - * / / - * b x w - * \ / / - * c - */ - // with a HierarchyScanStrategy annotation indicates to not scan ancestors - // then only traverse the base element. 
- // while odd, the strategy is defined on a parent interface, but for practical reasons - // this seems reasonable. If the intent is not to allow annotations beyond y, it - // could be specified that way. - HierarchyScanStrategy strategy = Mockito.mock(HierarchyScanStrategy.class); - Mockito.when(strategy.scanAncestors()).thenReturn(false); - MockInterface a = i("a"); - MockInterface z = i("z", a); - MockInterface y = i("y", strategy, null, z); - MockInterface x = i("x", y); - MockInterface w = i("w"); - MockClass b = c("b", null, a); - MockClass c = c("c", b, x, w); - - // when no HierarchyScanStrategy is defined, then the default behavior should be used - // which is to scan the entire tree and return it in the correct (bottom up) order. - assertThat(HierarchyScanner.resolveTypeHierarchy(c.classElement, context).toString()) - .isEqualTo("[c]"); - } - - @Test - @SuppressWarnings("unchecked") - public void should_build_property_hierarchy_with_strategy_defined_on_class() { - /* - * given the following hierarchy - * - * a - * / - * a r z - * / / / - * b y - * \ / - * @d x w - * \ / / - * c - */ - // with a HierarchyScanStrategy annotation on "d" that dictates that "b" is the highest class, - // but to not include it. - HierarchyScanStrategy bHighest = Mockito.mock(HierarchyScanStrategy.class); - // return this class' name just so we have something to check against. - Mockito.when(bHighest.highestAncestor()).thenReturn((Class) this.getClass()); - Mockito.when(bHighest.includeHighestAncestor()).thenReturn(false); - Mockito.when(bHighest.scanAncestors()).thenReturn(true); - MockInterface a = i("a"); - MockInterface r = i("r"); - MockInterface z = i("z", a); - MockInterface y = i("y", z); - MockInterface x = i("x", y); - MockInterface w = i("w"); - - MockClass b = c("b", null, a, r); - // when checking to see if we're at the 'highest' class (this.getClass()) return true - // for b. 
- Mockito.when(context.getClassUtils().isSame(b.classElement, this.getClass())).thenReturn(true); - MockClass d = c("d", bHighest, b, b); - MockClass c = c("c", d, x, w); - - // b and its interfaces should be skipped, however a will also be encountered - // as it is still in the hierarchy of c -> x -> y -> z -> a - assertThat(HierarchyScanner.resolveTypeHierarchy(c.classElement, context).toString()) - .isEqualTo("[c, d, x, w, y, z, a]"); - } - - private MockClass c(String name, MockClass parent, MockInterface... interfaces) { - return new MockClass(name, null, null, parent, interfaces); - } - - @Test - @SuppressWarnings("unchecked") - public void should_build_property_hierarchy_with_strategy_defined_on_interface_include_highest() { - /* - * given the following hierarchy - * - * a - * / - * a r z - * / / / - * b y - * \ / - * d @x w - * \ / / - * c - */ - // with a HierarchyScanStrategy annotation on "x" that dictates that "y" is the highest class, - // but to include it. - HierarchyScanStrategy yHighest = Mockito.mock(HierarchyScanStrategy.class); - Mockito.when(yHighest.includeHighestAncestor()).thenReturn(true); - Mockito.when(yHighest.scanAncestors()).thenReturn(true); - MockInterface a = i("a"); - MockInterface r = i("r"); - MockInterface z = i("z", a); - MockInterface y = i("y", z); - MockInterface x = i("x", yHighest, y, y); - // when checking to see if we're at the 'highest' class (this.getClass()) return true - // for y. 
- Mockito.when(context.getClassUtils().isSame(y.classElement, this.getClass())).thenReturn(true); - MockInterface w = i("w"); - - MockClass b = c("b", null, a, r); - MockClass d = c("d", b); - MockClass c = c("c", d, x, w); - - // y's parent interface z should be skipped, however a will also be encountered - // as it is still in the hierarchy of c -> d -> b -> a - assertThat(HierarchyScanner.resolveTypeHierarchy(c.classElement, context).toString()) - .isEqualTo("[c, d, x, w, b, y, a, r]"); - } - - private MockClass c( - String name, - HierarchyScanStrategy strategy, - MockElement highestAncestor, - MockClass parent, - MockInterface... interfaces) { - return new MockClass(name, strategy, highestAncestor, parent, interfaces); - } - - private MockInterface i(String name, MockInterface... interfaces) { - return new MockInterface(name, null, null, interfaces); - } - - private MockInterface i( - String name, - HierarchyScanStrategy strategy, - MockElement highestAncestor, - MockInterface... interfaces) { - return new MockInterface(name, strategy, highestAncestor, interfaces); - } - - private TypeMirror root() { - TypeMirror noneMirror = Mockito.mock(TypeMirror.class); - Mockito.when(noneMirror.getKind()).thenReturn(TypeKind.NONE); - - return noneMirror; - } - - class MockElement { - final String name; - final TypeMirror mirror; - final TypeElement classElement; - final List interfaces; - final MockClass parent; - final Name qfName; - - @SuppressWarnings("unchecked") - MockElement( - String name, - HierarchyScanStrategy strategy, - MockElement highestAncestor, - MockClass parent, - MockInterface... interfaces) { - this.name = name; - this.parent = parent; - this.interfaces = Arrays.asList(interfaces); - - this.classElement = Mockito.mock(TypeElement.class); - Mockito.when(classElement.toString()).thenReturn(name); - - TypeMirror parentMirror = parent != null ? 
parent.mirror : root(); - Mockito.when(classElement.getSuperclass()).thenReturn(parentMirror); - - List interfaceList = Lists.newArrayList(); - for (MockElement i : interfaces) { - interfaceList.add(i.mirror); - } - Mockito.when(classElement.getInterfaces()).thenReturn(interfaceList); - - this.mirror = Mockito.mock(TypeMirror.class); - Mockito.when(mirror.getKind()).thenReturn(TypeKind.DECLARED); - Mockito.when(mirror.toString()).thenReturn(name); - Mockito.when(classElement.asType()).thenReturn(mirror); - Mockito.when(context.getTypeUtils().asElement(mirror)).thenReturn(classElement); - - this.qfName = Mockito.mock(Name.class); - Mockito.when(qfName.toString()).thenReturn(name); - Mockito.when(classElement.getQualifiedName()).thenReturn(qfName); - - // mock annotation mirror logic. - if (strategy != null) { - Mockito.when(classElement.getAnnotation(HierarchyScanStrategy.class)).thenReturn(strategy); - - AnnotationMirror annotationMirror = Mockito.mock(AnnotationMirror.class); - DeclaredType annotationType = Mockito.mock(DeclaredType.class); - Mockito.when(annotationMirror.getAnnotationType()).thenReturn(annotationType); - List annotationMirrors = Lists.newArrayList(annotationMirror); - Mockito.when(classElement.getAnnotationMirrors()).thenReturn(annotationMirrors); - Mockito.when(context.getClassUtils().isSame(annotationType, HierarchyScanStrategy.class)) - .thenReturn(true); - - Map annotationElementValues = Maps.newHashMap(); - annotationElementValues.put( - mockAnnotationElement("scanAncestors"), mockAnnotationValue(strategy.scanAncestors())); - annotationElementValues.put( - mockAnnotationElement("includeHighestAncestor"), - mockAnnotationValue(strategy.includeHighestAncestor())); - if (highestAncestor != null) { - annotationElementValues.put( - mockAnnotationElement("highestAncestor"), - mockAnnotationValue(highestAncestor.mirror)); - } - Mockito.when(annotationMirror.getElementValues()).thenReturn(annotationElementValues); - } - } - - private 
ExecutableElement mockAnnotationElement(String key) { - ExecutableElement element = Mockito.mock(ExecutableElement.class); - Name name = Mockito.mock(Name.class); - Mockito.when(name.contentEquals(key)).thenReturn(true); - Mockito.when(element.getSimpleName()).thenReturn(name); - return element; - } - - private AnnotationValue mockAnnotationValue(Object value) { - AnnotationValue aValue = Mockito.mock(AnnotationValue.class); - Mockito.when(aValue.getValue()).thenReturn(value); - return aValue; - } - } - - class MockClass extends MockElement { - - MockClass( - String name, - HierarchyScanStrategy strategy, - MockElement highestAncestor, - MockClass parent, - MockInterface... interfaces) { - super(name, strategy, highestAncestor, parent, interfaces); - } - } - - class MockInterface extends MockElement { - - MockInterface( - String name, - HierarchyScanStrategy strategy, - MockElement highestAncestor, - MockInterface... interfaces) { - super(name, strategy, highestAncestor, null, interfaces); - } - } -} diff --git a/mapper-processor/src/test/resources/logback-test.xml b/mapper-processor/src/test/resources/logback-test.xml deleted file mode 100644 index 6c74716c03a..00000000000 --- a/mapper-processor/src/test/resources/logback-test.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - diff --git a/mapper-processor/src/test/resources/project.properties b/mapper-processor/src/test/resources/project.properties deleted file mode 100644 index 66eab90b6e4..00000000000 --- a/mapper-processor/src/test/resources/project.properties +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -project.basedir=${basedir} \ No newline at end of file diff --git a/mapper-runtime/pom.xml b/mapper-runtime/pom.xml deleted file mode 100644 index 57fbd5d3432..00000000000 --- a/mapper-runtime/pom.xml +++ /dev/null @@ -1,227 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-mapper-runtime - bundle - Apache Cassandra Java Driver - object mapper runtime - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - ${project.groupId} - java-driver-core - - - ${project.groupId} - java-driver-query-builder - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - junit - junit - test - - - org.testng - testng - test - - - org.reactivestreams - reactive-streams-tck - test - - - io.reactivex.rxjava2 - rxjava - test - - - org.mockito - mockito-core - test - - - org.assertj - assertj-core - test - - - org.apache.cassandra - java-driver-core - test - test-jar - - - - - - src/main/resources - - - ${project.basedir}/.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - src/test/resources - - project.properties - - true - - - src/test/resources - - project.properties - - false - - - - - maven-jar-plugin - - - - com.datastax.oss.driver.mapper.runtime - - - - - - maven-surefire-plugin - - ${testing.jvm}/bin/java - 1 - - - - junit - false - - - suitename - Reactive Streams TCK - - - - - - org.apache.maven.surefire - surefire-junit47 - ${surefire.version} - - - org.apache.maven.surefire - surefire-testng - ${surefire.version} - - - - - org.apache.felix - maven-bundle-plugin - true - - - - bundle - - - - com.datastax.oss.driver.mapper-runtime - - * - - !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, - org.reactivestreams.*;resolution:=optional, * - - com.datastax.*.driver.*.mapper.* - - - - - - - maven-dependency-plugin - - - generate-dependency-list - - list - - generate-resources - - runtime - true - com.datastax.cassandra,com.datastax.dse - ${project.build.outputDirectory}/com/datastax/dse/driver/internal/mapper/deps.txt - - - - - - - diff --git a/mapper-runtime/revapi.json b/mapper-runtime/revapi.json deleted file mode 100644 index 5e6fca667bd..00000000000 --- a/mapper-runtime/revapi.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "revapi": { - "java": { - "filter": { - "packages": { - "regex": true, - "exclude": [ - "com\\.datastax\\.oss\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", - "com\\.datastax\\.oss\\.simulacron(\\..+)?", - "// Don't re-check sibling modules that this module depends on", - "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.core(\\..+)?", - "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.querybuilder(\\..+)?" 
- ] - } - } - }, - "ignore": [ - ] - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.java deleted file mode 100644 index b4e6960ed66..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.mapper.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveQueryMetadata; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import org.reactivestreams.Publisher; - -/** - * A {@link Publisher} of mapped entities returned by DAO methods. In other words, this interface is - * the equivalent of {@link ReactiveResultSet} for mapped entities. - * - *

    By default, all implementations returned by the driver are cold, unicast, single-subscriber - * only publishers. In other words, they do not support multiple subscriptions; consider - * caching the results produced by such publishers if you need to consume them by more than one - * downstream subscriber. - * - *

    Also, note that mapped reactive result sets may emit items to their subscribers on an internal - * driver IO thread. Subscriber implementors are encouraged to abide by Reactive Streams - * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside - * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down - * the driver and impact performance. Instead, they should asynchronously dispatch received signals - * to their processing logic. - * - *

    This type is located in a {@code dse} package for historical reasons; reactive result sets - * work with both Cassandra and DSE. - * - * @see ReactiveResultSet - */ -public interface MappedReactiveResultSet - extends Publisher, ReactiveQueryMetadata {} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/DefaultMappedReactiveResultSet.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/DefaultMappedReactiveResultSet.java deleted file mode 100644 index e1e701faddd..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/DefaultMappedReactiveResultSet.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.mapper.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.function.Function; -import org.reactivestreams.Publisher; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DefaultMappedReactiveResultSet implements MappedReactiveResultSet { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultMappedReactiveResultSet.class); - - private static final Subscription EMPTY_SUBSCRIPTION = - new Subscription() { - @Override - public void request(long n) {} - - @Override - public void cancel() {} - }; - - @NonNull private final ReactiveResultSet source; - - @NonNull private final Function mapper; - - public DefaultMappedReactiveResultSet( - @NonNull ReactiveResultSet source, @NonNull Function mapper) { - this.source = source; - this.mapper = mapper; - } - - @Override - @NonNull - public Publisher getColumnDefinitions() { - return source.getColumnDefinitions(); - } - - @Override - @NonNull - public Publisher getExecutionInfos() { - return source.getExecutionInfos(); - } - - @Override - @NonNull - public Publisher wasApplied() { - return source.wasApplied(); - } - - @Override - public void subscribe(@NonNull Subscriber subscriber) { - // As per rule 1.9, we need to throw an NPE if subscriber is null - Objects.requireNonNull(subscriber, "Subscriber cannot be null"); - // As per rule 1.11, this publisher supports multiple subscribers in a unicast configuration, - // as long as the source publisher does too. 
- MappedReactiveResultSetSubscriber s = new MappedReactiveResultSetSubscriber(subscriber); - try { - source.subscribe(s); - } catch (Throwable t) { - // As per rule 1.9: subscribe MUST return normally. The only legal way to signal failure (or - // reject the Subscriber) is by calling onError (after calling onSubscribe). - s.cancel(); - IllegalStateException error = - new IllegalStateException( - "Publisher violated $1.9 by throwing an exception from subscribe.", t); - LOG.error(error.getMessage(), error.getCause()); - // This may violate 1.9 since we cannot know if subscriber.onSubscribe was called or not. - subscriber.onSubscribe(EMPTY_SUBSCRIPTION); - subscriber.onError(error); - } - // As per 1.9, this method must return normally (i.e. not throw) - } - - private class MappedReactiveResultSetSubscriber implements Subscriber, Subscription { - - private volatile Subscriber downstreamSubscriber; - private volatile Subscription upstreamSubscription; - private volatile boolean terminated; - - MappedReactiveResultSetSubscriber(@NonNull Subscriber subscriber) { - this.downstreamSubscriber = subscriber; - } - - @Override - public void onSubscribe(@NonNull Subscription subscription) { - // As per rule 2.13, we need to throw NPE if the subscription is null - Objects.requireNonNull(subscription, "Subscription cannot be null"); - // As per rule 2.12, Subscriber.onSubscribe MUST be called at most once for a given subscriber - if (upstreamSubscription != null) { - try { - // Cancel the additional subscription - subscription.cancel(); - } catch (Throwable t) { - // As per rule 3.15, Subscription.cancel is not allowed to throw an exception; the only - // thing we can do is log. 
- LOG.error("Subscription violated $3.15 by throwing an exception from cancel.", t); - } - } else if (!terminated) { - upstreamSubscription = subscription; - try { - downstreamSubscriber.onSubscribe(this); - } catch (Throwable t) { - // As per rule 2.13: In the case that this rule is violated, - // any associated Subscription to the Subscriber MUST be considered as - // cancelled... - cancel(); - // ...and the caller MUST raise this error condition in a fashion that is "adequate for - // the runtime environment" (we choose to log). - LOG.error("Subscriber violated $2.13 by throwing an exception from onSubscribe.", t); - } - } - } - - @Override - public void onNext(@NonNull ReactiveRow row) { - LOG.trace("Received onNext: {}", row); - if (upstreamSubscription == null) { - LOG.error("Publisher violated $1.09 by signalling onNext prior to onSubscribe."); - } else if (!terminated) { - Objects.requireNonNull(row, "Publisher violated $2.13 by emitting a null element"); - EntityT entity; - try { - entity = mapper.apply(row); - } catch (Throwable t) { - onError(t); - return; - } - Objects.requireNonNull(entity, "Publisher violated $2.13 by generating a null entity"); - try { - downstreamSubscriber.onNext(entity); - } catch (Throwable t) { - LOG.error("Subscriber violated $2.13 by throwing an exception from onNext.", t); - cancel(); - } - } - } - - @Override - public void onComplete() { - LOG.trace("Received onComplete"); - if (upstreamSubscription == null) { - LOG.error("Publisher violated $1.09 by signalling onComplete prior to onSubscribe."); - } else if (!terminated) { - try { - downstreamSubscriber.onComplete(); - } catch (Throwable t) { - LOG.error("Subscriber violated $2.13 by throwing an exception from onComplete.", t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - } - - @Override - public void onError(@NonNull Throwable error) { - LOG.trace("Received onError", error); - if (upstreamSubscription == null) { - 
LOG.error("Publisher violated $1.09 by signalling onError prior to onSubscribe."); - } else if (!terminated) { - Objects.requireNonNull(error, "Publisher violated $2.13 by signalling a null error"); - try { - downstreamSubscriber.onError(error); - } catch (Throwable t) { - t.addSuppressed(error); - LOG.error("Subscriber violated $2.13 by throwing an exception from onError.", t); - } - // We need to consider this Subscription as cancelled as per rule 1.6 - cancel(); - } - } - - @Override - public void request(long n) { - LOG.trace("Received request: {}", n); - // As per 3.6: after the Subscription is cancelled, additional calls to request() MUST be - // NOPs. - // Implementation note: triggering onError() from below may break 1.3 because this method is - // called by the subscriber thread, and it can race with the producer thread. But these - // situations are already abnormal, so there is no point in trying to prevent the race - // condition with locks. - if (!terminated) { - if (n <= 0) { - // Validate request as per rule 3.9: While the subscription is not cancelled, - // Subscription.request(long n) MUST signal onError with a - // java.lang.IllegalArgumentException if the argument is <= 0. - // The cause message SHOULD explain that non-positive request signals are illegal. - onError( - new IllegalArgumentException( - "Subscriber violated $3.9 by requesting a non-positive number of elements.")); - } else { - try { - upstreamSubscription.request(n); - } catch (Throwable t) { - // As per rule 3.16, Subscription.request is not allowed to throw - IllegalStateException error = - new IllegalStateException( - "Subscription violated $3.16 by throwing an exception from request.", t); - onError(error); - } - } - } - } - - @Override - public void cancel() { - // As per 3.5: Subscription.cancel() MUST respect the responsiveness of its caller by - // returning in a timely manner, MUST be idempotent and MUST be thread-safe. 
- if (!terminated) { - terminated = true; - LOG.trace("Cancelling"); - // propagate cancellation, if we got a chance to subscribe to the upstream source - if (upstreamSubscription != null) { - upstreamSubscription.cancel(); - } - // As per 3.13, Subscription.cancel() MUST request the Publisher to - // eventually drop any references to the corresponding subscriber. - downstreamSubscriber = null; - upstreamSubscription = null; - } - } - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/FailedMappedReactiveResultSet.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/FailedMappedReactiveResultSet.java deleted file mode 100644 index 3ed27edbf9d..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/FailedMappedReactiveResultSet.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.mapper.reactive; - -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.dse.driver.internal.core.cql.reactive.FailedPublisher; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import edu.umd.cs.findbugs.annotations.NonNull; -import org.reactivestreams.Publisher; - -/** - * A mapped reactive result set that immediately signals the error passed at instantiation to all - * its subscribers. - */ -public class FailedMappedReactiveResultSet extends FailedPublisher - implements MappedReactiveResultSet { - - public FailedMappedReactiveResultSet(Throwable error) { - super(error); - } - - @NonNull - @Override - public Publisher getColumnDefinitions() { - return new FailedPublisher<>(error); - } - - @NonNull - @Override - public Publisher getExecutionInfos() { - return new FailedPublisher<>(error); - } - - @NonNull - @Override - public Publisher wasApplied() { - return new FailedPublisher<>(error); - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/ReactiveDaoBase.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/ReactiveDaoBase.java deleted file mode 100644 index 56576829a40..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/ReactiveDaoBase.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.mapper.reactive; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.internal.mapper.DaoBase; - -public class ReactiveDaoBase extends DaoBase { - - protected ReactiveDaoBase(MapperContext context) { - super(context); - } - - protected ReactiveResultSet executeReactive(Statement statement) { - return context.getSession().executeReactive(statement); - } - - protected MappedReactiveResultSet executeReactiveAndMap( - Statement statement, EntityHelper entityHelper) { - ReactiveResultSet source = executeReactive(statement); - return new DefaultMappedReactiveResultSet<>(source, row -> entityHelper.get(row, false)); - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java deleted file mode 100644 index 3838892172a..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; -import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.HashMap; -import java.util.Map; - -/** - * Builds an instance of a {@link Mapper}-annotated interface wrapping a {@link CqlSession}. - * - *

    The mapper generates an implementation of this class for every such interface. It is either - * named {@code Builder}, or what you provided in {@link Mapper#builderName()}. - */ -public abstract class MapperBuilder { - - public static final String SCHEMA_VALIDATION_ENABLED_SETTING = - "datastax.mapper.schemaValidationEnabled"; - protected final CqlSession session; - protected CqlIdentifier defaultKeyspaceId; - protected Map customState; - protected String defaultExecutionProfileName; - protected DriverExecutionProfile defaultExecutionProfile; - - protected MapperBuilder(CqlSession session) { - this.session = session; - this.customState = new HashMap<>(); - // schema validation is enabled by default - customState.put(SCHEMA_VALIDATION_ENABLED_SETTING, true); - } - - /** - * Specifies a default keyspace that will be used for all DAOs built with this mapper (unless they - * specify their own keyspace). - * - *

    In other words, given the following definitions: - * - *

    -   * @Mapper
    -   * public interface InventoryMapper {
    -   *   @DaoFactory
    -   *   ProductDao productDao();
    -   *
    -   *   @DaoFactory
    -   *   ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace);
    -   * }
    -   *
    -   * InventoryMapper mapper1 = new InventoryMapperBuilder(session)
    -   *     .withDefaultKeyspace(CqlIdentifier.fromCql("ks1"))
    -   *     .build();
    -   * InventoryMapper mapper2 = new InventoryMapperBuilder(session)
    -   *     .withDefaultKeyspace(CqlIdentifier.fromCql("ks2"))
    -   *     .build();
    -   * 
    - * - * Then: - * - *
      - *
    • {@code mapper1.productDao()} will use keyspace {@code ks1}; - *
    • {@code mapper2.productDao()} will use keyspace {@code ks2}; - *
    • {@code mapper1.productDao(CqlIdentifier.fromCql("ks3"))} will use keyspace {@code ks3}. - *
    - * - * @see DaoFactory - */ - @NonNull - public MapperBuilder withDefaultKeyspace(@Nullable CqlIdentifier keyspaceId) { - this.defaultKeyspaceId = keyspaceId; - return this; - } - - /** - * Shortcut for {@link #withDefaultKeyspace(CqlIdentifier) - * withDefaultKeyspace(CqlIdentifier.fromCql(keyspaceName))}. - */ - @NonNull - public MapperBuilder withDefaultKeyspace(@Nullable String keyspaceName) { - return withDefaultKeyspace(keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * Specifies a default execution profile name that will be used for all DAOs built with this - * mapper (unless they specify their own execution profile). - * - *

    This works the same way as the {@linkplain #withDefaultKeyspace(CqlIdentifier) default - * keyspace}. - * - *

    Note that if you had already set a profile with #withDefaultExecutionProfile, this method - * erases it. - * - * @see DaoFactory - */ - @NonNull - public MapperBuilder withDefaultExecutionProfileName( - @Nullable String executionProfileName) { - this.defaultExecutionProfileName = executionProfileName; - if (executionProfileName != null) { - this.defaultExecutionProfile = null; - } - return this; - } - - /** - * Specifies a default execution profile name that will be used for all DAOs built with this - * mapper (unless they specify their own execution profile). - * - *

    This works the same way as the {@linkplain #withDefaultKeyspace(CqlIdentifier) default - * keyspace}. - * - *

    Note that if you had already set a profile name with #withDefaultExecutionProfileName, this - * method erases it. - * - * @see DaoFactory - */ - @NonNull - public MapperBuilder withDefaultExecutionProfile( - @Nullable DriverExecutionProfile executionProfile) { - this.defaultExecutionProfile = executionProfile; - if (executionProfile != null) { - this.defaultExecutionProfileName = null; - } - return this; - } - - /** - * Whether to validate mapped entities against the database schema. - * - *

    If this is enabled, then every time a new DAO gets created, for each entity referenced in - * the DAO, the mapper will check that there is a corresponding table or UDT. - * - *

      - *
    • for each entity field, the database table or UDT must contain a column with the - * corresponding name (according to the {@link NamingStrategy}). - *
    • the types must be compatible, according to the {@link CodecRegistry} used by the session. - *
    • additionally, if the target element is a table, the primary key must be properly - * annotated in the entity. - *
    - * - * If any of those steps fails, an {@link IllegalArgumentException} is thrown. - * - *

    Schema validation is enabled by default; it adds a small startup overhead, so once your - * application is stable you may want to disable it. - * - * @see SchemaHint - */ - public MapperBuilder withSchemaValidationEnabled(boolean enableSchemaValidation) { - customState.put(SCHEMA_VALIDATION_ENABLED_SETTING, enableSchemaValidation); - return this; - } - - /** - * Stores custom state that will be propagated to {@link MapperContext#getCustomState()}. - * - *

    This is intended mainly for {@link QueryProvider} methods: since provider classes are - * instantiated directly by the generated mapper code, they have no way to access non-static state - * from the rest of your application. This method allows you to pass that state while building the - * mapper, and access it later at runtime. - * - *

    Note that this state will be accessed concurrently, it should be thread-safe. - */ - @NonNull - public MapperBuilder withCustomState(@Nullable Object key, @Nullable Object value) { - customState.put(key, value); - return this; - } - - public abstract MapperT build(); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java deleted file mode 100644 index 9f9df5f93f7..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; - -/** - * A runtime context that gets passed from the mapper to DAO components to share global resources - * and configuration. - */ -public interface MapperContext { - - @NonNull - CqlSession getSession(); - - /** - * If this context belongs to a DAO that was built with a keyspace-parameterized mapper method, - * the value of that parameter. Otherwise null. - */ - @Nullable - CqlIdentifier getKeyspaceId(); - - /** - * If this context belongs to a DAO that was built with a table-parameterized mapper method, the - * value of that parameter. Otherwise null. - */ - @Nullable - CqlIdentifier getTableId(); - - /** - * If this context belongs to a DAO that was built with a method that takes an execution profile - * name as parameter, the value of that parameter. Otherwise null. - * - *

    Note that this is mutually exclusive with {@link #getExecutionProfile()}: at most one of the - * two methods returns a non-null value (or both return null if no profile was provided). - */ - @Nullable - String getExecutionProfileName(); - - /** - * If this context belongs to a DAO that was built with a method that takes an execution profile - * as parameter, the value of that parameter. Otherwise null. - * - *

    Note that this is mutually exclusive with {@link #getExecutionProfileName()}: at most one of - * the two methods returns a non-null value (or both return null if no profile was provided). - */ - @Nullable - DriverExecutionProfile getExecutionProfile(); - - /** - * Returns an instance of the given converter class. - * - *

    The results of this method are cached at the mapper level. If no instance of this class - * exists yet for this mapper, a new instance is built by looking for a public no-arg constructor. - */ - @NonNull - NameConverter getNameConverter(Class converterClass); - - /** - * Retrieves any custom state that was set while building the mapper with {@link - * MapperBuilder#withCustomState(Object, Object)}. - * - *

    The returned map is immutable. If no state was set on the builder, it will be empty. - */ - @NonNull - Map getCustomState(); - - /** - * Returns a component that will execute a statement and convert it into a custom result of the - * given type. - * - *

    These components must be registered through the Java Service Provider Interface mechanism, - * see {@link MapperResultProducerService}. - * - *

    The results of this method are cached at the JVM level. - * - * @throws IllegalArgumentException if no producer was registered for this type. - */ - @NonNull - MapperResultProducer getResultProducer(@NonNull GenericType resultToProduce); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java deleted file mode 100644 index f659ac00ad5..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper; - -import com.datastax.oss.driver.api.core.DriverException; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A runtime issue with the object mapper. - * - *

    Most configuration issues (e.g. misuse of the annotations) can be detected at compile-time, - * and will be reported as compiler errors instead. This exception is reserved for things that can - * only be checked at runtime, for example session state (protocol version, schema, etc). - * - *

    {@link #getExecutionInfo()} always returns {@code null} for this type. - */ -public class MapperException extends DriverException { - - public MapperException(@NonNull String message, @Nullable Throwable cause) { - super(message, null, cause, true); - } - - public MapperException(@NonNull String message) { - this(message, null); - } - - @NonNull - @Override - public DriverException copy() { - return new MapperException(getMessage(), this); - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java deleted file mode 100644 index cfbf97e73aa..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the field or getter of an {@link Entity} property, to indicate that it's a clustering - * column. - * - *

    Example: - * - *

    - * @ClusteringColumn private int month;
    - * 
    - * - * This information is used by the mapper processor to generate default queries (for example a basic - * {@link Select}). - * - *

    If there are multiple clustering columns, you must specify {@link #value()} to indicate the - * position of each property: - * - *

    - * @ClusteringColumn(1) private int month;
    - * @ClusteringColumn(2) private int day;
    - * 
    - * - * If you don't specify positions, or if there are duplicates, the mapper processor will issue a - * compile-time error. - * - *

    This annotation is mutually exclusive with {@link PartitionKey}. - */ -@Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.RUNTIME) -public @interface ClusteringColumn { - - /** - * The position of the clustering column. - * - *

    This is only required if there are multiple clustering columns. Positions are not strictly - * required to be consecutive or start at a given index, but for clarity it is recommended to use - * consecutive integers. - */ - int value() default 0; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java deleted file mode 100644 index 817bbf2c294..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the field or getter of an {@link Entity} property, to indicate that when retrieving - * data that the property should be set to the result of computation on the Cassandra side, - * typically a function call. - * - *

    Example: - * - *

    - * @Computed("writetime(v)")
    - * private int writeTime;
    - * 
    - */ -@Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.RUNTIME) -public @interface Computed { - - /** - * The formula used to compute the property. - * - *

    This is a CQL expression like you would use directly in a query, for instance {@code - * writetime(v)}. - * - * @return the formula. - */ - String value(); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java deleted file mode 100644 index 9b9ef15afb6..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an {@link Entity} class or one of its properties (field or getter), to specify a custom - * CQL name. - * - *

    This annotation can also be used to annotate DAO method parameters when they correspond to - * bind markers in custom {@code WHERE} clauses; if the parameter is not annotated with {@link - * CqlName}, its programmatic name will be used instead as the bind marker name. - * - *

    Beware that if the DAO interface was pre-compiled and is located in a jar, then all its bind - * marker parameters must be annotated with {@link CqlName}; failing to do so will result in a - * compilation failure because class files do not retain parameter names by default. If you intend - * to distribute your DAO interface in a pre-compiled fashion, it is preferable to always annotate - * such method parameters. - * - *

    Example: - * - *

    - * @Entity
    - * public class Product {
    - *   @PartitionKey
    - *   @CqlName("product_id")
    - *   private int id;
    - *   ...
    - * }
    - * @Dao
    - * public class ProductDao {
    - *     @Query("SELECT count(*) FROM ${qualifiedTableId} WHERE product_id = :product_id")
    - *     long countById(@CqlName("product_id") int id);
    - *   ...
    - * }
    - * 
    - * - * This annotation takes precedence over the {@link NamingStrategy naming strategy} defined for the - * entity. - */ -@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) -@Retention(RetentionPolicy.RUNTIME) -public @interface CqlName { - - /** - * The CQL name to use for this entity or property. - * - *

    If you want it to be case-sensitive, it must be enclosed in double-quotes, for example: - * - *

    -   * @CqlName("\"productId\"")
    -   * private int id;
    -   * 
    - */ - String value(); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java deleted file mode 100644 index bcab01d98fc..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an interface that defines a set of query methods, usually (but not necessarily) related - * to a given entity class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @Select
    - *   Product findById(UUID productId);
    - *
    - *   @Insert
    - *   void save(Product product);
    - *
    - *   @Delete
    - *   void delete(Product product);
    - * }
    - * 
    - * - * DAO instances are created via {@link DaoFactory} methods. - * - *

    DAO interfaces can define the following methods: - * - *

      - *
    • {@link Delete} - *
    • {@link GetEntity} - *
    • {@link Insert} - *
    • {@link Query} - *
    • {@link QueryProvider} - *
    • {@link Select} - *
    • {@link SetEntity} - *
    • {@link Update} - *
    - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface Dao {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java deleted file mode 100644 index c792c132fb0..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates a DAO-producing method in a {@link Mapper} interface. - * - *

    Example: - * - *

    - * @Mapper
    - * public interface InventoryMapper {
    - *   @DaoFactory
    - *   ProductDao productDao();
    - * }
    - * 
    - * - * The return type of the method must be a {@link Dao}-annotated interface. - * - *

    If the method takes no arguments, the DAO operates on the session's default keyspace (assuming - * that one was set), and the entity's default table: - * - *

    - * // Example 1: the session has a default keyspace
    - * CqlSession session = CqlSession.builder().withKeyspace("test").build();
    - * InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build();
    - * ProductDao dao = inventoryMapper.productDao();
    - * Product product = dao.selectById(1);
    - * // => success (selects from test.product)
    - *
    - * // Example 2: the session has no default keyspace
    - * CqlSession session = CqlSession.builder().build();
    - * InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build();
    - * ProductDao dao = inventoryMapper.productDao();
    - * Product product = dao.selectById(1);
    - * // => CQL error (No keyspace has been specified. USE a keyspace, or explicitly specify keyspace.tablename)
    - * 
    - * - * You can also have the method take the keyspace and table as arguments (annotated respectively - * with {@link DaoKeyspace} and {@link DaoTable}): - * - *
    - * @Mapper
    - * public interface InventoryMapper {
    - *   @DaoFactory
    - *   ProductDao productDao(@DaoKeyspace String keyspace);
    - *
    - *   @DaoFactory
    - *   ProductDao productDao(@DaoKeyspace String keyspace, @DaoTable String table);
    - * }
    - * 
    - * - * This allows you to reuse the same DAO interface to operate on different tables: - * - *
    - * ProductDao dao1 = inventoryMapper.productDao("keyspace1");
    - * Product product = dao1.selectById(1); // selects from keyspace1.product
    - *
    - * ProductDao dao2 = inventoryMapper.productDao("keyspace2");
    - * Product product = dao2.selectById(1); // selects from keyspace2.product
    - *
    - * ProductDao dao3 = inventoryMapper.productDao("keyspace3", "table3");
    - * Product product = dao3.selectById(1); // selects from keyspace3.table3
    - * 
    - * - * In all cases, DAO instances are initialized lazily and cached for future calls: - * - *
    - * ProductDao dao1 = inventoryMapper.productDao("keyspace1", "product");
    - * ProductDao dao2 = inventoryMapper.productDao("keyspace1", "product");
    - * assert dao1 == dao2; // same arguments, same instance
    - * 
    - * - * Note that the cache is a simple map with no eviction mechanism. - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface DaoFactory {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java deleted file mode 100644 index 6f24ffa9d56..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the parameter of a {@link DaoFactory} method that indicates the keyspace to create a - * DAO for. - * - *

    Example: - * - *

    - * @Mapper
    - * public interface InventoryMapper {
    - *   ProductDao productDao(@DaoKeyspace String ks);
    - * }
    - * 
    - * - * The annotated parameter can be a {@link String} or {@link CqlIdentifier}. If it is present, the - * value will be injected in the DAO instance, where it will be used in generated queries, and can - * be substituted in text queries. This allows you to reuse the same DAO for different keyspaces. - * - * @see DaoFactory - */ -@Target(ElementType.PARAMETER) -@Retention(RetentionPolicy.RUNTIME) -public @interface DaoKeyspace {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoProfile.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoProfile.java deleted file mode 100644 index 66fc3ed433a..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoProfile.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the parameter of a {@link DaoFactory} method that indicates the execution profile to - * create a DAO for. - * - *

    Example: - * - *

    - *  * @Mapper
    - *  * public interface InventoryMapper {
    - *  *   ProductDao productDao(@DaoProfile String executionProfile);
    - *  * }
    - *  * 
    - * - * The annotated parameter can be a {@link String} or {@link DriverExecutionProfile}. If it is - * present, the value will be injected in the DAO instance, where it will be used in generated - * queries. This allows you to reuse the same DAO for different execution profiles. - * - * @see DaoFactory - */ -@Target(ElementType.PARAMETER) -@Retention(RetentionPolicy.RUNTIME) -public @interface DaoProfile {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java deleted file mode 100644 index 7ac0d66dd6c..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the parameter of a {@link DaoFactory} method that indicates the table to create a DAO - * for. - * - *

    Example: - * - *

    - * @Mapper
    - * public interface InventoryMapper {
    - *   ProductDao productDao(@DaoTable String table);
    - * }
    - * 
    - * - * The annotated parameter can be a {@link String} or {@link CqlIdentifier}. If it is present, the - * value will be injected in the DAO instance, where it will be used in generated queries, and can - * be substituted in text queries. This allows you to reuse the same DAO for different tables. - * - * @see DaoFactory - */ -@Target(ElementType.PARAMETER) -@Retention(RetentionPolicy.RUNTIME) -public @interface DaoTable {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java deleted file mode 100644 index a5c33b3f17f..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates a {@link Dao} interface to define a default {@link NullSavingStrategy}, that will apply - * to all methods that don't explicitly declare one. - * - *

    For example, given this interface: - * - *

    - * @Dao
    - * @DefaultNullSavingStrategy(SET_TO_NULL)
    - * public interface ProductDao {
    - *
    - *   @Insert
    - *   void insert(Product product);
    - *
    - *   @Update(nullSavingStrategy = DO_NOT_SET)
    - *   void update(Product product);
    - * }
    - * 
    - * - *
      - *
    • {@code insert(Product)} will use {@link NullSavingStrategy#SET_TO_NULL SET_TO_NULL} - * (inherited from the DAO's default). - *
    • {@code update(Product)} will use {@link NullSavingStrategy#DO_NOT_SET DO_NOT_SET}. - *
    - * - * If the DAO interface isn't annotated with {@link DefaultNullSavingStrategy}, any method that does - * not declare its own value defaults to {@link NullSavingStrategy#DO_NOT_SET DO_NOT_SET}. - * - *

    Note that null saving strategies are only relevant for {@link Update}, {@link Insert}, {@link - * Query} and {@link SetEntity} methods. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface DefaultNullSavingStrategy { - NullSavingStrategy value(); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java deleted file mode 100644 index a9d2c03912f..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.util.function.UnaryOperator; - -/** - * Annotates a {@link Dao} method that deletes an instance of an {@link Entity}-annotated class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @Delete
    - *   void delete(Product product);
    - * }
    - * 
    - * - *

    Parameters

    - * - * The method can operate either on an entity instance, or on a primary key (partition key + - * clustering columns). - * - *

    In the latter case, the parameters must match the types of the primary key columns, in the - * exact order (which is defined by the integer values of the {@link PartitionKey} and {@link - * ClusteringColumn} annotations in the entity class). The parameter names don't necessarily need to - * match the names of the columns. In addition, because the entity class can't be inferred from the - * method signature, it must be specified in the annotation with {@link #entityClass()}: - * - *

    - * @Delete(entityClass = Product.class)
    - * void deleteById(UUID productId);
    - * 
    - * - * An {@linkplain #customIfClause() optional IF clause} can be appended to the generated query. It - * can contain placeholders, for which the method must have corresponding parameters (same name, and - * a compatible Java type): - * - *
    - * @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription")
    - * ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription);
    - * 
    - * - *

    A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link - * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last - * parameter. It will be applied to the statement before execution. This allows you to customize - * certain aspects of the request (page size, timeout, etc) at runtime. - * - *

    Return type

    - * - * The method can return: - * - *
      - *
    • {@code void}. - *
    • a {@code boolean} or {@link Boolean}, which will be mapped to {@link - * ResultSet#wasApplied()}. This is intended for IF EXISTS queries: - *
      - * @Delete(ifExists = true)
      - * boolean deleteIfExists(Product product);
      - *       
      - *
    • a {@link ResultSet}. This is intended for queries with custom IF clauses; when those - * queries are not applied, they return the actual values of the tested columns. - *
      - * @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription")
      - * ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription);
      - * // if the condition fails, the result set will contain columns '[applied]' and 'description'
      - *       
      - *
    • a {@link BoundStatement}. This is intended for queries where you will execute this - * statement later or in a batch. - *
      - * @Delete
      - * BoundStatement delete(Product product);
      - *       
      - *
    • a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The method will - * execute the query asynchronously. Note that for result sets, you need to switch to {@link - * AsyncResultSet}. - *
      - * @Delete
      - * CompletableFuture<Void> deleteAsync(Product product);
      - *
      - * @Delete(ifExists = true)
      - * CompletionStage<Boolean> deleteIfExistsAsync(Product product);
      - *
      - * @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription")
      - * CompletionStage<AsyncResultSet> deleteIfDescriptionMatchesAsync(UUID productId, String expectedDescription);
      - *       
      - *
    • a {@link ReactiveResultSet}. - *
      - * @Delete
      - * ReactiveResultSet deleteReactive(Product product);
      - *       
      - *
    • a {@linkplain MapperResultProducer custom type}. - *
    - * - * Note that you can also return a boolean or result set for non-conditional queries, but there's no - * practical purpose for that since those queries always return {@code wasApplied = true} and an - * empty result set. - * - *

    Target keyspace and table

    - * - * If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated - * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the - * mapper was built from a {@link Session} that has a {@linkplain - * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. - * - *

    If a table was specified when creating the DAO, then the generated query targets that table. - * Otherwise, it uses the default table name for the entity (which is determined by the name of the - * entity class and the naming convention). - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface Delete { - - /** - * A hint to indicate the entity class, for cases where it can't be determined from the method's - * signature. - * - *

    This is only needed if the method receives the primary key components as arguments or uses a - * custom where clause: - * - *

    -   * @Delete(entityClass = Product.class)
    -   * void delete(UUID productId);
    -   *
    -   * @Delete(entityClass = Product.class, customWhereClause="product_id = :productId")
    -   * void delete(UUID productId);
    -   * 
    - * - * Note that, for technical reasons, this is an array, but only one element is expected. If you - * specify more than one class, the mapper processor will generate a compile-time warning, and - * proceed with the first one. - */ - Class[] entityClass() default {}; - - /** - * A custom WHERE clause for the DELETE query. - * - *

    If this is not empty, it completely replaces the WHERE clause in the generated query. Note - * that the provided string must not contain the {@code WHERE} keyword and {@link - * #entityClass()} must be specified. - * - *

    This clause can contain placeholders that will be bound with the method's parameters; see - * the top-level javadocs of this class for more explanations. - * - *

    Also note that this can be used in conjunction with {@link #customIfClause()} or {@link - * #ifExists()}. - */ - String customWhereClause() default ""; - - /** - * Whether to append an IF EXISTS clause at the end of the generated DELETE query. - * - *

    This is mutually exclusive with {@link #customIfClause()} (if both are set, the mapper - * processor will generate a compile-time error). - */ - boolean ifExists() default false; - - /** - * A custom IF clause for the DELETE query. - * - *

    This is mutually exclusive with {@link #ifExists()} (if both are set, the mapper processor - * will generate a compile-time error). - * - *

    If this is not empty, it gets added to the generated query. Note that the provided string - * must not contain the {@code IF} keyword. - * - *

    This clause can contain placeholders that will be bound with the method's parameters; see - * the top-level javadocs of this class for more explanations. - */ - String customIfClause() default ""; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java deleted file mode 100644 index 506c7f13d22..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates a class that will be mapped to a Cassandra table or UDT. - * - *

    Example: - * - *

    - * @Entity
    - * public class Product {
    - *   @PartitionKey private UUID id;
    - *   private String description;
    - *
    - *   public UUID getId() { return id; }
    - *   public void setId(UUID id) { this.id = id; }
    - *   public String getDescription() { return description; }
    - *   public void setDescription(String description) { this.description = description; }
    - * }
    - * 
    - * - * Entity classes follow the usual "POJO" conventions. Each property will be mapped to a CQL column. - * In order to detect a property: - * - *
      - *
    • there must be a getter method that follows the usual naming convention (e.g. {@code - * getDescription}) and has no parameters. The name of the property is obtained by removing - * the "get" prefix and decapitalizing ({@code description}), and the type of the property is - * the return type of the getter. - *
    • unless the entity is {@linkplain PropertyStrategy#mutable() immutable}, there must - * be a matching setter method ({@code setDescription}), with a single parameter that has the - * same type as the property (the return type does not matter). - *
    - * - * There may also be a matching field ({@code description}) that has the same type as the - * property, but this is not mandatory: a property can have only a getter and a setter (for example - * if the value is computed, or the field has a different name, or is nested into another field, - * etc.) - * - *

    Properties can be annotated to configure various aspects of the mapping. The annotation can be - * either on the field, or on the getter (if both are specified, the mapper processor issues a - * compile-time warning, and the field annotation will be ignored). The available annotations are: - * - *

      - *
    • {@link PartitionKey} - *
    • {@link ClusteringColumn} - *
    • {@link Computed} - *
    • {@link Transient} - *
    • {@link CqlName} - *
    - * - *

    The class must expose a no-arg constructor that is at least package-private. - * - *

    Entities are used as arguments or return types of {@link Dao} methods. They can also be nested - * inside other entities (to map UDT columns). - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface Entity { - /** - * Specifies a default keyspace to use when doing operations on this entity. - * - *

    This will be used when you build a DAO without an explicit keyspace parameter: - * - *

    -   * @Entity(defaultKeyspace = "inventory")
    -   * public class Product { ... }
    -   *
    -   * @Mapper
    -   * public interface InventoryMapper {
    -   *   @DaoFactory
    -   *   ProductDao productDao();
    -   *
    -   *   @DaoFactory
    -   *   ProductDao productDao(@DaoKeyspace String keyspace);
    -   * }
    -   *
    -   * ProductDao productDao = mapper.productDao();
    -   * productDao.insert(product); // inserts into inventory.product
    -   *
    -   * ProductDao productDaoTest = mapper.productDao("test");
    -   * productDaoTest.insert(product); // inserts into test.product
    -   * 
    - * - * The default keyspace optional: if it is not specified, and you build a DAO without a keyspace, - * then the session must have a default keyspace (set with {@link - * SessionBuilder#withKeyspace(String)}), otherwise an error will be thrown: - * - *
    -   * @Entity
    -   * public class Product { ... }
    -   *
    -   * CqlSession session = CqlSession.builder()
    -   *     .withKeyspace("default_ks")
    -   *     .build();
    -   * InventoryMapper mapper = new InventoryMapperBuilder(session).build();
    -   *
    -   * ProductDao productDao = mapper.productDao();
    -   * productDao.insert(product); // inserts into default_ks.product
    -   * 
    - * - * If you want the name to be case-sensitive, it must be enclosed in double-quotes, for example: - * - *
    -   * @Entity(defaultKeyspace = "\"defaultKs\"")
    -   * 
    - */ - String defaultKeyspace() default ""; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java deleted file mode 100644 index d86174bdc49..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates a {@link Dao} method that converts a core driver data structure into one or more - * instances of an {@link Entity} class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @GetEntity
    - *   Product asProduct(Row row);
    - * }
    - * 
    - * - * The generated code will retrieve each entity property from the source, such as: - * - *
    - * Product product = new Product();
    - * product.setId(row.get("id", UUID.class));
    - * product.setDescription(row.get("description", String.class));
    - * ...
    - * 
    - * - *

    It does not perform a query. Instead, those methods are intended for cases where you already - * have a query result, and just need the conversion logic. - * - *

    Parameters

    - * - * The method must have a single parameter. The following types are allowed: - * - *
      - *
    • {@link GettableByName} or one of its subtypes (the most likely candidates are {@link Row} - * and {@link UdtValue}). - *
    • {@link ResultSet}. - *
    • {@link AsyncResultSet}. - *
    - * - * The data must match the target entity: the generated code will try to extract every mapped - * property, and fail if one is missing. - * - *

    Return type

    - * - * The method can return: - * - *
      - *
    • a single entity instance. If the argument is a result set type, the generated code will - * extract the first row and convert it, or return {@code null} if the result set is empty. - *
      - * @GetEntity
      - * Product asProduct(Row row);
      - *
      - * @GetEntity
      - * Product firstRowAsProduct(ResultSet resultSet);
      - *       
      - *
    • a {@link PagingIterable} of an entity class. In that case, the type of the parameter - * must be {@link ResultSet}. Each row in the result set will be converted into an - * entity instance. - *
      - * @GetEntity
      - * PagingIterable<Product> asProducts(ResultSet resultSet);
      - *       
      - *
    • a {@link MappedAsyncPagingIterable} of an entity class. In that case, the type of the - * parameter must be {@link AsyncResultSet}. Each row in the result set will be - * converted into an entity instance. - *
      - * @GetEntity
      - * MappedAsyncPagingIterable<Product> asProducts(AsyncResultSet resultSet);
      - *       
      - *
    - * - * If the return type doesn't match the parameter type (for example {@link PagingIterable} for - * {@link AsyncResultSet}), the mapper processor will issue a compile-time error. - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface GetEntity { - - /** - * Whether to tolerate missing columns in the source data structure. - * - *

    If {@code false} (the default), then the source must contain a matching column for every - * property in the entity definition, including computed ones. If such a column is not - * found, an {@link IllegalArgumentException} will be thrown. - * - *

    If {@code true}, the mapper will operate on a best-effort basis and attempt to read all - * entity properties that have a matching column in the source, leaving unmatched properties - * untouched. Beware that this may result in a partially-populated entity instance. - */ - boolean lenient() default false; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java deleted file mode 100644 index 0b064b8597d..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * A strategy to define which ancestors to scan for annotations for {@link Entity}-annotated classes - * and {@link Dao}-annotated interfaces. - * - *

    In addition, properties will also be scanned for on {@link Entity}-annotated classes. - * - *

    By default, the mapper will transparently scan all parent classes and interfaces. The closer - * in proximity a type is to the base type, the more precedence it is given when scanning. In - * addition, parent classes are given precedence over interfaces. - * - *

    For entities, this enables polymorphic mapping of a class hierarchy into different CQL tables - * or UDTs. - * - *

    For DAOs, this enables sharing configuration between DAOs by implementing an interface that - * has annotations defining how the DAO methods should behave. - * - *

    To disable scanning, set {@link #scanAncestors()} to false. - * - *

    To control the highest ancestor considered in scanning for annotations, use {@link - * #highestAncestor()} and {@link #includeHighestAncestor()}. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface HierarchyScanStrategy { - - /** - * Whether or not ancestors should be scanned for properties and annotations. - * - *

    If false only the {@link Entity} class or {@link Dao} interface will be - * scanned. - * - *

    Defaults to true. - */ - boolean scanAncestors() default true; - - /** - * The {@link Class} to consider the highest ancestor, meaning the classes that this class extends - * or implements will not be scanned for annotations. - * - *

    Note that If you have a complex hierarchy involving both parent classes and interfaces and - * highestAncestor specifies a class for example, all interfaces will still be included. Therefore - * it is recommended to avoid creating complex type hierarchies or to only do so if you expect the - * entire hierarchy to be scanned. - * - *

    Defaults to {@link Object}. - */ - Class highestAncestor() default Object.class; - - /** - * Whether or not to include the specified {@link #highestAncestor()} in scanning. - * - *

    Defaults to false. - */ - boolean includeHighestAncestor() default false; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Increment.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Increment.java deleted file mode 100644 index bb86fa5b8ab..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Increment.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.util.function.UnaryOperator; - -/** - * Annotates a {@link Dao} method that increments a counter table that is mapped to an {@link - * Entity}-annotated class. - * - *

    Example: - * - *

    - * @Entity
    - * public class Votes {
    - *   @PartitionKey private int articleId;
    - *   private long upVotes;
    - *   private long downVotes;
    - *   ... // constructor(s), getters and setters, etc.
    - * }
    - * @Dao
    - * public interface VotesDao {
    - *   @Increment(entityClass = Votes.class)
    - *   void incrementUpVotes(int articleId, long upVotes);
    - *
    - *   @Increment(entityClass = Votes.class)
    - *   void incrementDownVotes(int articleId, long downVotes);
    - *
    - *   @Select
    - *   Votes findById(int articleId);
    - * }
    - * 
    - * - *

    Parameters

    - * - * The entity class must be specified with {@link #entityClass()}. - * - *

    The method's parameters must start with the full primary key, in the exact order (as defined - * by the {@link PartitionKey} and {@link ClusteringColumn} annotations in the entity class). The - * parameter names don't necessarily need to match the names of the columns, but the types must - * match. Unlike other methods like {@link Select} or {@link Delete}, counter updates cannot operate - * on a whole partition, they need to target exactly one row; so all the partition key and - * clustering columns must be specified. - * - *

    Then must follow one or more parameters representing counter increments. Their type must be - * {@code long} or {@link Long}. The name of the parameter must match the name of the entity - * property that maps to the counter (that is, the name of the getter without "get" and - * decapitalized). Alternatively, you may annotate a parameter with {@link CqlName} to specify the - * raw column name directly; in that case, the name of the parameter does not matter: - * - *

    - * @Increment(entityClass = Votes.class)
    - * void incrementUpVotes(int articleId, @CqlName("up_votes") long foobar);
    - * 
    - * - * When you invoke the method, each parameter value is interpreted as a delta that will be - * applied to the counter. In other words, if you pass 1, the counter will be incremented by 1. - * Negative values are allowed. If you are using Cassandra 2.2 or above, you can use {@link Long} - * and pass {@code null} for some of the parameters, they will be ignored (following {@link - * NullSavingStrategy#DO_NOT_SET} semantics). If you are using Cassandra 2.1, {@code null} values - * will trigger a runtime error. - * - *

    A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link - * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last - * parameter. It will be applied to the statement before execution. This allows you to customize - * certain aspects of the request (page size, timeout, etc) at runtime. - * - *

    Return type

    - * - *

    The method can return {@code void}, a void {@link CompletionStage} or {@link - * CompletableFuture}, or a {@link ReactiveResultSet}. - * - *

    Target keyspace and table

    - * - *

    If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated - * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the - * mapper was built from a {@link Session} that has a {@linkplain - * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. - * - *

    If a table was specified when creating the DAO, then the generated query targets that table. - * Otherwise, it uses the default table name for the entity (which is determined by the name of the - * entity class and the naming convention). - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface Increment { - - /** - * A hint to indicate the entity class that is being targeted. This is mandatory, the mapper will - * issue a compile error if you leave it unset. - * - *

    Note that, for technical reasons, this is an array, but only one element is expected. If you - * specify more than one class, the mapper processor will generate a compile-time warning, and - * proceed with the first one. - */ - Class[] entityClass() default {}; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java deleted file mode 100644 index 602a673d8a7..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.util.function.UnaryOperator; - -/** - * Annotates a {@link Dao} method that inserts an instance of an {@link Entity}-annotated class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @Insert
    - *   void insert(Product product);
    - * }
    - * 
    - * - *

    Parameters

    - * - * The first parameter must be the entity to insert. - * - *

    If the query has a {@linkplain #ttl() TTL} and/or {@linkplain #timestamp() timestamp} with - * placeholders, the method must have corresponding additional parameters (same name, and a - * compatible Java type): - * - *

    - * @Insert(ttl = ":ttl")
    - * void insertWithTtl(Product product, int ttl);
    - * 
    - * - *

    A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link - * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last - * parameter. It will be applied to the statement before execution. This allows you to customize - * certain aspects of the request (page size, timeout, etc) at runtime. - * - *

    Return type

    - * - * The method can return: - * - *
      - *
    • {@code void}. - *
    • the entity class. This is intended for {@code INSERT ... IF NOT EXISTS} queries. The method - * will return {@code null} if the insertion succeeded, or the existing entity if it failed. - *
      - * @Insert(ifNotExists = true)
      - * Product insertIfNotExists(Product product);
      - *       
      - *
    • an {@link Optional} of the entity class, as a null-safe alternative for {@code INSERT ... - * IF NOT EXISTS} queries. - *
      - * @Insert(ifNotExists = true)
      - * Optional<Product> insertIfNotExists(Product product);
      - *       
      - *
    • a {@code boolean} or {@link Boolean}, which will be mapped to {@link - * ResultSet#wasApplied()}. This is intended for IF NOT EXISTS queries: - *
      - * @Insert(ifNotExists = true)
      - * boolean saveIfNotExists(Product product);
      - *       
      - *
    • a {@link ResultSet}. This is intended for cases where you intend to inspect data associated - * with the result, such as {@link ResultSet#getExecutionInfo()}. - *
      - * @Insert
      - * ResultSet save(Product product);
      - *       
      - *
    • a {@link BoundStatement} This is intended for cases where you intend to execute this - * statement later or in a batch: - *
      - * @Insert
      - * BoundStatement save(Product product);
      - *      
      - *
    • a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The mapper will - * execute the query asynchronously. - *
      - * @Insert
      - * CompletionStage<Void> insert(Product product);
      - *
      - * @Insert(ifNotExists = true)
      - * CompletableFuture<Product> insertIfNotExists(Product product);
      - *
      - * @Insert(ifNotExists = true)
      - * CompletableFuture<Optional<Product>> insertIfNotExists(Product product);
      - *       
      - *
    • a {@link ReactiveResultSet}. - *
      - * @Insert
      - * ReactiveResultSet insertReactive(Product product);
      - *       
      - *
    • a {@linkplain MapperResultProducer custom type}. - *
    - * - *

    Target keyspace and table

    - * - * If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated - * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the - * mapper was built from a {@link Session} that has a {@linkplain - * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. - * - *

    If a table was specified when creating the DAO, then the generated query targets that table. - * Otherwise, it uses the default table name for the entity (which is determined by the name of the - * entity class and the naming convention). - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface Insert { - /** Whether to append an IF NOT EXISTS clause at the end of the generated INSERT query. */ - boolean ifNotExists() default false; - - /** - * The TTL (time to live) to use in the generated INSERT query. - * - *

    If this starts with ":", it is interpreted as a named placeholder (that must have a - * corresponding parameter in the method signature). Otherwise, it must be a literal integer value - * (representing a number of seconds). - * - *

    If the placeholder name is invalid or the literal can't be parsed as an integer (according - * to the rules of {@link Integer#parseInt(String)}), the mapper will issue a compile-time - * warning. - */ - String ttl() default ""; - - /** - * The timestamp to use in the generated INSERT query. - * - *

    If this starts with ":", it is interpreted as a named placeholder (that must have a - * corresponding parameter in the method signature). Otherwise, it must be literal long value - * (representing a number of microseconds since epoch). - * - *

    If the placeholder name is invalid or the literal can't be parsed as a long (according to - * the rules of {@link Long#parseLong(String)}), the mapper will issue a compile-time warning. - */ - String timestamp() default ""; - - /** - * How to handle null entity properties during the insertion. - * - *

    This defaults either to the {@link DefaultNullSavingStrategy DAO-level strategy} (if set), - * or {@link NullSavingStrategy#DO_NOT_SET}. - */ - NullSavingStrategy nullSavingStrategy() default NullSavingStrategy.DO_NOT_SET; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java deleted file mode 100644 index 8cdaf28fc51..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.CqlSession; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an interface that will serve as the entry point to mapper features. - * - *

    Example: - * - *

    - * @Mapper
    - * public interface InventoryMapper {
    - *   @DaoFactory
    - *   ProductDao productDao();
    - * }
    - * 
    - * - * The mapper annotation processor will generate an implementation, and a builder that allows you to - * create an instance from a {@link CqlSession}: - * - *
    - * InventoryMapper inventoryMapper = new InventoryMapperBuilder(session).build();
    - * 
    - * - * By default, the builder's name is the name of the interface with the suffix "Builder", and it - * resides in the same package. You can also use a custom name with {@link #builderName()}. - * - *

    The interface should define one or more {@link DaoFactory} methods. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface Mapper { - - /** - * The fully-qualified name of the builder class that will get generated in order to - * create instances of the manager, for example "com.mycompany.MyCustomBuilder". - * - *

    If this is left empty (the default), the builder's name is the name of the interface with - * the suffix "Builder", and it * resides in the same package. - */ - String builderName() default ""; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java deleted file mode 100644 index b5121b144b2..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; -import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an {@link Entity} to indicate how CQL names will be inferred from the names in the Java - * class. - * - *

    This applies to: - * - *

      - *
    • The name of the class (e.g. {@code Product}), that will be converted into a table name. - *
    • The name of the entity properties (e.g. {@code productId}), that will be converted into - * column names. - *
    - * - *

    Either of {@link #convention()} or {@link #customConverterClass()} must be specified, but not - * both. - * - *

    This annotation is optional. If it is not specified, the entity will default to {@link - * NamingConvention#SNAKE_CASE_INSENSITIVE}. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface NamingStrategy { - - /** - * Specifies a built-in naming convention. - * - *

    The mapper processor will apply the conversion at compile time, and generate code that - * hard-codes the converted strings. In other words, this is slightly more efficient than a custom - * converter because no conversion happens at runtime. - * - *

    This is mutually exclusive with {@link #customConverterClass()}. - * - *

    Note that, for technical reasons, this is an array, but only one element is expected. If you - * specify more than one element, the mapper processor will generate a compile-time warning, and - * proceed with the first one. - */ - NamingConvention[] convention() default {}; - - /** - * Specifies a custom converter implementation. - * - *

    The provided class must implement {@link NameConverter} and expose a public no-arg - * constructor. The code generated by the mapper will create an instance at runtime, and invoke it - * every time it generates a new request. - * - *

    This is mutually exclusive with {@link #convention()}. - * - *

    Note that, for technical reasons, this is an array, but only one element is expected. If you - * specify more than one element, the mapper processor will generate a compile-time warning, and - * proceed with the first one. - */ - Class[] customConverterClass() default {}; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java deleted file mode 100644 index 1dff4280f5b..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the field or getter of an {@link Entity} property, to indicate that it's part of the - * partition key. - * - *

    Example: - * - *

    - * @PartitionKey private UUID id;
    - * 
    - * - * This information is used by the mapper processor to generate default queries (for example a basic - * {@link Select}). Note that an entity is not required to have a partition key, for example if it - * only gets mapped as a UDT. - * - *

    If the partition key is composite, you must specify {@link #value()} to indicate the position - * of each property: - * - *

    - * @PartitionKey(1) private int pk1;
    - * @PartitionKey(2) private int pk2;
    - * 
    - * - * If you don't specify positions, or if there are duplicates, the mapper processor will issue a - * compile-time error. - * - *

    This annotation is mutually exclusive with {@link ClusteringColumn}. - */ -@Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.RUNTIME) -public @interface PartitionKey { - - /** - * The position of the property in the partition key. - * - *

    This is only required if the partition key is composite. Positions are not strictly required - * to be consecutive or start at a given index, but for clarity it is recommended to use - * consecutive integers. - */ - int value() default 0; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.java deleted file mode 100644 index 4d66fd84e33..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle; -import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an {@link Entity} to customize certain aspects of the introspection process that - * determines which methods are considered as properties, and how new instances will be created. - * - *

    Example: - * - *

    - * @Entity
    - * @PropertyStrategy(getterStyle = FLUENT)
    - * public class Account {
    - *   ...
    - * }
    - * 
    - * - * This annotation can be inherited from an interface or parent class. - * - *

    When neither the entity class nor any of its parent is explicitly annotated, the mapper will - * assume context-dependent defaults: - * - *

      - *
    • for a Scala case class: {@code mutable = false} and {@code getterStyle = FLUENT}. The - * mapper detects this case by checking if the entity implements {@code scala.Product}. - *
    • for a Kotlin data class: {@code mutable = false} and {@code getterStyle = JAVABEANS}. The - * mapper detects this case by checking if the entity is annotated with {@code - * kotlin.Metadata}, and if it has any method named {@code component1} (both of these are - * added automatically by the Kotlin compiler). - *
    • Java records (JDK 14 and above): {@code mutable = false} and {@code getterStyle = FLUENT}. - * The mapper detects this case by checking if the entity extends {@code java.lang.Record}. - *
    • any other case: {@code mutable = true}, {@code getterStyle = JAVABEANS} and {@code - * setterStyle = JAVABEANS}. - *
    - * - * Not that this only applies if the annotation is completely absent. If it is present with only - * some of its attributes, the remaining attributes will get the default declared by the annotation, - * not the context-dependent default above (for example, if a Kotlin data class is annotated with - * {@code @PropertyStrategy(getterStyle = FLUENT)}, it will be mutable). - */ -@Target({ElementType.TYPE}) -@Retention(RetentionPolicy.RUNTIME) -public @interface PropertyStrategy { - - /** The style of getter. See {@link GetterStyle} and its constants for more explanations. */ - GetterStyle getterStyle() default GetterStyle.JAVABEANS; - - /** - * The style of setter. See {@link SetterStyle} and its constants for more explanations. - * - *

    This has no effect if {@link #mutable()} is false. - */ - SetterStyle setterStyle() default SetterStyle.JAVABEANS; - - /** - * Whether the entity is mutable. - * - *

    If this is set to false: - * - *

      - *
    • the mapper won't try to discover setters for the properties; - *
    • it will assume that the entity class has a visible constructor that takes all the - * non-transient properties as arguments. - *
    - */ - boolean mutable() default true; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java deleted file mode 100644 index c362453bb3a..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.util.function.UnaryOperator; - -/** - * Annotates a {@link Dao} method that executes a user-provided query. - * - *

    Example: - * - *

    - * @Dao
    - * public interface SensorReadingDao {
    - *   @Query("SELECT count(*) FROM sensor_readings WHERE id = :id")
    - *   long countById(int id);
    - * }
    - * 
    - * - * This is the equivalent of what was called "accessor methods" in the driver 3 mapper. - * - *

    Parameters

    - * - * The query string provided in {@link #value()} will typically contain CQL placeholders. The - * method's parameters must match those placeholders: same name and a compatible Java type. - * - *
    - * @Query("SELECT count(*) FROM sensor_readings WHERE id = :id AND year = :year")
    - * long countByIdAndYear(int id, int year);
    - * 
    - * - *

    A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link - * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last - * parameter. It will be applied to the statement before execution. This allows you to customize - * certain aspects of the request (page size, timeout, etc) at runtime. - * - *

    Return type

    - * - * The method can return: - * - *
      - *
    • {@code void}. - *
    • a {@code boolean} or {@link Boolean}, which will be mapped to {@link - * ResultSet#wasApplied()}. This is intended for conditional queries. - *
    • a {@code long} or {@link Long}, which will be mapped to the first column of the first row, - * expecting CQL type {@code BIGINT}. This is intended for count queries. The method will fail - * if the result set is empty, or does not match the expected format. - *
    • a {@link Row}. This means the result is not converted, the mapper only extracts the first - * row of the result set and returns it. The method will return {@code null} if the result set - * is empty. - *
    • a single instance of an {@link Entity} class. The method will extract the first row and - * convert it, or return {@code null} if the result set is empty. - *
    • an {@link Optional} of an entity class. The method will extract the first row and convert - * it, or return {@code Optional.empty()} if the result set is empty. - *
    • a {@link ResultSet}. The method will return the raw query result, without any conversion. - *
    • a {@link BoundStatement}. This is intended for cases where you intend to execute this - * statement later or in a batch: - *
    • a {@link PagingIterable}. The method will convert each row into an entity instance. - *
    • a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The method will - * execute the query asynchronously. Note that for result sets and iterables, you need to - * switch to the asynchronous equivalent {@link AsyncResultSet} and {@link - * MappedAsyncPagingIterable} respectively. - *
    • a {@link ReactiveResultSet}, or a {@link MappedReactiveResultSet} of the entity class. - *
    • a {@linkplain MapperResultProducer custom type}. - *
    - * - *

    Target keyspace and table

    - * - * To avoid hard-coding the keyspace and table name, the query string supports 3 additional - * placeholders: {@code ${keyspaceId}}, {@code ${tableId}} and {@code ${qualifiedTableId}}. They get - * substituted at DAO initialization time, with the keyspace and table that the DAO was built with - * (see {@link DaoFactory}). - * - *

    For example, given the following: - * - *

    - * @Dao
    - * public interface TestDao {
    - *   @Query("SELECT * FROM ${keyspaceId}.${tableId}")
    - *   ResultSet queryFromKeyspaceAndTable();
    - *
    - *   @Query("SELECT * FROM ${qualifiedTableId}")
    - *   ResultSet queryFromQualifiedTable();
    - * }
    - *
    - * @Mapper
    - * public interface TestMapper {
    - *   @DaoFactory
    - *   TestDao dao(@DaoKeyspace String keyspace, @DaoTable String table);
    - *
    - *   @DaoFactory
    - *   TestDao dao(@DaoTable String table);
    - * }
    - *
    - * TestDao dao1 = mapper.dao("ks", "t");
    - * TestDao dao2 = mapper.dao("t");
    - * 
    - * - * Then: - * - *
      - *
    • {@code dao1.queryFromKeyspaceAndTable()} and {@code dao1.queryFromQualifiedTable()} both - * execute {@code SELECT * FROM ks.t}. - *
    • {@code dao2.queryFromKeyspaceAndTable()} fails: no keyspace was specified for this DAO, so - * {@code ${keyspaceId}} can't be substituted. - *
    • {@code dao1.queryFromQualifiedTable()} executes {@code SELECT * FROM t}. In other words, - * {@code ${qualifiedTableId}} uses the keyspace if it is available, but resolves to the table - * name only if it isn't. Whether the query succeeds or not depends on whether the {@link - * Session} that the mapper was built with has a {@linkplain - * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace}. - *
    - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface Query { - - /** - * The query string to execute. - * - *

    It can contain CQL placeholders (e.g. {@code :id}) that will be bound with the method's - * parameters; and also special text placeholders {@code ${keyspaceId}}, {@code ${tableId}} and - * {@code ${qualifiedTableId}} that will be substituted with the keyspace and table that the DAO - * was built with. See the top-level javadocs of this class for more explanations. - */ - String value(); - - /** - * How to handle null query parameters. - * - *

    This defaults either to the {@link DefaultNullSavingStrategy DAO-level strategy} (if set), - * or {@link NullSavingStrategy#DO_NOT_SET}. - */ - NullSavingStrategy nullSavingStrategy() default NullSavingStrategy.DO_NOT_SET; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java deleted file mode 100644 index d8194d12e8b..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates a {@link Dao} method that delegates the execution of the query to a user-provided - * class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface SensorDao {
    - *   @QueryProvider(providerClass = FindSliceProvider.class, entityHelpers = SensorReading.class)
    - *   PagingIterable<SensorReading> findSlice(int id, Integer month, Integer day);
    - * }
    - *
    - * public class FindSliceProvider {
    - *   public FindSliceProvider(
    - *       MapperContext context, EntityHelper<SensorReading> sensorReadingHelper) {
    - *     ...
    - *   }
    - *
    - *   public PagingIterable<SensorReading> findSlice(int id, Integer month, Integer day) {
    - *     ... // implement the query logic here
    - *   }
    - * }
    - * 
    - * - *

    Use this for requests that can't be expressed as static query strings, for example if some - * clauses are added dynamically depending on the values of some parameters. - * - *

    The parameters and return type are completely free-form, as long as they match those of the - * provider method. - * - * @see MapperBuilder#withCustomState(Object, Object) - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface QueryProvider { - - /** - * The class that will execute the query. - * - *

    The mapper will create an instance of this class for each DAO instance. It must expose a - * constructor that is accessible from the DAO interface's package, and takes the following - * parameter types: - * - *

      - *
    • {@link MapperContext}. - *
    • zero or more {@link EntityHelper}s, as defined by {@link #entityHelpers()}. - *
    - * - * It must also expose a method that will be invoked each time the DAO method is called, see - * {@link #providerMethod()}. - */ - Class providerClass(); - - /** - * The method to invoke on the provider class. - * - *

    It must be accessible from the DAO interface's package, and have the same parameters and - * return type as the annotated DAO method. - * - *

    This is optional; if not provided, it defaults to the name of the annotated DAO method. - */ - String providerMethod() default ""; - - /** - * A list of entities for which {@link EntityHelper} instances should be injected into the - * provider class's constructor (in addition to the mapper context). - * - *

    For example, if {@code entityHelpers = {Product.class, Dimensions.class}}, your provider - * class must expose a constructor that takes the parameter types{@code (MapperContext, - * EntityHelper, EntityHelper)}. - * - *

    All provided classes must be annotated with {@link Entity}. Otherwise, the mapper will issue - * a compile-time error. - */ - Class[] entityHelpers() default {}; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SchemaHint.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SchemaHint.java deleted file mode 100644 index d680798ba5a..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SchemaHint.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an entity to indicate which type of schema element it is supposed to map to. This is - * only used to optimize {@linkplain MapperBuilder#withSchemaValidationEnabled(boolean) schema - * validation}, it has no impact on query execution. - * - *

    Example: - * - *

    - * @Entity
    - * @SchemaHint(targetElement = SchemaHint.TargetElement.TABLE)
    - * public class Product {
    - *   // fields of the entity
    - * }
    - * 
    - * - *

    By default, the mapper first tries to match the entity with a table, and if that doesn't work, - * with a UDT. This annotation allows you to provide a hint as to which check should be done, so - * that the mapper can skip the other one. - * - *

    In addition, you can ask to completely skip the validation for this entity by using {@link - * TargetElement#NONE}. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -public @interface SchemaHint { - TargetElement targetElement(); - - enum TargetElement { - TABLE, - UDT, - NONE, - ; - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java deleted file mode 100644 index 46c7994809d..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.util.function.UnaryOperator; - -/** - * Annotates a {@link Dao} method that selects one or more rows, and maps them to instances of an - * {@link Entity}-annotated class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @Select
    - *   Product findById(UUID productId);
    - * }
    - * 
    - * - *

    Parameters

    - * - * If {@link #customWhereClause()} is empty, the mapper defaults to a selection by primary key - * (partition key + clustering columns). The method's parameters must match the types of the primary - * key columns, in the exact order (which is defined by the integer values of the {@link - * PartitionKey} and {@link ClusteringColumn} annotations in the entity class). The parameter names - * don't necessarily need to match the names of the columns. It is also possible for the method to - * only take a partial primary key (the first n columns), in which case it will return - * multiple entities. - * - *

    If {@link #customWhereClause()} is not empty, it completely replaces the WHERE clause. The - * provided string can contain named placeholders. In that case, the method must have a - * corresponding parameter for each, with the same name and a compatible Java type: - * - *

    - * @Select(customWhereClause = "description LIKE :searchString")
    - * PagingIterable<Product> findByDescription(String searchString);
    - * 
    - * - * The generated SELECT query can be further customized with {@link #limit()}, {@link - * #perPartitionLimit()}, {@link #orderBy()}, {@link #groupBy()} and {@link #allowFiltering()}. Some - * of these clauses can also contain placeholders whose values will be provided through additional - * method parameters. Note that it is sometimes not possible to determine if a parameter is a - * primary key component or a placeholder value; therefore the rule is that if your method takes - * a partial primary key, the first parameter that is not a primary key component must be explicitly - * annotated with {@link CqlName}. For example if the primary key is {@code ((day int, hour int, - * minute int), ts timestamp)}: - * - *
    - * // Annotate 'l' so that it's not mistaken for the second PK component
    - * @Select(limit = ":l")
    - * PagingIterable<Sale> findDailySales(int day, @CqlName("l") int l);
    - * 
    - * - *

    A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link - * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last - * parameter. It will be applied to the statement before execution. This allows you to customize - * certain aspects of the request (page size, timeout, etc) at runtime. - * - *

    Return type

    - * - *

    In all cases, the method can return: - * - *

      - *
    • the entity class itself. If the query returns no rows, the method will return {@code null}. - * If it returns more than one row, subsequent rows will be discarded. - *
      - * @Select
      - * Product findById(UUID productId);
      - *       
      - *
    • an {@link Optional} of the entity class. If the query returns no rows, the method will - * return {@code Optional.empty()}. If it returns more than one row, subsequent rows will be - * discarded. - *
      - * @Select
      - * Optional<Product> findById(UUID productId);
      - *       
      - *
    • a {@link PagingIterable} of the entity class. It behaves like a result set, except that - * each element is a mapped entity instead of a row. - *
      - * @Select(customWhereClause = "description LIKE :searchString")
      - * PagingIterable<Product> findByDescription(String searchString);
      - *       
      - *
    • a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The method will - * execute the query asynchronously. Note that for iterables, you need to switch to the - * asynchronous equivalent {@link MappedAsyncPagingIterable}. - *
      - * @Select
      - * CompletionStage<Product> findByIdAsync(UUID productId);
      - *
      - * @Select
      - * CompletionStage<Optional<Product>> findByIdAsync(UUID productId);
      - *
      - * @Select(customWhereClause = "description LIKE :searchString")
      - * CompletionStage<MappedAsyncPagingIterable<Product>> findByDescriptionAsync(String searchString);
      - *       
      - *
    • a {@link MappedReactiveResultSet} of the entity class. - *
      - * @Select(customWhereClause = "description LIKE :searchString")
      - * MappedReactiveResultSet<Product> findByDescriptionReactive(String searchString);
      - *       
      - *
    • a {@linkplain MapperResultProducer custom type}. - *
    - * - *

    Target keyspace and table

    - * - * If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated - * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the - * mapper was built from a {@link Session} that has a {@linkplain - * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. - * - *

    If a table was specified when creating the DAO, then the generated query targets that table. - * Otherwise, it uses the default table name for the entity (which is determined by the name of the - * entity class and the naming convention). - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface Select { - - /** - * A custom WHERE clause for the SELECT query. - * - *

    If this is not empty, it completely replaces the WHERE clause in the generated query. Note - * that the provided string must not contain the {@code WHERE} keyword. - * - *

    This clause can contain placeholders that will be bound with the method's parameters; see - * the top-level javadocs of this class for more explanations. - */ - String customWhereClause() default ""; - - /** - * The LIMIT to use in the SELECT query. - * - *

    If this starts with ":", it is interpreted as a named placeholder (that must have a - * corresponding parameter in the method signature). Otherwise, it must be a literal integer - * value. - * - *

    If the placeholder name is invalid or the literal can't be parsed as an integer (according - * to the rules of {@link Integer#parseInt(String)}), the mapper will issue a compile-time - * warning. - */ - String limit() default ""; - - /** - * The PER PARTITION LIMIT to use in the SELECT query. - * - *

    If this starts with ":", it is interpreted as a named placeholder (that must have a - * corresponding parameter in the method signature). Otherwise, it must be a literal integer - * value. - * - *

    If the placeholder name is invalid or the literal can't be parsed as an integer (according - * to the rules of {@link Integer#parseInt(String)}), the mapper will issue a compile-time - * warning. - */ - String perPartitionLimit() default ""; - - /** - * A list of orderings to add to an ORDER BY clause in the SELECT query. - * - *

    Each element must be a column name followed by a space and the word "ASC" or "DESC". If - * there are multiple columns, pass an array: - * - *

    -   * @Select(orderBy = {"hour DESC", "minute DESC"})
    -   * 
    - * - *

    If an element can't be parsed, the mapper will issue a compile-time error. - */ - String[] orderBy() default {}; - - /** A list of column names to be added to a GROUP BY clause in the SELECT query. */ - String[] groupBy() default {}; - - /** Whether to add an ALLOW FILTERING clause to the SELECT query. */ - boolean allowFiltering() default false; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java deleted file mode 100644 index cc1cb9b7e88..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates a {@link Dao} method that fills a core driver data structure from an instance of an - * {@link Entity} class. - * - *

    Example: - * - *

    - * public interface ProductDao {
    - *   @SetEntity
    - *   BoundStatement bind(Product product, BoundStatement boundStatement);
    - * }
    - * 
    - * - * The generated code will set each entity property on the target, such as: - * - *
    - * boundStatement = boundStatement.set("id", product.getId(), UUID.class);
    - * boundStatement = boundStatement.set("description", product.getDescription(), String.class);
    - * ...
    - * 
    - * - * It does not perform a query. Instead, those methods are intended for cases where you will execute - * the query yourself, and just need the conversion logic. - * - *

    Parameters

    - * - * The method must have two parameters: one is the entity instance, the other must be a subtype of - * {@link SettableByName} (the most likely candidates are {@link BoundStatement}, {@link - * BoundStatementBuilder} and {@link UdtValue}). Note that you can't use {@link SettableByName} - * itself. - * - *

    The order of the parameters does not matter. - * - *

    Return type

    - * - * The method can either be void, or return the exact same type as its settable parameter. - * - *
    - * @SetEntity
    - * void bind(Product product, UdtValue udtValue);
    - *
    - * @SetEntity
    - * void bind(Product product, BoundStatementBuilder builder);
    - * 
    - * - * Note that if the settable parameter is immutable, the method should return a new instance, - * because the generated code won't be able to modify the argument in place. This is the case for - * {@link BoundStatement}, which is immutable in the driver: - * - *
    - * // Wrong: statement won't be modified
    - * @SetEntity
    - * void bind(Product product, BoundStatement statement);
    - *
    - * // Do this instead:
    - * @SetEntity
    - * BoundStatement bind(Product product, BoundStatement statement);
    - * 
    - * - * If you use a void method with {@link BoundStatement}, the mapper processor will issue a - * compile-time warning. - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface SetEntity { - - /** - * How to handle null entity properties. - * - *

    This defaults either to the {@link DefaultNullSavingStrategy DAO-level strategy} (if set), - * or {@link NullSavingStrategy#DO_NOT_SET}. - */ - NullSavingStrategy nullSavingStrategy() default NullSavingStrategy.DO_NOT_SET; - - /** - * Whether to tolerate missing columns in the target data structure. - * - *

    If {@code false} (the default), then the target must contain a matching column for every - * property in the entity definition, except computed ones. If such a column is not - * found, an {@link IllegalArgumentException} will be thrown. - * - *

    If {@code true}, the mapper will operate on a best-effort basis and attempt to write all - * entity properties that have a matching column in the target, leaving unmatched properties - * untouched. Beware that this may result in a partially-populated target. - */ - boolean lenient() default false; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java deleted file mode 100644 index 56f32432ea8..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.cql.Statement; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.time.Duration; - -/** - * A set of compile time parameters to use for mapped queries (this can be used for methods - * annotated with {@link Delete}, {@link Insert}, {@link Query}, {@link Select} or {@link Update}). - * - *

    If you decorate a DAO method with this, it will use those values when constructing the bound - * statement. Note that the method can also take a function argument to modify the statement at - * runtime; in that case, the values from this annotation will be applied first, and the function - * second. - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface StatementAttributes { - /** - * The name of the execution profile to use. - * - * @see Statement#setExecutionProfileName(String) - */ - String executionProfileName() default ""; - - /** - * The page size to use. - * - *

    If unset, the mapper won't set any value on the statement (letting it default to the value - * defined in the configuration). - * - * @see Statement#setPageSize(int) - */ - int pageSize() default Integer.MIN_VALUE; - - /** - * Whether the request is idempotent; that is, whether applying the request twice leaves the - * database in the same state. - * - *

    If unset, the mapper won't set any value on the statement (letting it default to the value * - * defined in the configuration). - * - *

    Note that this attribute is an array only to allow an empty default; only the first element - * will be considered. - * - * @see Statement#setIdempotent(Boolean) - */ - boolean[] idempotence() default {}; - - /** - * The consistency level to use for the statement. - * - *

    If unset, the mapper won't set any value on the statement (letting it default to the value - * defined in the configuration). - * - * @see Statement#setConsistencyLevel(ConsistencyLevel) - */ - String consistencyLevel() default ""; - - /** - * The serial consistency level to use for the statement. - * - *

    If unset, the mapper won't set any value on the statement (letting it default to the value - * defined in the configuration). - * - * @see Statement#setSerialConsistencyLevel(ConsistencyLevel) - */ - String serialConsistencyLevel() default ""; - - /** - * How long to wait for this request to complete. - * - *

    This expects a string in the format accepted by {@link Duration#parse(CharSequence)}. - * - *

    If unset, the mapper won't set any value on the statement (letting it default to the value - * defined in the configuration). - * - * @see Statement#setTimeout(Duration) - */ - String timeout() default ""; - - /** - * The keyspace to use for token-aware routing. - * - *

    If unset, the mapper won't set any value on the statement. - * - * @see Statement#setRoutingKeyspace(String) - */ - String routingKeyspace() default ""; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java deleted file mode 100644 index 1db111ccfd2..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates the field or getter of an {@link Entity} property, to indicate that it will not be - * mapped to any column (neither during reads nor writes). - * - *

    Example: - * - *

    - * @Transient private int notAColumn;
    - * 
    - * - * This information is used by the mapper processor to exclude the property from generated queries. - * - *

    Please note that {@code Transient} takes precedence over {@link PartitionKey} and {@link - * ClusteringColumn} annotations. - */ -@Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.RUNTIME) -public @interface Transient {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java deleted file mode 100644 index 97b8c5c99a2..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotates an {@link Entity} to indicate which properties should be considered 'transient', - * meaning that they should not be mapped to any column (neither during reads nor writes). - * - *

    Example: - * - *

    - * @TransientProperties({"notAColumn", "x"})
    - * @Entity
    - * public class Product {
    - *   @PartitionKey private UUID id;
    - *   private String description;
    - *   // these columns are not included because their names are specified in @TransientProperties
    - *   private int notAColumn;
    - *   private int x;
    - *   ...
    - * }
    - * 
    - * - *

    This annotation is an alternative to using the {@link Transient} annotation on a field or - * getter. It is useful in cases where this annotation may be specified on a parent class where - * implementing classes will share a common configuration without needing to explicitly annotate - * each property with a {@link Transient} annotation. - */ -@Target({ElementType.TYPE}) -@Retention(RetentionPolicy.RUNTIME) -public @interface TransientProperties { - - /** Specifies a list of property names that should be considered transient. */ - String[] value() default {}; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java deleted file mode 100644 index 02930d73aa4..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.annotations; - -import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.function.Function; -import java.util.function.UnaryOperator; - -/** - * Annotates a {@link Dao} method that updates one or more instances of an {@link Entity}-annotated - * class. - * - *

    Example: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @Update
    - *   void update(Product product);
    - * }
    - * 
    - * - *

    Parameters

    - * - *

    The first parameter must be an entity instance. All of its non-PK properties will be - * interpreted as values to update. - * - *

      - *
    • If {@link #customWhereClause()} is empty, the mapper defaults to an update by primary key - * (partition key + clustering columns). The WHERE clause is generated automatically, and - * bound with the PK components of the provided entity instance. The query will update at most - * one row. - *
    • If {@link #customWhereClause()} is not empty, it completely replaces the WHERE clause. If - * the provided string contains placeholders, the method must have corresponding additional - * parameters (same name, and a compatible Java type): - *
      - * @Update(customWhereClause = "description LIKE :searchString")
      - * void updateIfDescriptionMatches(Product product, String searchString);
      - *       
      - * The PK components of the provided entity are ignored. Multiple rows may be updated. - *
    - * - *

    If the query has a {@linkplain #ttl() TTL} or {@linkplain #timestamp() timestamp} with - * placeholders, the method must have corresponding additional parameters (same name, and a - * compatible Java type): - * - *

    - * @Update(ttl = ":ttl")
    - * void updateWithTtl(Product product, int ttl);
    - * 
    - * - *
    - * @Update(timestamp = ":timestamp")
    - * void updateWithTimestamp(Product product, long timestamp);
    - * 
    - * - *

    A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link - * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last - * parameter. It will be applied to the statement before execution. This allows you to customize - * certain aspects of the request (page size, timeout, etc) at runtime. - * - *

    Return type

    - * - *

    The method can return: - * - *

      - *
    • {@code void}. - *
    • a {@code boolean} or {@link Boolean}, which will be mapped to {@link - * ResultSet#wasApplied()}. This is intended for conditional queries. - *
      - * @Update(ifExists = true)
      - * boolean updateIfExists(Product product);
      - *       
      - *
    • a {@link ResultSet}. The method will return the raw query result, without any conversion. - * This is intended for queries with custom IF clauses; when those queries are not applied, - * they return the actual values of the tested columns. - *
      - * @Update(customIfClause = "description = :expectedDescription")
      - * ResultSet updateIfDescriptionMatches(Product product, String expectedDescription);
      - * // if the condition fails, the result set will contain columns '[applied]' and 'description'
      - *       
      - *
    • a {@link BoundStatement}. This is intended for queries where you will execute this - * statement later or in a batch: - *
      - * @Update
      - * BoundStatement update(Product product);
      - *      
      - *
    • a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The mapper will - * execute the query asynchronously. Note that for result sets, you need to switch to the - * asynchronous equivalent {@link AsyncResultSet}. - *
      - * @Update
      - * CompletionStage<Void> update(Product product);
      - *
      - * @Update(ifExists = true)
      - * CompletableFuture<Boolean> updateIfExists(Product product);
      - *
      - * @Update(customIfClause = "description = :expectedDescription")
      - * CompletableFuture<AsyncResultSet> updateIfDescriptionMatches(Product product, String expectedDescription);
      - *       
      - *
    • a {@link ReactiveResultSet}. - *
      - * @Update
      - * ReactiveResultSet updateReactive(Product product);
      - *       
      - *
    • a {@linkplain MapperResultProducer custom type}. - *
    - * - *

    Target keyspace and table

    - * - *

    If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated - * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the - * mapper was built from a {@link Session} that has a {@linkplain - * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. - * - *

    If a table was specified when creating the DAO, then the generated query targets that table. - * Otherwise, it uses the default table name for the entity (which is determined by the name of the - * entity class and the naming convention). - */ -@Target(ElementType.METHOD) -@Retention(RetentionPolicy.RUNTIME) -public @interface Update { - - /** - * A custom WHERE clause for the UPDATE query. - * - *

    If this is not empty, it completely replaces the WHERE clause in the generated query. Note - * that the provided string must not contain the {@code WHERE} keyword. - * - *

    This clause can contain placeholders that will be bound with the method's parameters; see - * the top-level javadocs of this class for more explanations. - */ - String customWhereClause() default ""; - - /** - * Whether to append an IF EXISTS clause at the end of the generated UPDATE query. - * - *

    This is mutually exclusive with {@link #customIfClause()} (if both are set, the mapper - * processor will generate a compile-time error). - */ - boolean ifExists() default false; - - /** - * A custom IF clause for the UPDATE query. - * - *

    This is mutually exclusive with {@link #ifExists()} (if both are set, the mapper processor - * will generate a compile-time error). - * - *

    If this is not empty, it gets added to the generated query. Note that the provided string - * must not contain the {@code IF} keyword. - * - *

    This clause can contain placeholders that will be bound with the method's parameters; see - * the top-level javadocs of this class for more explanations. - */ - String customIfClause() default ""; - - /** - * The TTL (time to live) to use in the generated INSERT query. - * - *

    If this starts with ":", it is interpreted as a named placeholder (that must have a - * corresponding parameter in the method signature). Otherwise, it must be a literal integer value - * (representing a number of seconds). - * - *

    If the placeholder name is invalid or the literal can't be parsed as an integer (according - * to the rules of {@link Integer#parseInt(String)}), the mapper will issue a compile-time - * warning. - */ - String ttl() default ""; - - /** - * The timestamp to use in the generated INSERT query. - * - *

    If this starts with ":", it is interpreted as a named placeholder (that must have a - * corresponding parameter in the method signature). Otherwise, it must be literal long value - * (representing a number of microseconds since epoch). - * - *

    If the placeholder name is invalid or the literal can't be parsed as a long (according to - * the rules of {@link Long#parseLong(String)}), the mapper will issue a compile-time warning. - */ - String timestamp() default ""; - - /** - * How to handle null entity properties during the update. - * - *

    This defaults either to the {@link DefaultNullSavingStrategy DAO-level strategy} (if set), - * or {@link NullSavingStrategy#DO_NOT_SET}. - */ - NullSavingStrategy nullSavingStrategy() default NullSavingStrategy.DO_NOT_SET; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java deleted file mode 100644 index 653b02c5d0c..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.entity; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.DaoTable; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.querybuilder.delete.Delete; -import com.datastax.oss.driver.api.querybuilder.insert.RegularInsert; -import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.datastax.oss.driver.api.querybuilder.update.Update; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * A set of utility methods related to a particular mapped entity. - * - *

    The mapper processor generates an implementation of this interface for each {@link - * Entity}-annotated class. It is used internally by other mapper components, and can also be - * injected in custom query providers. - */ -public interface EntityHelper { - - /** - * Sets the properties of an entity instance into a target data structure. - * - * @deprecated Use {@link #set(Object, SettableByName, NullSavingStrategy, boolean)} instead. - */ - @NonNull - @Deprecated - > SettableT set( - @NonNull EntityT entity, - @NonNull SettableT target, - @NonNull NullSavingStrategy nullSavingStrategy); - - /** - * Sets the properties of an entity instance into a target data structure. - * - *

    The generated code will attempt to write all entity properties in the target data structure. - * For example: - * - *

    {@code
    -   * target = target.set("id", entity.getId(), UUID.class);
    -   * target = target.set("name", entity.getName(), String.class);
    -   * ...
    -   * }
    - * - * The column names are inferred from the naming strategy for this entity. - * - *

    The target will typically be one of the built-in driver subtypes: {@link BoundStatement}, - * {@link BoundStatementBuilder} or {@link UdtValue}. Note that the default {@link BoundStatement} - * implementation is immutable, therefore this argument won't be modified in-place: you need to - * use the return value to get the resulting structure. - * - *

    If {@code lenient} is {@code true}, the mapper will operate on a best-effort basis and - * attempt to write all entity properties that have a matching column in the target, leaving - * unmatched properties untouched. Beware that this may result in a partially-populated target. - * - *

    If {@code lenient} is {@code false}, then the target must contain a matching column for - * every property in the entity definition, except computed ones. If such a column is not - * found, an {@link IllegalArgumentException} will be thrown. - * - * @param entity the entity that the values will be read from. - * @param target the data structure to fill. - * @param lenient whether to tolerate incomplete targets. - * @return the data structure resulting from the assignments. This is useful for immutable target - * implementations (see above), otherwise it will be the same as {@code target}. - * @throws IllegalArgumentException if lenient is false and the target does not contain matching - * columns for every entity property. - */ - @NonNull - default > SettableT set( - @NonNull EntityT entity, - @NonNull SettableT target, - @NonNull NullSavingStrategy nullSavingStrategy, - boolean lenient) { - return set(entity, target, nullSavingStrategy); - } - - /** - * Gets values from a data structure to fill an entity instance. - * - * @deprecated Use {@link #get(GettableByName, boolean)} instead. - */ - @NonNull - @Deprecated - EntityT get(@NonNull GettableByName source); - - /** - * Gets values from a data structure to fill an entity instance. - * - *

    The generated code will attempt to read all entity properties from the source data - * structure. For example: - * - *

    {@code
    -   * User returnValue = new User();
    -   * returnValue.setId(source.get("id", UUID.class));
    -   * returnValue.setName(source.get("name", String.class));
    -   * ...
    -   * }
    - * - * The column names are inferred from the naming strategy for this entity. - * - *

    The source will typically be one of the built-in driver subtypes: {@link Row} or {@link - * UdtValue} ({@link BoundStatement} and {@link BoundStatementBuilder} are also possible, although - * it's less likely that data would be read back from them in this manner). - * - *

    If {@code lenient} is {@code true}, the mapper will operate on a best-effort basis and - * attempt to read all entity properties that have a matching column in the source, leaving - * unmatched properties untouched. Beware that this may result in a partially-populated entity - * instance. - * - *

    If {@code lenient} is {@code false}, then the source must contain a matching column for - * every property in the entity definition, including computed ones. If such a column is - * not found, an {@link IllegalArgumentException} will be thrown. - * - * @param source the data structure to read from. - * @param lenient whether to tolerate incomplete sources. - * @return the resulting entity. - * @throws IllegalArgumentException if lenient is false and the source does not contain matching - * columns for every entity property. - */ - @NonNull - default EntityT get(@NonNull GettableByName source, boolean lenient) { - return get(source); - } - - /** - * Builds an insert query for this entity. - * - *

    The returned query is roughly the equivalent of: - * - *

    {@code
    -   * QueryBuilder.insertInto(keyspaceId, tableId)
    -   *     .value("id", QueryBuilder.bindMarker("id"))
    -   *     .value("name", QueryBuilder.bindMarker("name"))
    -   *     ...
    -   * }
    - * - * All mapped properties of the entity are included as bindable values (the bind markers have the - * same names as the columns). - * - *

    The column names are inferred from the naming strategy for this entity. - * - *

    The keyspace and table identifiers are those of the DAO that this helper was obtained from; - * if the DAO was built without a specific keyspace and table, the query doesn't specify a - * keyspace, and the table name is inferred from the naming strategy. - */ - @NonNull - RegularInsert insert(); - - /** - * Builds the beginning of a Update query to update an entity. - * - *

    This is the same as {@link #updateByPrimaryKey()} ()}, but without the {@code WHERE} clause. - * This would typically not be executed as-is, but instead completed with a custom {@code WHERE} - * clause (either added with the query builder DSL, or concatenated to the built query). - */ - @NonNull - Update updateStart(); - - /** - * Builds a Update query to update an instance of the entity by primary key (partition key + - * clustering columns). - * - *

    The returned query is roughly the equivalent of: - * - *

    {@code
    -   * QueryBuilder.update(keyspaceId, tableId)
    -   *     .setColumn("description", QueryBuilder.bindMarker("description"))
    -   *     ... // (other non-PK columns)
    -   *     .where(Relation.column("id").isEqualTo(QueryBuilder.bindMarker("id"))
    -   *     ... // (other PK columns)
    -   * }
    - * - * All non-PK properties of the entity are set, with bind markers that have the same names as the - * columns. - * - *

    All components of the primary key are listed in the {@code WHERE} clause as bindable values - * (the bind markers have the same names as the columns). They are listed in the natural order, - * i.e. partition key columns first, followed by clustering columns (in the order defined by the - * {@link PartitionKey} and {@link ClusteringColumn} annotations on the entity class). - * - *

    The keyspace and table identifiers are those of the DAO that this helper was obtained from; - * if the DAO was built without a specific keyspace and table, the query doesn't specify a - * keyspace, and the table name is inferred from the naming strategy. - */ - @NonNull - Update updateByPrimaryKey(); - - /** - * Builds a select query to fetch an instance of the entity by primary key (partition key + - * clustering columns). - * - *

    The returned query is roughly the equivalent of: - * - *

    {@code
    -   * QueryBuilder.selectFrom(keyspaceId, tableId)
    -   *     .column("id")
    -   *     .column("name")
    -   *     .whereColumn("id").isEqualTo(QueryBuilder.bindMarker("id"));
    -   *   ...
    -   * }
    - * - * All mapped properties of the entity are included in the result set. - * - *

    All components of the primary key are listed in the {@code WHERE} clause as bindable values - * (the bind markers have the same names as the columns). They are listed in the natural order, - * i.e. partition key columns first, followed by clustering columns (in the order defined by the - * {@link PartitionKey} and {@link ClusteringColumn} annotations on the entity class). - * - *

    The keyspace and table identifiers are those of the DAO that this helper was obtained from; - * if the DAO was built without a specific keyspace and table, the query doesn't specify a - * keyspace, and the table name is inferred from the naming strategy. - */ - @NonNull - Select selectByPrimaryKey(); - - /** - * Builds the beginning of a select query to fetch one or more instances of the entity. - * - *

    This is the same as {@link #selectByPrimaryKey()}, but without the {@code WHERE} clause. - * This would typically not be executed as-is, but instead completed with a custom {@code WHERE} - * clause (either added with the query builder DSL, or concatenated to the built query). - */ - @NonNull - Select selectStart(); - - /** - * Builds a delete query to delete an instance of the entity by primary key (partition key + - * clustering columns). - * - *

    The returned query is roughly the equivalent of: - * - *

    {@code
    -   * Delete delete = QueryBuilder.deleteFrom(keyspaceId, tableId)
    -   *     .whereColumn("id").isEqualTo(QueryBuilder.bindMarker("id"));
    -   * }
    - * - * All components of the primary key are listed in the {@code WHERE} clause as bindable values - * (the bind markers have the same names as the columns). They are listed in the natural order, - * i.e. partition key columns first, followed by clustering columns (in the order defined by the - * {@link PartitionKey} and {@link ClusteringColumn} annotations on the entity class). - * - *

    The keyspace and table identifiers are those of the DAO that this helper was obtained from; - * * if the DAO was built without a specific keyspace and table, the query doesn't specify a - * keyspace, and the table name is inferred from the naming strategy. - */ - @NonNull - Delete deleteByPrimaryKey(); - - /** - * The keyspace used in the queries generated by this helper. - * - *

    This is determined by the following rules: - * - *

      - *
    • If the DAO that this helper belongs to was created with a keyspace (see {@link - * DaoKeyspace}), use that; - *
    • Otherwise, if {@link Entity#defaultKeyspace()} is set for the entity class, use that; - *
    • Otherwise, return {@code null}. - *
    - */ - @Nullable - CqlIdentifier getKeyspaceId(); - - /** - * The table used in the queries generated by this helper. - * - *

    This is determined by the following rules: - * - *

      - *
    • If the DAO that this helper belongs to was created with a table (see {@link DaoTable}), - * use that; - *
    • Otherwise, if the entity class is annotated with {@link CqlName}, use that; - *
    • Otherwise, use the name of the entity class, transformed by its {@link NamingStrategy}. - *
    - */ - @NonNull - CqlIdentifier getTableId(); - - /** The class of the mapped entity. */ - @NonNull - Class getEntityClass(); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/GetterStyle.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/GetterStyle.java deleted file mode 100644 index 21e4755f4dd..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/GetterStyle.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.entity.naming; - -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; - -/** - * The style of getter that the mapper will look for when introspecting an entity class. - * - *

    Note that introspection always starts by looking for getters first: no-arg, non-void methods - * that follow the configured style. Then the mapper will try to find a matching field (which is not - * required), and, if the entity is mutable, a setter. - * - * @see PropertyStrategy - */ -public enum GetterStyle { - - /** - * "JavaBeans" style: the method name must start with "get", or "is" for boolean properties. The - * name of the property is the getter name without a prefix, and decapitalized, for example {@code - * int getFoo() => foo}. - */ - JAVABEANS, - - /** - * "Fluent" style: any name will match (as long as the no-arg, not-void rule also holds), and is - * considered to be the property name without any prefix. For example {@code int foo() => foo}. - * - *

    Note that this is the convention used in compiled Scala case classes. Whenever the mapper - * processes a type that implements {@code scala.Product}, it will switch to this style by - * default. - */ - FLUENT, - ; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java deleted file mode 100644 index ac9d05895b9..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.entity.naming; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A custom converter to infer CQL column names from the names used in an {@link Entity}-annotated - * class. - * - * @see NamingStrategy - */ -public interface NameConverter { - - /** - * Convert the given Java name into a CQL name. - * - *

    Note that this will be invoked by the generated code each time the name is - * referenced. If the conversion is expensive, implementors might consider an internal cache. - * - * @param javaName the name to convert. Note that if it is capitalized (e.g. {@code Product}), it - * is a class name, to be converted into a table name; otherwise (e.g. {@code productId}), it - * is a property name, to be converted into a column name. - * @return the corresponding CQL name. If you want it to be case-sensitive, it must be enclosed in - * double-quotes. - */ - @NonNull - String toCassandraName(@NonNull String javaName); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java deleted file mode 100644 index 8846e4f6fcb..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.entity.naming; - -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; - -/** - * A built-in convention to infer CQL column names from the names used in an {@link - * Entity}-annotated class. - * - * @see NamingStrategy - */ -public enum NamingConvention { - - /** - * Uses the Java name as-is, as a case-insensitive CQL name, for example {@code Product - * => Product, productId => productId}. - * - *

    In practice this is the same as lower-casing everything ({@code product, productid}), but it - * makes generated queries a bit easier to read. - */ - CASE_INSENSITIVE, - - /** - * Uses the Java name as-is, as a case-sensitive CQL name, for example {@code Product => - * "Product", productId => "productId"}. - * - *

    Use this if your schema uses camel case and you want to preserve capitalization in table - * names. - */ - EXACT_CASE, - - /** - * Divide the Java name into words, splitting on upper-case characters; capitalize every word - * except the first; then concatenate the words and make the result a case-sensitive CQL - * name, for example {@code Product => "product", productId => "productId"}. - */ - LOWER_CAMEL_CASE, - - /** - * Divide the Java name into words, splitting on upper-case characters; capitalize every word; - * then concatenate the words and make the result a case-sensitive CQL name, for example - * {@code Product => "Product", productId => "ProductId"}. - */ - UPPER_CAMEL_CASE, - - /** - * Divide the Java name into words, splitting on upper-case characters; lower-case everything; - * then concatenate the words with underscore separators, and make the result a - * case-insensitive CQL name, for example {@code Product => product, productId => - * product_id}. - */ - SNAKE_CASE_INSENSITIVE, - - /** - * Divide the Java name into words, splitting on upper-case characters; upper-case everything; - * then concatenate the words with underscore separators, and make the result a - * case-sensitive CQL name, for example {@code Product => "PRODUCT", productId => - * "PRODUCT_ID"}. - */ - UPPER_SNAKE_CASE, - - /** - * Upper-case everything, and make the result a case-sensitive CQL name, for example - * {@code Product => "PRODUCT", productId => "PRODUCTID"}. 
- */ - UPPER_CASE, -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/SetterStyle.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/SetterStyle.java deleted file mode 100644 index 26e301c5f76..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/SetterStyle.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.entity.naming; - -import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; - -/** - * The style of setter that the mapper will look for when introspecting a mutable entity class. - * - *

    Note that introspection always starts by looking for getters first (see {@link GetterStyle}). - * Once a getter has been found, and if the entity is declared as {@link PropertyStrategy#mutable() - * mutable}, the mapper will try to find a matching setter: name inferred as described below, - * exactly one argument matching the property type, and the return type does not matter. - * - * @see PropertyStrategy - */ -public enum SetterStyle { - - /** - * "JavaBeans" style: the method name must start with "set", for example {@code int foo => - * setFoo(int)}. - */ - JAVABEANS, - - /** - * "Fluent" style: the method name must be the name of the property, without any prefix, for - * example {@code int foo => foo(int)}. - */ - FLUENT, - ; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java deleted file mode 100644 index f2233e5721c..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.mapper.entity.saving; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Entity; - -/** Defines how null {@link Entity} properties will be handled during data insertion. */ -public enum NullSavingStrategy { - - /** - * Do not insert null properties. - * - *

    In other words, the mapper won't call the corresponding setter on the {@link - * BoundStatement}. The generated code looks approximately like this: - * - *

    -   * if (entity.getDescription() != null) {
    -   *   boundStatement = boundStatement.setString("description", entity.getDescription());
    -   * }
    -   * 
    - * - * This avoids inserting tombstones for null properties. On the other hand, if the query is an - * update and the column previously had another value, it won't be overwritten. - * - *

    Note that unset values are only supported with {@link DefaultProtocolVersion#V4 native - * protocol v4} (Cassandra 2.2) or above. If you try to use this strategy with a lower Cassandra - * version, the mapper will throw a {@link MapperException} when you try to build the - * corresponding DAO. - * - * @see CASSANDRA-7304 - */ - DO_NOT_SET, - - /** - * Insert null properties as a CQL {@code NULL}. - * - *

    In other words, the mapper will always call the corresponding setter on the {@link - * BoundStatement}. The generated code looks approximately like this: - * - *

    -   * // Called even if entity.getDescription() == null
    -   * boundStatement = boundStatement.setString("description", entity.getDescription());
    -   * 
    - */ - SET_TO_NULL -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.java deleted file mode 100644 index d262986b75f..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.result; - -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.concurrent.CompletionStage; - -/** - * A component that can be plugged into the object mapper, in order to return custom result types - * from DAO methods. - * - *

    For example, this could be used to substitute a 3rd-party future implementation for {@link - * CompletionStage}: - * - *

    - * public class CustomFutureProducer implements MapperResultProducer {
    - *   ...
    - * }
    - * 
    - * - *

    Producers are registered via the Java Service Provider mechanism (see {@link - * MapperResultProducerService}). DAO methods can then use the new type: - * - *

    - * @Dao
    - * public interface ProductDao {
    - *   @Select
    - *   CustomFuture<Product> findById(UUID productId);
    - * }
    - * 
    - * - * See the javadocs of the methods in this interface for more explanations. - */ -public interface MapperResultProducer { - - /** - * Checks if this producer can handle a particular result type. - * - *

    This will be invoked at runtime to select a producer: if a DAO method declares a return type - * that is not supported natively, then the mapper generates an implementation which, for every - * invocation, iterates through all the producers in the order that they were registered, - * and picks the first one where {@code canProduce()} returns true. - * - * @param resultType the DAO method's declared return type. If checking the top-level type is - * sufficient, then {@link GenericType#getRawType()} should do the trick. If you need to - * recurse into the type arguments, call {@link GenericType#getType()} and use the {@code - * java.lang.reflect} APIs. - */ - boolean canProduce(@NonNull GenericType resultType); - - /** - * Executes the statement generated by the mapper, and converts the result to the expected type. - * - *

    This will be executed at runtime, every time the DAO method is called. - * - * @param statement the statement, ready to execute: the mapper has already bound all the values, - * and set all the necessary attributes (consistency, page size, etc). - * @param context the context in which the DAO method is executed. In particular, this is how you - * get access to the {@linkplain MapperContext#getSession() session}. - * @param entityHelper if the type to produce contains a mapped entity (e.g. {@code - * ListenableFuture}), an instance of the helper class to manipulate that entity. In - * particular, {@link EntityHelper#get(GettableByName) entityHelper.get()} allows you to - * convert rows into entity instances. If the type to produce does not contain an entity, this - * will be {@code null}. - * @return the object to return from the DAO method. This must match the type that this producer - * was selected for, there will be an unchecked cast at runtime. - */ - @Nullable - Object execute( - @NonNull Statement statement, - @NonNull MapperContext context, - @Nullable EntityHelper entityHelper); - - /** - * Surfaces any error encountered in the DAO method (either in the generated mapper code that - * builds the statement, or during invocation of {@link #execute}). - * - *

    For some result types, it is expected that errors will be wrapped in some sort of container - * instead of thrown directly; for example a failed future or publisher. - * - *

    If rethrowing is the right thing to do, then it is perfectly fine to do so from this method. - * If you throw checked exceptions, they will be propagated directly if the DAO method also - * declares them, or wrapped into a {@link RuntimeException} otherwise. - */ - @Nullable - Object wrapError(@NonNull Exception e) throws Exception; -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.java deleted file mode 100644 index b8afdba53b1..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.mapper.result; - -/** - * Provides the custom mapper result types that will be used in an application. - * - *

    This class is loaded with the Java Service Provider Interface mechanism, you must reference it - * via a service descriptor: create a file {@code - * META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService}, with - * one or more lines, each referencing the name of an implementing class. - */ -public interface MapperResultProducerService { - - /** - * Returns the producers provided by this service. - * - *

    Note that order matters, the producers will be tried from left to right until one matches. - * If there is some overlap between your producers' {@link MapperResultProducer#canProduce - * canProduce()} implementations, put the most specific ones first. - */ - Iterable getProducers(); -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java deleted file mode 100644 index 5f617de52e1..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper; - -import com.datastax.oss.driver.api.core.ConsistencyLevel; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.Query; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.ResultSets; -import com.datastax.oss.protocol.internal.ProtocolConstants; -import java.time.Duration; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - -/** Base class for generated implementations of {@link Dao}-annotated interfaces. */ -public class DaoBase { - - /** The keyspace id placeholder in {@link Query#value()}. */ - public static final String KEYSPACE_ID_PLACEHOLDER = "${keyspaceId}"; - - /** The table id placeholder in {@link Query#value()}. 
*/ - public static final String TABLE_ID_PLACEHOLDER = "${tableId}"; - - /** The qualified table id placeholder in {@link Query#value()}. */ - public static final String QUALIFIED_TABLE_ID_PLACEHOLDER = "${qualifiedTableId}"; - - private static final CqlIdentifier APPLIED = CqlIdentifier.fromInternal("[applied]"); - - protected static CompletionStage prepare( - SimpleStatement statement, MapperContext context) { - if (context.getExecutionProfileName() != null) { - statement = statement.setExecutionProfileName(context.getExecutionProfileName()); - } else if (context.getExecutionProfile() != null) { - statement = statement.setExecutionProfile(context.getExecutionProfile()); - } - return context.getSession().prepareAsync(statement); - } - - /** - * Replaces {@link #KEYSPACE_ID_PLACEHOLDER}, {@link #TABLE_ID_PLACEHOLDER} and/or {@link - * #QUALIFIED_TABLE_ID_PLACEHOLDER} in a query string, and turns it into a statement. - * - *

    This is used for {@link Query} methods. - * - * @param queryStringTemplate the query string to process. - * @param context the context that contains the keyspace and table that the DAO was created with - * (if any). - * @param entityHelper the helper the entity that is returned from this query, or {@code null} if - * the query does not return entities. - */ - protected static SimpleStatement replaceKeyspaceAndTablePlaceholders( - String queryStringTemplate, MapperContext context, EntityHelper entityHelper) { - - CqlIdentifier keyspaceId = - (entityHelper != null) ? entityHelper.getKeyspaceId() : context.getKeyspaceId(); - CqlIdentifier tableId = - (entityHelper != null) ? entityHelper.getTableId() : context.getTableId(); - - String queryString = queryStringTemplate; - if (queryString.contains(KEYSPACE_ID_PLACEHOLDER)) { - if (keyspaceId == null) { - throw new MapperException( - String.format( - "Cannot substitute %s in query '%s': the DAO wasn't built with a keyspace%s", - KEYSPACE_ID_PLACEHOLDER, - queryStringTemplate, - (entityHelper == null) - ? "" - : " and entity " - + entityHelper.getEntityClass().getSimpleName() - + " does not define a default keyspace")); - } else { - queryString = queryString.replace(KEYSPACE_ID_PLACEHOLDER, keyspaceId.asCql(false)); - } - } - - if (queryString.contains(TABLE_ID_PLACEHOLDER)) { - if (tableId == null) { - throw new MapperException( - String.format( - "Cannot substitute %s in query '%s': the DAO wasn't built with a table", - TABLE_ID_PLACEHOLDER, queryStringTemplate)); - } else { - queryString = queryString.replace(TABLE_ID_PLACEHOLDER, tableId.asCql(false)); - } - } - - if (queryString.contains(QUALIFIED_TABLE_ID_PLACEHOLDER)) { - if (tableId == null) { - throw new MapperException( - String.format( - "Cannot substitute %s in query '%s': the DAO wasn't built with a table", - QUALIFIED_TABLE_ID_PLACEHOLDER, queryStringTemplate)); - } else { - String qualifiedId = - (keyspaceId == null) - ? 
tableId.asCql(false) - : keyspaceId.asCql(false) + '.' + tableId.asCql(false); - queryString = queryString.replace(QUALIFIED_TABLE_ID_PLACEHOLDER, qualifiedId); - } - } - - return SimpleStatement.newInstance(queryString); - } - - public BoundStatementBuilder populateBoundStatementWithStatementAttributes( - BoundStatementBuilder builder, - String profileName, - String consistencyLevel, - String serialConsistencyLevel, - Boolean idempotent, - int pageSize, - String timeout, - String keyspace) { - - if (!profileName.isEmpty()) { - builder = builder.setExecutionProfileName(profileName); - } - if (!consistencyLevel.isEmpty()) { - builder = builder.setConsistencyLevel(getConsistencyLevelFromName(consistencyLevel)); - } - if (!serialConsistencyLevel.isEmpty()) { - builder = - builder.setSerialConsistencyLevel(getConsistencyLevelFromName(serialConsistencyLevel)); - } - if (idempotent != null) { - builder = builder.setIdempotence(idempotent); - } - if (pageSize > 0) { - builder = builder.setPageSize(pageSize); - } - if (!timeout.isEmpty()) { - builder = builder.setTimeout(Duration.parse(timeout)); - } - if (!keyspace.isEmpty()) { - builder = builder.setRoutingKeyspace(keyspace); - } - return builder; - } - - private ConsistencyLevel getConsistencyLevelFromName(String name) { - InternalDriverContext idContext = (InternalDriverContext) context.getSession().getContext(); - ConsistencyLevelRegistry registry = idContext.getConsistencyLevelRegistry(); - return registry.codeToLevel(registry.nameToCode(name)); - } - - protected final MapperContext context; - protected final boolean isProtocolVersionV3; - - protected DaoBase(MapperContext context) { - this.context = context; - this.isProtocolVersionV3 = isProtocolVersionV3(context); - } - - protected ResultSet execute(Statement statement) { - return context.getSession().execute(statement); - } - - protected boolean executeAndMapWasAppliedToBoolean(Statement statement) { - ResultSet rs = execute(statement); - return rs.wasApplied(); 
- } - - protected long executeAndMapFirstColumnToLong(Statement statement) { - Row row = executeAndExtractFirstRow(statement); - return extractCount(row); - } - - private long extractCount(Row row) { - if (row == null) { - throw new MapperException( - "Expected the query to return at least one row " - + "(return type long is intended for COUNT queries)"); - } - if (row.getColumnDefinitions().size() == 0 - || !row.getColumnDefinitions().get(0).getType().equals(DataTypes.BIGINT)) { - throw new MapperException( - "Expected the query to return a column with CQL type BIGINT in first position " - + "(return type long is intended for COUNT queries)"); - } - return row.getLong(0); - } - - protected Row executeAndExtractFirstRow(Statement statement) { - return execute(statement).one(); - } - - protected EntityT executeAndMapToSingleEntity( - Statement statement, EntityHelper entityHelper) { - ResultSet rs = execute(statement); - return asEntity(rs.one(), entityHelper); - } - - private EntityT asEntity(Row row, EntityHelper entityHelper) { - return (row == null - // Special case for INSERT IF NOT EXISTS. If the row did not exist, the query returns - // only [applied], we want to return null to indicate there was no previous entity - || (row.getColumnDefinitions().size() == 1 - && row.getColumnDefinitions().get(0).getName().equals(APPLIED))) - ? 
null - : entityHelper.get(row, false); - } - - protected Optional executeAndMapToOptionalEntity( - Statement statement, EntityHelper entityHelper) { - return Optional.ofNullable(executeAndMapToSingleEntity(statement, entityHelper)); - } - - protected PagingIterable executeAndMapToEntityIterable( - Statement statement, EntityHelper entityHelper) { - return execute(statement).map(row -> entityHelper.get(row, false)); - } - - protected Stream executeAndMapToEntityStream( - Statement statement, EntityHelper entityHelper) { - return StreamSupport.stream( - execute(statement).map(row -> entityHelper.get(row, false)).spliterator(), false); - } - - protected CompletableFuture executeAsync(Statement statement) { - CompletionStage stage = context.getSession().executeAsync(statement); - // We allow DAO interfaces to return CompletableFuture instead of CompletionStage. This method - // returns CompletableFuture, which makes the implementation code a bit simpler to generate. - // In practice this has no performance impact, because the default implementation of - // toCompletableFuture in the JDK is `return this`. 
- return stage.toCompletableFuture(); - } - - protected CompletableFuture executeAsyncAndMapToVoid(Statement statement) { - return executeAsync(statement).thenApply(rs -> null); - } - - protected CompletableFuture executeAsyncAndMapWasAppliedToBoolean( - Statement statement) { - return executeAsync(statement).thenApply(AsyncResultSet::wasApplied); - } - - protected CompletableFuture executeAsyncAndMapFirstColumnToLong(Statement statement) { - return executeAsyncAndExtractFirstRow(statement).thenApply(this::extractCount); - } - - protected CompletableFuture executeAsyncAndExtractFirstRow(Statement statement) { - return executeAsync(statement).thenApply(AsyncResultSet::one); - } - - protected CompletableFuture executeAsyncAndMapToSingleEntity( - Statement statement, EntityHelper entityHelper) { - return executeAsync(statement).thenApply(rs -> asEntity(rs.one(), entityHelper)); - } - - protected CompletableFuture> executeAsyncAndMapToOptionalEntity( - Statement statement, EntityHelper entityHelper) { - return executeAsync(statement) - .thenApply(rs -> Optional.ofNullable(asEntity(rs.one(), entityHelper))); - } - - protected - CompletableFuture> executeAsyncAndMapToEntityIterable( - Statement statement, EntityHelper entityHelper) { - return executeAsync(statement).thenApply(rs -> rs.map(row -> entityHelper.get(row, false))); - } - - protected CompletableFuture> executeAsyncAndMapToEntityStream( - Statement statement, EntityHelper entityHelper) { - return executeAsync(statement) - .thenApply(ResultSets::newInstance) - .thenApply(rs -> StreamSupport.stream(rs.map(entityHelper::get).spliterator(), false)); - } - - protected static void throwIfProtocolVersionV3(MapperContext context) { - if (isProtocolVersionV3(context)) { - throw new MapperException( - String.format( - "You cannot use %s.%s for protocol version V3.", - NullSavingStrategy.class.getSimpleName(), NullSavingStrategy.DO_NOT_SET.name())); - } - } - - protected static boolean isProtocolVersionV3(MapperContext 
context) { - return context.getSession().getContext().getProtocolVersion().getCode() - <= ProtocolConstants.Version.V3; - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java deleted file mode 100644 index 32fae259769..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import java.util.Objects; - -public class DaoCacheKey { - - private final CqlIdentifier keyspaceId; - private final CqlIdentifier tableId; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - - public DaoCacheKey( - CqlIdentifier keyspaceId, - CqlIdentifier tableId, - String executionProfileName, - DriverExecutionProfile executionProfile) { - this.keyspaceId = keyspaceId; - this.tableId = tableId; - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - } - - public DaoCacheKey( - CqlIdentifier keyspaceId, - String tableName, - String executionProfileName, - DriverExecutionProfile executionProfile) { - this(keyspaceId, toId(tableName), executionProfileName, executionProfile); - } - - public DaoCacheKey( - String keyspaceName, - CqlIdentifier tableId, - String executionProfileName, - DriverExecutionProfile executionProfile) { - this(toId(keyspaceName), tableId, executionProfileName, executionProfile); - } - - public DaoCacheKey( - String keyspaceName, - String tableName, - String executionProfileName, - DriverExecutionProfile executionProfile) { - this(toId(keyspaceName), toId(tableName), executionProfileName, executionProfile); - } - - private static CqlIdentifier toId(String name) { - return name == null ? 
null : CqlIdentifier.fromCql(name); - } - - public CqlIdentifier getKeyspaceId() { - return keyspaceId; - } - - public CqlIdentifier getTableId() { - return tableId; - } - - public String getExecutionProfileName() { - return executionProfileName; - } - - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof DaoCacheKey) { - DaoCacheKey that = (DaoCacheKey) other; - return Objects.equals(this.keyspaceId, that.keyspaceId) - && Objects.equals(this.tableId, that.tableId) - && Objects.equals(this.executionProfileName, that.executionProfileName) - && Objects.equals(this.executionProfile, that.executionProfile); - } else { - return false; - } - } - - @Override - public int hashCode() { - return Objects.hash(keyspaceId, tableId, executionProfileName, executionProfile); - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java deleted file mode 100644 index 2d09c2e853f..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; -import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.lang.reflect.InvocationTargetException; -import java.util.Map; -import java.util.Objects; -import java.util.ServiceConfigurationError; -import java.util.ServiceLoader; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DefaultMapperContext implements MapperContext { - - private static final Logger LOGGER = LoggerFactory.getLogger(DefaultMapperContext.class); - - private final ConcurrentMap, MapperResultProducer> resultProducerCache = - new ConcurrentHashMap<>(); 
- - private final CqlSession session; - private final CqlIdentifier keyspaceId; - private final CqlIdentifier tableId; - private final String executionProfileName; - private final DriverExecutionProfile executionProfile; - private final ConcurrentMap, NameConverter> nameConverterCache; - private final Map customState; - private final ImmutableList resultProducers; - - public DefaultMapperContext( - @NonNull CqlSession session, - @Nullable CqlIdentifier keyspaceId, - @Nullable String executionProfileName, - @Nullable DriverExecutionProfile executionProfile, - @NonNull Map customState) { - this( - session, - keyspaceId, - null, - executionProfileName, - executionProfile, - new ConcurrentHashMap<>(), - NullAllowingImmutableMap.copyOf(customState)); - } - - private DefaultMapperContext( - CqlSession session, - CqlIdentifier keyspaceId, - CqlIdentifier tableId, - String executionProfileName, - DriverExecutionProfile executionProfile, - ConcurrentMap, NameConverter> nameConverterCache, - Map customState) { - if (executionProfile != null && executionProfileName != null) { - // the mapper code prevents this, so we should never get here - throw new IllegalArgumentException("Can't provide both a profile and a name"); - } - this.session = session; - this.keyspaceId = keyspaceId; - this.tableId = tableId; - this.nameConverterCache = nameConverterCache; - this.customState = customState; - this.executionProfileName = executionProfileName; - this.executionProfile = executionProfile; - this.resultProducers = - locateResultProducers(((InternalDriverContext) session.getContext()).getClassLoader()); - } - - public DefaultMapperContext withDaoParameters( - @Nullable CqlIdentifier newKeyspaceId, - @Nullable CqlIdentifier newTableId, - @Nullable String newExecutionProfileName, - @Nullable DriverExecutionProfile newExecutionProfile) { - return (Objects.equals(newKeyspaceId, this.keyspaceId) - && Objects.equals(newTableId, this.tableId) - && Objects.equals(newExecutionProfileName, 
this.executionProfileName) - && Objects.equals(newExecutionProfile, this.executionProfile)) - ? this - : new DefaultMapperContext( - session, - newKeyspaceId, - newTableId, - newExecutionProfileName, - newExecutionProfile, - nameConverterCache, - customState); - } - - @NonNull - @Override - public CqlSession getSession() { - return session; - } - - @Nullable - @Override - public CqlIdentifier getKeyspaceId() { - return keyspaceId; - } - - @Nullable - @Override - public CqlIdentifier getTableId() { - return tableId; - } - - @Nullable - @Override - public String getExecutionProfileName() { - return executionProfileName; - } - - @Nullable - @Override - public DriverExecutionProfile getExecutionProfile() { - return executionProfile; - } - - @NonNull - @Override - public NameConverter getNameConverter(Class converterClass) { - return nameConverterCache.computeIfAbsent( - converterClass, DefaultMapperContext::buildNameConverter); - } - - @NonNull - @Override - public Map getCustomState() { - return customState; - } - - @NonNull - @Override - public MapperResultProducer getResultProducer(@NonNull GenericType resultToProduce) { - return resultProducerCache.computeIfAbsent( - resultToProduce, - k -> { - for (MapperResultProducer resultProducer : resultProducers) { - if (resultProducer.canProduce(k)) { - return resultProducer; - } - } - throw new IllegalArgumentException( - String.format( - "Found no registered %s that can produce %s", - MapperResultProducer.class.getSimpleName(), k)); - }); - } - - private static NameConverter buildNameConverter(Class converterClass) { - try { - return converterClass.getDeclaredConstructor().newInstance(); - } catch (InstantiationException - | IllegalAccessException - | NoSuchMethodException - | InvocationTargetException e) { - throw new MapperException( - String.format( - "Error while building an instance of %s. 
" - + "%s implementations must have a public no-arg constructor", - converterClass, NameConverter.class.getSimpleName()), - e); - } - } - - private static ImmutableList locateResultProducers( - ClassLoader classLoader) { - LOGGER.debug( - "Locating result producers with CL = {}, MapperResultProducerService CL = {}", - classLoader, - MapperResultProducerService.class.getClassLoader()); - ImmutableList.Builder builder = ImmutableList.builder(); - try { - ServiceLoader loader = - ServiceLoader.load(MapperResultProducerService.class, classLoader); - loader.iterator().forEachRemaining(provider -> builder.addAll(provider.getProducers())); - } catch (Exception | ServiceConfigurationError e) { - LOGGER.error("Failed to locate result producers", e); - } - ImmutableList producers = builder.build(); - LOGGER.debug("Located {} result producers: {}", producers.size(), producers); - return producers; - } -} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java deleted file mode 100644 index 3977ea0c451..00000000000 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.mapper.entity; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.data.AccessibleByName; -import com.datastax.oss.driver.api.core.data.GettableByName; -import com.datastax.oss.driver.api.core.data.SettableByName; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.mapper.MapperBuilder; -import com.datastax.oss.driver.api.mapper.MapperContext; -import com.datastax.oss.driver.api.mapper.MapperException; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.entity.EntityHelper; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import 
com.datastax.oss.driver.internal.core.util.CollectionsUtils; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -public abstract class EntityHelperBase implements EntityHelper { - - protected final CqlIdentifier keyspaceId; - - protected final CqlIdentifier tableId; - - protected final MapperContext context; - - protected EntityHelperBase(MapperContext context, String defaultTableName) { - this(context, null, defaultTableName); - } - - protected EntityHelperBase( - MapperContext context, String defaultKeyspaceName, String defaultTableName) { - this.context = context; - this.tableId = - context.getTableId() != null - ? context.getTableId() - : CqlIdentifier.fromCql(defaultTableName); - this.keyspaceId = - context.getKeyspaceId() != null - ? context.getKeyspaceId() - : (defaultKeyspaceName == null ? null : CqlIdentifier.fromCql(defaultKeyspaceName)); - } - - @Nullable - @Override - public CqlIdentifier getKeyspaceId() { - return keyspaceId; - } - - @NonNull - @Override - public CqlIdentifier getTableId() { - return tableId; - } - - @NonNull - @Override - @Deprecated - public > SettableT set( - @NonNull EntityT entity, - @NonNull SettableT target, - @NonNull NullSavingStrategy nullSavingStrategy) { - return set(entity, target, nullSavingStrategy, false); - } - - @NonNull - @Override - @Deprecated - public EntityT get(@NonNull GettableByName source) { - return get(source, false); - } - - public void throwIfKeyspaceMissing() { - if (this.getKeyspaceId() == null && !context.getSession().getKeyspace().isPresent()) { - throw new MapperException( - String.format( - "Missing keyspace. 
Suggestions: use SessionBuilder.withKeyspace() " - + "when creating your session, specify a default keyspace on %s with @%s" - + "(defaultKeyspace), or use a @%s method with a @%s parameter", - this.getEntityClass().getSimpleName(), - Entity.class.getSimpleName(), - DaoFactory.class.getSimpleName(), - DaoKeyspace.class.getSimpleName())); - } - } - - public List findMissingColumns( - List entityColumns, Collection cqlColumns) { - return findMissingCqlIdentifiers( - entityColumns, - cqlColumns.stream().map(ColumnMetadata::getName).collect(Collectors.toList())); - } - - public List findMissingCqlIdentifiers( - List entityColumns, Collection cqlColumns) { - List missingColumns = new ArrayList<>(); - for (CqlIdentifier entityCqlIdentifier : entityColumns) { - if (!cqlColumns.contains(entityCqlIdentifier)) { - missingColumns.add(entityCqlIdentifier); - } - } - return missingColumns; - } - - /** - * When the new instance of a class annotated with {@link Dao} is created an automatic check for - * schema validation is performed. It verifies if all {@link Dao} entity fields are present in CQL - * table. If not the {@link IllegalArgumentException} exception with detailed message is thrown. - * This check has startup overhead so once your app is stable you may want to disable it. The - * schema validation check is enabled by default. It can be disabled using the {@link - * MapperBuilder#withSchemaValidationEnabled(boolean)} method. 
- */ - public abstract void validateEntityFields(); - - public static List findTypeMismatches( - Map> entityColumns, - Map cqlColumns, - CodecRegistry codecRegistry) { - Map cqlColumnsDataTypes = - cqlColumns.entrySet().stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, - cqlIdentifierColumnMetadataEntry -> - cqlIdentifierColumnMetadataEntry.getValue().getType())); - - return findDataTypeMismatches(entityColumns, cqlColumnsDataTypes, codecRegistry); - } - - public static List findTypeMismatches( - Map> entityColumns, - List cqlColumns, - List cqlTypes, - CodecRegistry codecRegistry) { - return findDataTypeMismatches( - entityColumns, - CollectionsUtils.combineListsIntoOrderedMap(cqlColumns, cqlTypes), - codecRegistry); - } - - private static List findDataTypeMismatches( - Map> entityColumns, - Map cqlColumns, - CodecRegistry codecRegistry) { - List missingCodecs = new ArrayList<>(); - - for (Map.Entry> entityEntry : entityColumns.entrySet()) { - DataType datType = cqlColumns.get(entityEntry.getKey()); - if (datType == null) { - // this will not happen because it will be catch by the generateMissingColumnsCheck() method - throw new AssertionError( - "There is no cql column for entity column: " + entityEntry.getKey()); - } - try { - codecRegistry.codecFor(datType, entityEntry.getValue()); - } catch (CodecNotFoundException exception) { - missingCodecs.add( - String.format( - "Field: %s, Entity Type: %s, CQL type: %s", - entityEntry.getKey(), exception.getJavaType(), exception.getCqlType())); - } - } - return missingCodecs; - } - - public void throwMissingUdtTypesIfNotEmpty( - List missingTypes, - CqlIdentifier keyspaceId, - CqlIdentifier tableId, - String entityClassName) { - throwMissingTypesIfNotEmpty(missingTypes, keyspaceId, tableId, entityClassName, "udt"); - } - - public void throwMissingTableTypesIfNotEmpty( - List missingTypes, - CqlIdentifier keyspaceId, - CqlIdentifier tableId, - String entityClassName) { - 
throwMissingTypesIfNotEmpty(missingTypes, keyspaceId, tableId, entityClassName, "table"); - } - - public void throwMissingTypesIfNotEmpty( - List missingTypes, - CqlIdentifier keyspaceId, - CqlIdentifier tableId, - String entityClassName, - String type) { - if (!missingTypes.isEmpty()) { - throw new IllegalArgumentException( - String.format( - "The CQL ks.%s: %s.%s defined in the entity class: %s declares type mappings that are not supported by the codec registry:\n%s", - type, keyspaceId, tableId, entityClassName, String.join("\n", missingTypes))); - } - } - - public boolean keyspaceNamePresent( - Map keyspaces, CqlIdentifier keyspaceId) { - return keyspaces.containsKey(keyspaceId); - } - - public boolean hasProperty(AccessibleByName source, String name) { - if (source instanceof Row) { - return ((Row) source).getColumnDefinitions().contains(name); - } else if (source instanceof UdtValue) { - return ((UdtValue) source).getType().contains(name); - } else if (source instanceof BoundStatement) { - return ((BoundStatement) source) - .getPreparedStatement() - .getVariableDefinitions() - .contains(name); - } else if (source instanceof BoundStatementBuilder) { - return ((BoundStatementBuilder) source) - .getPreparedStatement() - .getVariableDefinitions() - .contains(name); - } - // other implementations: assume the property is present - return true; - } -} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/DependencyCheckTest.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/DependencyCheckTest.java deleted file mode 100644 index 03c1e5bb24f..00000000000 --- a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/DependencyCheckTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.mapper; - -import com.datastax.dse.driver.internal.DependencyCheckTestBase; -import java.nio.file.Path; -import java.nio.file.Paths; - -public class DependencyCheckTest extends DependencyCheckTestBase { - - @Override - protected Path getDepsTxtPath() { - return Paths.get( - getBaseResourcePathString(), - "target", - "classes", - "com", - "datastax", - "dse", - "driver", - "internal", - "mapper", - "deps.txt"); - } -} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSetTckTest.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSetTckTest.java deleted file mode 100644 index efd223b1314..00000000000 --- a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSetTckTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.mapper.reactive; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.internal.core.cql.reactive.DefaultReactiveResultSet; -import com.datastax.dse.driver.internal.mapper.reactive.DefaultMappedReactiveResultSet; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import io.reactivex.Flowable; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.reactivestreams.Publisher; -import org.reactivestreams.tck.PublisherVerification; -import org.reactivestreams.tck.TestEnvironment; - -public class MappedReactiveResultSetTckTest extends PublisherVerification { - - public MappedReactiveResultSetTckTest() { - super(new TestEnvironment()); - } - - @Override - public Publisher createPublisher(long elements) { - // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE. - // Past 3 elements it never checks how many elements have been effectively produced, - // so we can safely cap at, say, 20. 
- int effective = (int) Math.min(elements, 20L); - return new DefaultMappedReactiveResultSet<>( - new DefaultReactiveResultSet(() -> createResults(effective)), row -> row.getInt(0)); - } - - @Override - public Publisher createFailedPublisher() { - DefaultReactiveResultSet publisher = new DefaultReactiveResultSet(() -> createResults(1)); - // Since our publisher does not support multiple - // subscriptions, we use that to create a failed publisher. - publisher.subscribe(new TestSubscriber<>()); - return new DefaultMappedReactiveResultSet<>(publisher, row -> row.getInt(0)); - } - - private static CompletableFuture createResults(int elements) { - CompletableFuture previous = null; - if (elements > 0) { - // create pages of 5 elements each to exercise pagination - List pages = - Flowable.range(0, elements).buffer(5).map(List::size).toList().blockingGet(); - Collections.reverse(pages); - for (Integer size : pages) { - List rows = - Flowable.range(0, size) - .map( - i -> { - Row row = mock(Row.class); - when(row.getInt(0)).thenReturn(i); - return row; - }) - .toList() - .blockingGet(); - CompletableFuture future = new CompletableFuture<>(); - future.complete(new MockAsyncResultSet(rows, previous)); - previous = future; - } - } else { - previous = new CompletableFuture<>(); - previous.complete(new MockAsyncResultSet(0, null)); - } - return previous; - } -} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java deleted file mode 100644 index 849839b7904..00000000000 --- a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.mapper.reactive; - -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.api.core.cql.Row; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletionStage; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -public class MockAsyncResultSet implements AsyncResultSet { - - private final List rows; - private final Iterator iterator; - private final CompletionStage nextPage; - private final ExecutionInfo executionInfo = mock(ExecutionInfo.class); - private final ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); - private int remaining; - - public MockAsyncResultSet(int size, CompletionStage nextPage) { - this(IntStream.range(0, size).boxed().map(MockRow::new).collect(Collectors.toList()), nextPage); - } - - public MockAsyncResultSet(List rows, CompletionStage nextPage) { - this.rows = rows; - iterator = rows.iterator(); - remaining = rows.size(); - this.nextPage = nextPage; - } - - @Override - public Row one() { - Row next = iterator.next(); - remaining--; - return next; - } - - @Override - public 
int remaining() { - return remaining; - } - - @NonNull - @Override - public List currentPage() { - return new ArrayList<>(rows); - } - - @Override - public boolean hasMorePages() { - return nextPage != null; - } - - @NonNull - @Override - public CompletionStage fetchNextPage() throws IllegalStateException { - return nextPage; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return columnDefinitions; - } - - @NonNull - @Override - public ExecutionInfo getExecutionInfo() { - return executionInfo; - } - - @Override - public boolean wasApplied() { - return true; - } -} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockRow.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockRow.java deleted file mode 100644 index 0c3ead94349..00000000000 --- a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockRow.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.mapper.reactive; - -import static org.mockito.Mockito.mock; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.detach.AttachmentPoint; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; - -class MockRow implements Row { - - private int index; - - MockRow(int index) { - this.index = index; - } - - @Override - public int size() { - return 0; - } - - @NonNull - @Override - public CodecRegistry codecRegistry() { - return mock(CodecRegistry.class); - } - - @NonNull - @Override - public ProtocolVersion protocolVersion() { - return DefaultProtocolVersion.V4; - } - - @NonNull - @Override - public ColumnDefinitions getColumnDefinitions() { - return EmptyColumnDefinitions.INSTANCE; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull String name) { - return Collections.singletonList(0); - } - - @Override - public int firstIndexOf(@NonNull String name) { - return 0; - } - - @NonNull - @Override - public List allIndicesOf(@NonNull CqlIdentifier id) { - return Collections.singletonList(0); - } - - @Override - public int firstIndexOf(@NonNull CqlIdentifier id) { - return 0; - } - - @NonNull - @Override - public DataType getType(int i) { - return DataTypes.INT; - } - - @NonNull - @Override - public DataType getType(@NonNull String name) { - return DataTypes.INT; - } - - @NonNull - @Override - public DataType 
getType(@NonNull CqlIdentifier id) { - return DataTypes.INT; - } - - @Override - public ByteBuffer getBytesUnsafe(int i) { - return null; - } - - @Override - public boolean isDetached() { - return false; - } - - @Override - public void attach(@NonNull AttachmentPoint attachmentPoint) {} - - // equals and hashCode required for TCK tests that check that two subscribers - // receive the exact same set of items. - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof MockRow)) { - return false; - } - MockRow mockRow = (MockRow) o; - return index == mockRow.index; - } - - @Override - public int hashCode() { - return index; - } -} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/TestSubscriber.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/TestSubscriber.java deleted file mode 100644 index 6886b9a7622..00000000000 --- a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/TestSubscriber.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.mapper.reactive; - -import static org.assertj.core.api.Fail.fail; - -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -public class TestSubscriber implements Subscriber { - - private final List elements = new ArrayList<>(); - private final CountDownLatch latch = new CountDownLatch(1); - private Subscription subscription; - private Throwable error; - - @Override - public void onSubscribe(Subscription s) { - if (subscription != null) { - throw new AssertionError("already subscribed"); - } - subscription = s; - s.request(Long.MAX_VALUE); - } - - @Override - public void onNext(T t) { - elements.add(t); - } - - @Override - public void onError(Throwable t) { - error = t; - latch.countDown(); - } - - @Override - public void onComplete() { - latch.countDown(); - } - - @Nullable - public Throwable getError() { - return error; - } - - @NonNull - public List getElements() { - return elements; - } - - public void awaitTermination() { - if (!Uninterruptibles.awaitUninterruptibly(latch, 1, TimeUnit.MINUTES)) { - fail("subscriber not terminated"); - } - } -} diff --git a/mapper-runtime/src/test/resources/project.properties b/mapper-runtime/src/test/resources/project.properties deleted file mode 100644 index 66eab90b6e4..00000000000 --- a/mapper-runtime/src/test/resources/project.properties +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -project.basedir=${basedir} \ No newline at end of file diff --git a/metrics/micrometer/pom.xml b/metrics/micrometer/pom.xml deleted file mode 100644 index 37ba8556a53..00000000000 --- a/metrics/micrometer/pom.xml +++ /dev/null @@ -1,152 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - ../../ - - java-driver-metrics-micrometer - bundle - Apache Cassandra Java Driver - Metrics - Micrometer - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - io.micrometer - micrometer-core - - - org.apache.cassandra - java-driver-core - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - ch.qos.logback - logback-classic - test - - - junit - junit - test - - - com.tngtech.java - junit-dataprovider - test - - - org.assertj - assertj-core - test - - - org.mockito - mockito-core - test - - - org.apache.cassandra - java-driver-core - test - test-jar - - - - - - src/main/resources - - - ${project.basedir}/../.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - maven-jar-plugin - - - - javadoc-jar - package - - jar - - - javadoc - - ** - - - - - - - org.revapi - revapi-maven-plugin - - - true - - - - - diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricUpdater.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricUpdater.java deleted file mode 100644 index b9507c8b7cf..00000000000 --- a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricUpdater.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import edu.umd.cs.findbugs.annotations.Nullable; -import io.micrometer.core.instrument.Counter; -import io.micrometer.core.instrument.DistributionSummary; -import io.micrometer.core.instrument.Gauge; -import io.micrometer.core.instrument.Meter; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Tag; -import io.micrometer.core.instrument.Timer; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public abstract class MicrometerMetricUpdater extends AbstractMetricUpdater { - - protected final MeterRegistry registry; - - protected final ConcurrentMap metrics = new ConcurrentHashMap<>(); - - protected MicrometerMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MeterRegistry registry) { - super(context, enabledMetrics); - this.registry = registry; - } - - @Override - public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateCounterFor(metric).increment(amount); - } - } - - @Override - public void updateHistogram(MetricT metric, @Nullable String profileName, long value) { - if (isEnabled(metric, profileName)) { - getOrCreateDistributionSummaryFor(metric).record(value); - } - } - - @Override - public void markMeter(MetricT metric, 
@Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - // There is no meter type in Micrometer, so use a counter - getOrCreateCounterFor(metric).increment(amount); - } - } - - @Override - public void updateTimer( - MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) { - if (isEnabled(metric, profileName)) { - getOrCreateTimerFor(metric).record(duration, unit); - } - } - - @Override - public void clearMetrics() { - for (Meter metric : metrics.values()) { - registry.remove(metric); - } - metrics.clear(); - } - - protected abstract MetricId getMetricId(MetricT metric); - - protected void initializeGauge( - MetricT metric, DriverExecutionProfile profile, Supplier supplier) { - if (isEnabled(metric, profile.getName())) { - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - return Gauge.builder(id.getName(), supplier).tags(tags).register(registry); - }); - } - } - - protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) { - if (isEnabled(metric, profile.getName())) { - getOrCreateCounterFor(metric); - } - } - - protected void initializeTimer(MetricT metric, DriverExecutionProfile profile) { - if (isEnabled(metric, profile.getName())) { - getOrCreateTimerFor(metric); - } - } - - protected Counter getOrCreateCounterFor(MetricT metric) { - return (Counter) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - return Counter.builder(id.getName()).tags(tags).register(registry); - }); - } - - protected DistributionSummary getOrCreateDistributionSummaryFor(MetricT metric) { - return (DistributionSummary) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - DistributionSummary.Builder builder = - 
DistributionSummary.builder(id.getName()).tags(tags); - builder = configureDistributionSummary(builder, metric, id); - return builder.register(registry); - }); - } - - protected Timer getOrCreateTimerFor(MetricT metric) { - return (Timer) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); - Timer.Builder builder = Timer.builder(id.getName()).tags(tags); - builder = configureTimer(builder, metric, id); - return builder.register(registry); - }); - } - - protected Timer.Builder configureTimer(Timer.Builder builder, MetricT metric, MetricId id) { - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - if (profile.getBoolean(DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS)) { - builder.publishPercentileHistogram(); - } - return builder; - } - - @SuppressWarnings("unused") - protected DistributionSummary.Builder configureDistributionSummary( - DistributionSummary.Builder builder, MetricT metric, MetricId id) { - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - if (profile.getBoolean(DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS)) { - builder.publishPercentileHistogram(); - } - return builder; - } - - static double[] toDoubleArray(List doubleList) { - return doubleList.stream().mapToDouble(Double::doubleValue).toArray(); - } - - static void configurePercentilesPublishIfDefined( - Timer.Builder builder, DriverExecutionProfile profile, DriverOption driverOption) { - if (profile.isDefined(driverOption)) { - builder.publishPercentiles(toDoubleArray(profile.getDoubleList(driverOption))); - } - } -} diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactory.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactory.java deleted file mode 100644 index 83cd0f80b02..00000000000 --- 
a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactory.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metrics.MetricPaths; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import 
com.datastax.oss.driver.internal.core.metrics.NoopSessionMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import io.micrometer.core.instrument.MeterRegistry; -import io.netty.util.concurrent.EventExecutor; -import java.util.Optional; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class MicrometerMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(MicrometerMetricsFactory.class); - - private final InternalDriverContext context; - private final Set enabledNodeMetrics; - private final MeterRegistry registry; - private final SessionMetricUpdater sessionUpdater; - - public MicrometerMetricsFactory(DriverContext context) { - this.context = (InternalDriverContext) context; - String logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Set enabledSessionMetrics = - MetricPaths.parseSessionMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix); - this.enabledNodeMetrics = - MetricPaths.parseNodeMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix); - if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) { - LOG.debug("[{}] All metrics are disabled, Session.getMetrics will be empty", logPrefix); - this.registry = null; - this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE; - } else { - // try to get the metric registry from the context - Object possibleMetricRegistry = this.context.getMetricRegistry(); - if (possibleMetricRegistry == null) { - // metrics are enabled, but a metric registry was not supplied to the context - // use the global registry - possibleMetricRegistry = io.micrometer.core.instrument.Metrics.globalRegistry; - } - if 
(possibleMetricRegistry instanceof MeterRegistry) { - this.registry = (MeterRegistry) possibleMetricRegistry; - this.sessionUpdater = - new MicrometerSessionMetricUpdater(this.context, enabledSessionMetrics, this.registry); - } else { - // Metrics are enabled, but the registry object is not an expected type - throw new IllegalArgumentException( - "Unexpected Metrics registry object. Expected registry object to be of type '" - + MeterRegistry.class.getName() - + "', but was '" - + possibleMetricRegistry.getClass().getName() - + "'"); - } - if (!enabledNodeMetrics.isEmpty()) { - EventExecutor adminEventExecutor = - this.context.getNettyOptions().adminEventExecutorGroup().next(); - this.context - .getEventBus() - .register( - NodeStateEvent.class, - RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent)); - } - } - } - - @Override - public Optional getMetrics() { - return Optional.empty(); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return sessionUpdater; - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - if (registry == null) { - return NoopNodeMetricUpdater.INSTANCE; - } else { - return new MicrometerNodeMetricUpdater(node, context, enabledNodeMetrics, registry); - } - } - - protected void processNodeStateEvent(NodeStateEvent event) { - if (event.newState == NodeState.DOWN - || event.newState == NodeState.FORCED_DOWN - || event.newState == null) { - // node is DOWN or REMOVED - ((MicrometerNodeMetricUpdater) event.node.getMetricUpdater()).startMetricsExpirationTimeout(); - } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) { - // node is UP or ADDED - ((MicrometerNodeMetricUpdater) event.node.getMetricUpdater()) - .cancelMetricsExpirationTimeout(); - } - } -} diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdater.java 
b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdater.java deleted file mode 100644 index cb8303de965..00000000000 --- a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdater.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Timer; -import java.time.Duration; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class MicrometerNodeMetricUpdater extends MicrometerMetricUpdater - implements NodeMetricUpdater { - - private final Node node; - - public MicrometerNodeMetricUpdater( - Node node, - InternalDriverContext context, - Set enabledMetrics, - MeterRegistry registry) { - super(context, enabledMetrics, registry); - this.node = node; - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections); - initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> availableStreamIds(node)); - initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node)); - initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node)); - - initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile); - 
initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile); - initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile); - initializeCounter(DefaultNodeMetric.RETRIES, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.IGNORES, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile); - initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile); - initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile); - - initializeTimer(DefaultNodeMetric.CQL_MESSAGES, profile); - initializeTimer(DseNodeMetric.GRAPH_MESSAGES, profile); - } - - @Override - protected MetricId getMetricId(NodeMetric metric) { - return context.getMetricIdGenerator().nodeMetricId(node, metric); - } - - @Override - protected void startMetricsExpirationTimeout() { - super.startMetricsExpirationTimeout(); - } - - @Override - protected void cancelMetricsExpirationTimeout() { - super.cancelMetricsExpirationTimeout(); - } - - @Override - protected Timer.Builder configureTimer(Timer.Builder builder, NodeMetric metric, MetricId id) { - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - super.configureTimer(builder, metric, id); - if (metric == DefaultNodeMetric.CQL_MESSAGES) { - builder - .minimumExpectedValue( - 
profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST)) - .maximumExpectedValue( - profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST)) - .serviceLevelObjectives( - profile.isDefined(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO) - ? profile - .getDurationList(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO) - .toArray(new Duration[0]) - : null) - .percentilePrecision( - profile.getInt(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS)); - - configurePercentilesPublishIfDefined( - builder, profile, DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES); - } else if (metric == DseNodeMetric.GRAPH_MESSAGES) { - builder - .minimumExpectedValue( - profile.getDuration(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST)) - .maximumExpectedValue( - profile.getDuration(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST)) - .serviceLevelObjectives( - profile.isDefined(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO) - ? profile - .getDurationList(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO) - .toArray(new Duration[0]) - : null) - .percentilePrecision(profile.getInt(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS)); - - configurePercentilesPublishIfDefined( - builder, profile, DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES); - } - return builder; - } -} diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdater.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdater.java deleted file mode 100644 index 559054ab510..00000000000 --- a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdater.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Timer; -import java.time.Duration; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; - -@ThreadSafe -public class MicrometerSessionMetricUpdater extends MicrometerMetricUpdater - implements SessionMetricUpdater { - - public MicrometerSessionMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MeterRegistry registry) { - super(context, enabledMetrics, registry); - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - 
initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes); - initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize); - initializeGauge( - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize); - - initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile); - initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile); - initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile); - - initializeTimer(DefaultSessionMetric.CQL_REQUESTS, profile); - initializeTimer(DefaultSessionMetric.THROTTLING_DELAY, profile); - initializeTimer(DseSessionMetric.CONTINUOUS_CQL_REQUESTS, profile); - initializeTimer(DseSessionMetric.GRAPH_REQUESTS, profile); - } - - @Override - protected MetricId getMetricId(SessionMetric metric) { - return context.getMetricIdGenerator().sessionMetricId(metric); - } - - @Override - protected Timer.Builder configureTimer(Timer.Builder builder, SessionMetric metric, MetricId id) { - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - super.configureTimer(builder, metric, id); - if (metric == DefaultSessionMetric.CQL_REQUESTS) { - builder - .minimumExpectedValue( - profile.getDuration(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST)) - .maximumExpectedValue( - profile.getDuration(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST)) - .serviceLevelObjectives( - profile.isDefined(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO) - ? profile - .getDurationList(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO) - .toArray(new Duration[0]) - : null) - .percentilePrecision( - profile.isDefined(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS) - ? 
profile.getInt(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS) - : null); - - configurePercentilesPublishIfDefined( - builder, profile, DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES); - } else if (metric == DefaultSessionMetric.THROTTLING_DELAY) { - builder - .minimumExpectedValue( - profile.getDuration(DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST)) - .maximumExpectedValue( - profile.getDuration(DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST)) - .serviceLevelObjectives( - profile.isDefined(DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO) - ? profile - .getDurationList(DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO) - .toArray(new Duration[0]) - : null) - .percentilePrecision( - profile.isDefined(DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS) - ? profile.getInt(DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS) - : null); - - configurePercentilesPublishIfDefined( - builder, profile, DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES); - } else if (metric == DseSessionMetric.CONTINUOUS_CQL_REQUESTS) { - builder - .minimumExpectedValue( - profile.getDuration( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST)) - .maximumExpectedValue( - profile.getDuration( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST)) - .serviceLevelObjectives( - profile.isDefined(DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO) - ? profile - .getDurationList( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO) - .toArray(new Duration[0]) - : null) - .percentilePrecision( - profile.isDefined( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS) - ? 
profile.getInt( - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS) - : null); - - configurePercentilesPublishIfDefined( - builder, - profile, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES); - } else if (metric == DseSessionMetric.GRAPH_REQUESTS) { - builder - .minimumExpectedValue( - profile.getDuration(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST)) - .maximumExpectedValue( - profile.getDuration(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST)) - .serviceLevelObjectives( - profile.isDefined(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO) - ? profile - .getDurationList(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO) - .toArray(new Duration[0]) - : null) - .percentilePrecision( - profile.isDefined(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS) - ? profile.getInt(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS) - : null); - - configurePercentilesPublishIfDefined( - builder, profile, DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES); - } - return builder; - } -} diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerTags.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerTags.java deleted file mode 100644 index 10c7c821ae5..00000000000 --- a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerTags.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import io.micrometer.core.instrument.Tag; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -public class MicrometerTags { - - public static Iterable toMicrometerTags(Map tags) { - List micrometerTags = new ArrayList<>(tags.size()); - for (Entry entry : tags.entrySet()) { - micrometerTags.add(Tag.of(entry.getKey(), entry.getValue())); - } - return micrometerTags; - } -} diff --git a/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/native-image.properties b/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/native-image.properties deleted file mode 100644 index fdbf4ccc7c2..00000000000 --- a/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/native-image.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -Args = -H:ReflectionConfigurationResources=${.}/reflection.json diff --git a/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/reflection.json b/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/reflection.json deleted file mode 100644 index 638cac60af1..00000000000 --- a/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/reflection.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "name": "com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - } -] diff --git a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactoryTest.java b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactoryTest.java deleted file mode 100644 index 586b74d72c3..00000000000 --- a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactoryTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import io.micrometer.core.instrument.MeterRegistry; -import java.time.Duration; -import java.util.Collections; -import java.util.List; -import org.junit.Test; - -public class MicrometerMetricsFactoryTest { - - @Test - public void should_throw_if_wrong_or_missing_registry_type() { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - List enabledMetrics = - Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath()); - // when - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getConfig()).thenReturn(config); - when(context.getSessionName()).thenReturn("MockSession"); - // registry object is not a registry 
type - when(context.getMetricRegistry()).thenReturn(Integer.MAX_VALUE); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) - .thenReturn(enabledMetrics); - // then - try { - new MicrometerMetricsFactory(context); - fail( - "MetricsFactory should require correct registry object type: " - + MeterRegistry.class.getName()); - } catch (IllegalArgumentException iae) { - assertThat(iae.getMessage()) - .isEqualTo( - "Unexpected Metrics registry object. " - + "Expected registry object to be of type '%s', but was '%s'", - MeterRegistry.class.getName(), Integer.class.getName()); - } - } -} diff --git a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdaterTest.java b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdaterTest.java deleted file mode 100644 index 594c4166e98..00000000000 --- a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdaterTest.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.DefaultMetricId; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.micrometer.core.instrument.Timer; -import io.micrometer.core.instrument.distribution.HistogramSnapshot; -import io.micrometer.core.instrument.simple.SimpleMeterRegistry; -import java.time.Duration; -import java.util.Arrays; -import 
java.util.Collections; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class MicrometerNodeMetricUpdaterTest { - - private static final MetricId METRIC_ID = new DefaultMetricId("irrelevant", ImmutableMap.of()); - - @Test - public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - MetricIdGenerator generator = mock(MetricIdGenerator.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - Duration expireAfter = AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getMetricIdGenerator()).thenReturn(generator); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - when(generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES)).thenReturn(METRIC_ID); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST)) - .thenReturn(Duration.ofSeconds(10)); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST)) - .thenReturn(Duration.ofMillis(1)); - when(profile.getInt(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS)).thenReturn(5); - - MicrometerNodeMetricUpdater updater = - new MicrometerNodeMetricUpdater(node, context, enabledMetrics, new SimpleMeterRegistry()); - - // then - assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); - 
verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - String.format( - "[prefix] Value too low for %s: %s. Forcing to %s instead.", - DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), - expireAfter, - AbstractMetricUpdater.MIN_EXPIRE_AFTER)); - } - - @Test - @UseDataProvider(value = "acceptableEvictionTimes") - public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( - Duration expireAfter) { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - MetricIdGenerator generator = mock(MetricIdGenerator.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getMetricIdGenerator()).thenReturn(generator); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - when(generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES)).thenReturn(METRIC_ID); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST)) - .thenReturn(Duration.ofSeconds(10)); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST)) - .thenReturn(Duration.ofMillis(1)); - when(profile.getInt(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS)).thenReturn(5); - - MicrometerNodeMetricUpdater updater = - new MicrometerNodeMetricUpdater(node, context, enabledMetrics, new 
SimpleMeterRegistry()); - - // then - assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); - verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); - } - - @DataProvider - public static Object[][] acceptableEvictionTimes() { - return new Object[][] { - {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, - {AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} - }; - } - - @Test - @UseDataProvider(value = "timerMetrics") - public void should_create_timer( - NodeMetric metric, - DriverOption lowest, - DriverOption highest, - DriverOption digits, - DriverOption sla, - DriverOption percentiles) { - // given - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - MetricIdGenerator generator = mock(MetricIdGenerator.class); - Set enabledMetrics = Collections.singleton(metric); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getMetricIdGenerator()).thenReturn(generator); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.getDuration(lowest)).thenReturn(Duration.ofMillis(10)); - when(profile.getDuration(highest)).thenReturn(Duration.ofSeconds(1)); - when(profile.getInt(digits)).thenReturn(5); - when(profile.isDefined(sla)).thenReturn(true); - when(profile.getDurationList(sla)) - .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); - when(profile.isDefined(percentiles)).thenReturn(true); - when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); - when(generator.nodeMetricId(node, metric)).thenReturn(METRIC_ID); - - SimpleMeterRegistry registry = spy(new SimpleMeterRegistry()); - MicrometerNodeMetricUpdater 
updater = - new MicrometerNodeMetricUpdater(node, context, enabledMetrics, registry); - - for (int i = 0; i < 10; i++) { - updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); - } - - // then - Timer timer = registry.find(METRIC_ID.getName()).timer(); - assertThat(timer).isNotNull(); - assertThat(timer.count()).isEqualTo(10); - HistogramSnapshot snapshot = timer.takeSnapshot(); - assertThat(snapshot.histogramCounts()).hasSize(2); - assertThat(snapshot.percentileValues()).hasSize(3); - assertThat(snapshot.percentileValues()) - .satisfiesExactlyInAnyOrder( - valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.75), - valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.95), - valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.99)); - } - - @Test - @UseDataProvider(value = "timerMetrics") - public void should_not_create_sla_percentiles( - NodeMetric metric, - DriverOption lowest, - DriverOption highest, - DriverOption digits, - DriverOption sla, - DriverOption percentiles) { - // given - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - MetricIdGenerator generator = mock(MetricIdGenerator.class); - Set enabledMetrics = Collections.singleton(metric); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getMetricIdGenerator()).thenReturn(generator); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.getDuration(lowest)).thenReturn(Duration.ofMillis(10)); - when(profile.getDuration(highest)).thenReturn(Duration.ofSeconds(1)); - when(profile.getInt(digits)).thenReturn(5); - when(profile.isDefined(sla)).thenReturn(false); - 
when(profile.getDurationList(sla)) - .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); - when(profile.isDefined(percentiles)).thenReturn(false); - when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); - when(generator.nodeMetricId(node, metric)).thenReturn(METRIC_ID); - - SimpleMeterRegistry registry = spy(new SimpleMeterRegistry()); - MicrometerNodeMetricUpdater updater = - new MicrometerNodeMetricUpdater(node, context, enabledMetrics, registry); - - for (int i = 0; i < 10; i++) { - updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); - } - - // then - Timer timer = registry.find(METRIC_ID.getName()).timer(); - assertThat(timer).isNotNull(); - assertThat(timer.count()).isEqualTo(10); - HistogramSnapshot snapshot = timer.takeSnapshot(); - assertThat(snapshot.histogramCounts()).hasSize(0); - assertThat(snapshot.percentileValues()).hasSize(0); - } - - @DataProvider - public static Object[][] timerMetrics() { - return new Object[][] { - { - DefaultNodeMetric.CQL_MESSAGES, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO, - DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES, - }, - { - DseNodeMetric.GRAPH_MESSAGES, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO, - DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES, - }, - }; - } -} diff --git a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdaterTest.java b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdaterTest.java deleted file mode 100644 index 
0deb377457a..00000000000 --- a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdaterTest.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.micrometer; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.DefaultMetricId; -import com.datastax.oss.driver.internal.core.metrics.MetricId; 
-import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.micrometer.core.instrument.Timer; -import io.micrometer.core.instrument.distribution.HistogramSnapshot; -import io.micrometer.core.instrument.simple.SimpleMeterRegistry; -import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class MicrometerSessionMetricUpdaterTest { - - private static final MetricId METRIC_ID = new DefaultMetricId("irrelevant", ImmutableMap.of()); - - @Test - @UseDataProvider(value = "timerMetrics") - public void should_create_timer( - SessionMetric metric, - DriverOption lowest, - DriverOption highest, - DriverOption digits, - DriverOption sla, - DriverOption percentiles) { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - MetricIdGenerator generator = mock(MetricIdGenerator.class); - Set enabledMetrics = Collections.singleton(metric); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getMetricIdGenerator()).thenReturn(generator); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.getDuration(lowest)).thenReturn(Duration.ofMillis(10)); - when(profile.getDuration(highest)).thenReturn(Duration.ofSeconds(1)); - when(profile.getInt(digits)).thenReturn(5); - 
when(profile.isDefined(sla)).thenReturn(true); - when(profile.getDurationList(sla)) - .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); - when(profile.isDefined(percentiles)).thenReturn(true); - when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); - when(generator.sessionMetricId(metric)).thenReturn(METRIC_ID); - - SimpleMeterRegistry registry = spy(new SimpleMeterRegistry()); - MicrometerSessionMetricUpdater updater = - new MicrometerSessionMetricUpdater(context, enabledMetrics, registry); - - for (int i = 0; i < 10; i++) { - updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); - } - - // then - Timer timer = registry.find(METRIC_ID.getName()).timer(); - assertThat(timer).isNotNull(); - assertThat(timer.count()).isEqualTo(10); - HistogramSnapshot snapshot = timer.takeSnapshot(); - assertThat(snapshot.histogramCounts()).hasSize(2); - assertThat(snapshot.percentileValues()).hasSize(3); - assertThat(snapshot.percentileValues()) - .satisfiesExactlyInAnyOrder( - valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.75), - valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.95), - valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.99)); - } - - @Test - @UseDataProvider(value = "timerMetrics") - public void should_not_create_sla_percentiles( - SessionMetric metric, - DriverOption lowest, - DriverOption highest, - DriverOption digits, - DriverOption sla, - DriverOption percentiles) { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - MetricIdGenerator generator = mock(MetricIdGenerator.class); - Set enabledMetrics = Collections.singleton(metric); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - 
when(config.getDefaultProfile()).thenReturn(profile); - when(context.getMetricIdGenerator()).thenReturn(generator); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(Duration.ofHours(1)); - when(profile.isDefined(sla)).thenReturn(false); - when(profile.getDurationList(sla)) - .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); - when(profile.getBoolean(DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS)) - .thenReturn(true); - when(profile.isDefined(percentiles)).thenReturn(false); - when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); - when(generator.sessionMetricId(metric)).thenReturn(METRIC_ID); - - SimpleMeterRegistry registry = new SimpleMeterRegistry(); - MicrometerSessionMetricUpdater updater = - new MicrometerSessionMetricUpdater(context, enabledMetrics, registry); - - for (int i = 0; i < 10; i++) { - updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); - } - - // then - Timer timer = registry.find(METRIC_ID.getName()).timer(); - assertThat(timer).isNotNull(); - assertThat(timer.count()).isEqualTo(10); - HistogramSnapshot snapshot = timer.takeSnapshot(); - assertThat(snapshot.histogramCounts()).hasSize(0); - assertThat(snapshot.percentileValues()).hasSize(0); - } - - @DataProvider - public static Object[][] timerMetrics() { - return new Object[][] { - { - DefaultSessionMetric.CQL_REQUESTS, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO, - DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, - }, - { - DseSessionMetric.GRAPH_REQUESTS, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, - 
DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO, - DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES, - }, - { - DseSessionMetric.CONTINUOUS_CQL_REQUESTS, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO, - DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES - }, - { - DefaultSessionMetric.THROTTLING_DELAY, - DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST, - DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, - DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, - DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO, - DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES - }, - }; - } -} diff --git a/metrics/microprofile/pom.xml b/metrics/microprofile/pom.xml deleted file mode 100644 index 9893711d340..00000000000 --- a/metrics/microprofile/pom.xml +++ /dev/null @@ -1,157 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - ../../ - - java-driver-metrics-microprofile - bundle - Apache Cassandra Java Driver - Metrics - Microprofile - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.eclipse.microprofile.metrics - microprofile-metrics-api - - - org.apache.cassandra - java-driver-core - - - io.dropwizard.metrics - metrics-core - - - org.hdrhistogram - HdrHistogram - - - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - io.smallrye - smallrye-metrics - test - - - ch.qos.logback - logback-classic - test - - - junit - junit - test - - - com.tngtech.java - junit-dataprovider - test - - - org.assertj - assertj-core - test - - - org.mockito - mockito-core - test - - - org.apache.cassandra - java-driver-core - 
test - test-jar - - - - - - src/main/resources - - - ${project.basedir}/../.. - - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - maven-jar-plugin - - - - javadoc-jar - package - - jar - - - javadoc - - ** - - - - - - - org.revapi - revapi-maven-plugin - - - true - - - - - diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricUpdater.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricUpdater.java deleted file mode 100644 index df44fd69c51..00000000000 --- a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricUpdater.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.time.Duration; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import net.jcip.annotations.ThreadSafe; -import org.eclipse.microprofile.metrics.Counter; -import org.eclipse.microprofile.metrics.Gauge; -import org.eclipse.microprofile.metrics.Histogram; -import org.eclipse.microprofile.metrics.Metadata; -import org.eclipse.microprofile.metrics.Meter; -import org.eclipse.microprofile.metrics.Metric; -import org.eclipse.microprofile.metrics.MetricID; -import org.eclipse.microprofile.metrics.MetricRegistry; -import org.eclipse.microprofile.metrics.MetricType; -import org.eclipse.microprofile.metrics.Tag; -import org.eclipse.microprofile.metrics.Timer; - -@ThreadSafe -public abstract class MicroProfileMetricUpdater extends AbstractMetricUpdater { - - protected final MetricRegistry registry; - - protected final ConcurrentMap metrics = new ConcurrentHashMap<>(); - - protected MicroProfileMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { - super(context, enabledMetrics); - this.registry = registry; - } - - @Override - public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateCounterFor(metric).inc(amount); - } - } - - @Override - public void updateHistogram(MetricT metric, @Nullable String profileName, long value) { - if (isEnabled(metric, profileName)) { - getOrCreateHistogramFor(metric).update(value); - } - } - - @Override 
- public void markMeter(MetricT metric, @Nullable String profileName, long amount) { - if (isEnabled(metric, profileName)) { - getOrCreateMeterFor(metric).mark(amount); - } - } - - @Override - public void updateTimer( - MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) { - if (isEnabled(metric, profileName)) { - getOrCreateTimerFor(metric).update(Duration.ofNanos(unit.toNanos(duration))); - } - } - - @Override - public void clearMetrics() { - for (MetricT metric : metrics.keySet()) { - MetricId id = getMetricId(metric); - Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); - registry.remove(new MetricID(id.getName(), tags)); - } - metrics.clear(); - } - - protected abstract MetricId getMetricId(MetricT metric); - - protected void initializeGauge( - MetricT metric, DriverExecutionProfile profile, Gauge supplier) { - if (isEnabled(metric, profile.getName())) { - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - String name = id.getName(); - Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); - Metadata metadata = - Metadata.builder().withName(name).withType(MetricType.GAUGE).build(); - return registry.register(metadata, supplier, tags); - }); - } - } - - protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) { - if (isEnabled(metric, profile.getName())) { - getOrCreateCounterFor(metric); - } - } - - protected void initializeTimer(MetricT metric, DriverExecutionProfile profile) { - if (isEnabled(metric, profile.getName())) { - getOrCreateTimerFor(metric); - } - } - - protected Counter getOrCreateCounterFor(MetricT metric) { - return (Counter) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); - return registry.counter(id.getName(), tags); - }); - } - - protected Meter getOrCreateMeterFor(MetricT metric) { - return (Meter) - metrics.computeIfAbsent( - metric, - m -> { - 
MetricId id = getMetricId(m); - Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); - return registry.meter(id.getName(), tags); - }); - } - - protected Histogram getOrCreateHistogramFor(MetricT metric) { - return (Histogram) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); - return registry.histogram(id.getName(), tags); - }); - } - - protected Timer getOrCreateTimerFor(MetricT metric) { - return (Timer) - metrics.computeIfAbsent( - metric, - m -> { - MetricId id = getMetricId(m); - Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); - return registry.timer(id.getName(), tags); - }); - } -} diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactory.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactory.java deleted file mode 100644 index e045b5fcb5e..00000000000 --- a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactory.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; -import com.datastax.oss.driver.internal.core.metrics.MetricPaths; -import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.NoopSessionMetricUpdater; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; -import io.netty.util.concurrent.EventExecutor; -import java.util.Optional; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.eclipse.microprofile.metrics.MetricRegistry; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@ThreadSafe -public class MicroProfileMetricsFactory implements MetricsFactory { - - private static final Logger LOG = LoggerFactory.getLogger(MicroProfileMetricsFactory.class); - - private final InternalDriverContext context; - private final Set enabledNodeMetrics; - private final MetricRegistry registry; - private final SessionMetricUpdater sessionUpdater; - - public 
MicroProfileMetricsFactory(DriverContext context) { - this.context = (InternalDriverContext) context; - String logPrefix = context.getSessionName(); - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - Set enabledSessionMetrics = - MetricPaths.parseSessionMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix); - this.enabledNodeMetrics = - MetricPaths.parseNodeMetricPaths( - config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix); - if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) { - LOG.debug("[{}] All metrics are disabled.", logPrefix); - this.registry = null; - this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE; - } else { - Object possibleMetricRegistry = this.context.getMetricRegistry(); - if (possibleMetricRegistry == null) { - // metrics are enabled, but a metric registry was not supplied to the context - throw new IllegalArgumentException( - "No metric registry object found. Expected registry object to be of type '" - + MetricRegistry.class.getName() - + "'"); - } - if (possibleMetricRegistry instanceof MetricRegistry) { - this.registry = (MetricRegistry) possibleMetricRegistry; - this.sessionUpdater = - new MicroProfileSessionMetricUpdater( - this.context, enabledSessionMetrics, this.registry); - } else { - // Metrics are enabled, but the registry object is not an expected type - throw new IllegalArgumentException( - "Unexpected Metrics registry object. 
Expected registry object to be of type '" - + MetricRegistry.class.getName() - + "', but was '" - + possibleMetricRegistry.getClass().getName() - + "'"); - } - if (!enabledNodeMetrics.isEmpty()) { - EventExecutor adminEventExecutor = - this.context.getNettyOptions().adminEventExecutorGroup().next(); - this.context - .getEventBus() - .register( - NodeStateEvent.class, - RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent)); - } - } - } - - @Override - public Optional getMetrics() { - return Optional.empty(); - } - - @Override - public SessionMetricUpdater getSessionUpdater() { - return sessionUpdater; - } - - @Override - public NodeMetricUpdater newNodeUpdater(Node node) { - if (registry == null) { - return NoopNodeMetricUpdater.INSTANCE; - } else { - return new MicroProfileNodeMetricUpdater(node, context, enabledNodeMetrics, registry); - } - } - - protected void processNodeStateEvent(NodeStateEvent event) { - if (event.newState == NodeState.DOWN - || event.newState == NodeState.FORCED_DOWN - || event.newState == null) { - // node is DOWN or REMOVED - ((MicroProfileNodeMetricUpdater) event.node.getMetricUpdater()) - .startMetricsExpirationTimeout(); - } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) { - // node is UP or ADDED - ((MicroProfileNodeMetricUpdater) event.node.getMetricUpdater()) - .cancelMetricsExpirationTimeout(); - } - } -} diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricUpdater.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricUpdater.java deleted file mode 100644 index 8a2d235b59e..00000000000 --- a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricUpdater.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.eclipse.microprofile.metrics.MetricRegistry; - -@ThreadSafe -public class MicroProfileNodeMetricUpdater extends MicroProfileMetricUpdater - implements NodeMetricUpdater { - - private final Node node; - - public MicroProfileNodeMetricUpdater( - Node node, - InternalDriverContext context, - Set enabledMetrics, - MetricRegistry registry) { - super(context, enabledMetrics, registry); - this.node = node; - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections); - 
initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> availableStreamIds(node)); - initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node)); - initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node)); - - initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile); - initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile); - initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile); - initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile); - initializeCounter(DefaultNodeMetric.RETRIES, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.IGNORES, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile); - initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile); - initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile); - initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile); - initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile); - - initializeTimer(DefaultNodeMetric.CQL_MESSAGES, profile); - initializeTimer(DseNodeMetric.GRAPH_MESSAGES, profile); - } - - @Override - protected MetricId getMetricId(NodeMetric metric) { - return context.getMetricIdGenerator().nodeMetricId(node, metric); - } - - @Override - protected 
void startMetricsExpirationTimeout() { - super.startMetricsExpirationTimeout(); - } - - @Override - protected void cancelMetricsExpirationTimeout() { - super.cancelMetricsExpirationTimeout(); - } -} diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileSessionMetricUpdater.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileSessionMetricUpdater.java deleted file mode 100644 index f3c906e4422..00000000000 --- a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileSessionMetricUpdater.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.MetricId; -import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; -import java.util.Set; -import net.jcip.annotations.ThreadSafe; -import org.eclipse.microprofile.metrics.MetricRegistry; - -@ThreadSafe -public class MicroProfileSessionMetricUpdater extends MicroProfileMetricUpdater - implements SessionMetricUpdater { - - public MicroProfileSessionMetricUpdater( - InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { - super(context, enabledMetrics, registry); - - DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - - initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes); - initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize); - initializeGauge( - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize); - - initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile); - initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile); - initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile); - - initializeTimer(DefaultSessionMetric.CQL_REQUESTS, profile); - initializeTimer(DefaultSessionMetric.THROTTLING_DELAY, profile); - initializeTimer(DseSessionMetric.CONTINUOUS_CQL_REQUESTS, profile); - initializeTimer(DseSessionMetric.GRAPH_REQUESTS, profile); - } - - @Override - protected MetricId getMetricId(SessionMetric metric) { - return context.getMetricIdGenerator().sessionMetricId(metric); - } -} diff --git 
a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileTags.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileTags.java deleted file mode 100644 index 54ac9c77f98..00000000000 --- a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileTags.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import org.eclipse.microprofile.metrics.Tag; - -public class MicroProfileTags { - - public static Tag[] toMicroProfileTags(Map tags) { - List micrometerTags = new ArrayList<>(tags.size()); - for (Entry entry : tags.entrySet()) { - micrometerTags.add(new Tag(entry.getKey(), entry.getValue())); - } - return micrometerTags.toArray(new Tag[0]); - } -} diff --git a/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/native-image.properties b/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/native-image.properties deleted file mode 100644 index fdbf4ccc7c2..00000000000 --- a/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/native-image.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -Args = -H:ReflectionConfigurationResources=${.}/reflection.json diff --git a/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/reflection.json b/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/reflection.json deleted file mode 100644 index 6d408897551..00000000000 --- a/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/reflection.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "name": "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory", - "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] - } -] diff --git a/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactoryTest.java b/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactoryTest.java deleted file mode 100644 index f1fbfa2c907..00000000000 --- a/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactoryTest.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import java.util.Collections; -import java.util.List; -import org.eclipse.microprofile.metrics.MetricRegistry; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class MicroProfileMetricsFactoryTest { - - @Test - @UseDataProvider(value = "invalidRegistryTypes") - public void should_throw_if_wrong_or_missing_registry_type( - Object registryObj, String expectedMsg) { - // given - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - List enabledMetrics = - Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath()); - // when - when(config.getDefaultProfile()).thenReturn(profile); - when(context.getConfig()).thenReturn(config); - when(context.getSessionName()).thenReturn("MockSession"); - // registry object is not a registry type - when(context.getMetricRegistry()).thenReturn(registryObj); - 
when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(AbstractMetricUpdater.MIN_EXPIRE_AFTER); - when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) - .thenReturn(enabledMetrics); - // then - try { - new MicroProfileMetricsFactory(context); - fail( - "MetricsFactory should require correct registry object type: " - + MetricRegistry.class.getName()); - } catch (IllegalArgumentException iae) { - assertThat(iae.getMessage()).isEqualTo(expectedMsg); - } - } - - @DataProvider - public static Object[][] invalidRegistryTypes() { - return new Object[][] { - { - Integer.MAX_VALUE, - "Unexpected Metrics registry object. Expected registry object to be of type '" - + MetricRegistry.class.getName() - + "', but was '" - + Integer.class.getName() - + "'" - }, - { - null, - "No metric registry object found. Expected registry object to be of type '" - + MetricRegistry.class.getName() - + "'" - } - }; - } -} diff --git a/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricsUpdaterTest.java b/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricsUpdaterTest.java deleted file mode 100644 index aa73148fa77..00000000000 --- a/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricsUpdaterTest.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.metrics.microprofile; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import ch.qos.logback.classic.Level; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.NodeMetric; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; -import com.datastax.oss.driver.internal.core.util.LoggerTest; -import com.tngtech.java.junit.dataprovider.DataProvider; -import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import com.tngtech.java.junit.dataprovider.UseDataProvider; -import io.smallrye.metrics.MetricsRegistryImpl; -import java.time.Duration; -import java.util.Collections; -import java.util.Set; -import org.eclipse.microprofile.metrics.Gauge; -import org.junit.Test; -import org.junit.runner.RunWith; - -@RunWith(DataProviderRunner.class) -public class MicroProfileNodeMetricsUpdaterTest { - - @Test - public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { - // given - 
LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - Duration expireAfter = AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - - MicroProfileNodeMetricUpdater updater = - new MicroProfileNodeMetricUpdater( - node, context, enabledMetrics, new MetricsRegistryImpl()) { - @Override - protected void initializeGauge( - NodeMetric metric, DriverExecutionProfile profile, Gauge supplier) { - // do nothing - } - - @Override - protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - - @Override - protected void initializeTimer(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - }; - - // then - assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); - verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); - assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); - assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) - .contains( - String.format( - "[prefix] Value too low for %s: %s. 
Forcing to %s instead.", - DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), - expireAfter, - AbstractMetricUpdater.MIN_EXPIRE_AFTER)); - } - - @Test - @UseDataProvider(value = "acceptableEvictionTimes") - public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( - Duration expireAfter) { - // given - LoggerTest.LoggerSetup logger = - LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); - Node node = mock(Node.class); - InternalDriverContext context = mock(InternalDriverContext.class); - DriverExecutionProfile profile = mock(DriverExecutionProfile.class); - DriverConfig config = mock(DriverConfig.class); - Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); - - // when - when(context.getSessionName()).thenReturn("prefix"); - when(context.getConfig()).thenReturn(config); - when(config.getDefaultProfile()).thenReturn(profile); - when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) - .thenReturn(expireAfter); - - MicroProfileNodeMetricUpdater updater = - new MicroProfileNodeMetricUpdater( - node, context, enabledMetrics, new MetricsRegistryImpl()) { - @Override - protected void initializeGauge( - NodeMetric metric, DriverExecutionProfile profile, Gauge supplier) { - // do nothing - } - - @Override - protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - - @Override - protected void initializeTimer(NodeMetric metric, DriverExecutionProfile profile) { - // do nothing - } - }; - - // then - assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); - verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); - } - - @DataProvider - public static Object[][] acceptableEvictionTimes() { - return new Object[][] { - {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, - {AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} - }; - } -} diff --git a/osgi-tests/README.md b/osgi-tests/README.md deleted file 
mode 100644 index 1ca6211d427..00000000000 --- a/osgi-tests/README.md +++ /dev/null @@ -1,67 +0,0 @@ - - -# Java Driver OSGi Tests - -This module contains OSGi tests for the driver. - -It declares a typical "application" bundle containing a few services that rely -on the driver, see `src/main`. - -The integration tests in `src/tests` interrogate the application bundle services -and check that they can operate normally. They exercise different provisioning -configurations to ensure that the driver is usable in most cases. - -## Running the tests - -In order to run the OSGi tests, all other driver modules must have been -previously compiled, that is, their respective `target/classes` directory must -be up-to-date and contain not only the class files, but also an up-to-date OSGi -manifest. - -Therefore, it is recommended to always compile all modules and run the OSGi -integration tests in one single pass, which can be easily done by running, -from the driver's parent module directory: - - mvn clean verify - -This will however also run other integration tests, and might take a long time -to finish. If you prefer to skip other integration tests, and only run the -OSGi ones, you can do so as follows: - - mvn clean verify \ - -DskipParallelizableITs=true \ - -DskipSerialITs=true \ - -DskipIsolatedITs=true - -You can pass the following system properties to your tests: - -1. `ccm.version`: the CCM version to use -2. `ccm.distribution`: choose target backend type (e.g. DSE, HCD) -3. `osgi.debug`: whether to enable remote debugging of the OSGi container (see - below). - -## Debugging OSGi tests - -First, you can enable DEBUG logs for the Pax Exam framework by editing the -`src/tests/resources/logback-test.xml` file. - -Alternatively, you can debug the remote OSGi container by passing the system -property `-Dosgi.debug=true`. In this case the framework will prompt for a -remote debugger on port 5005. 
diff --git a/osgi-tests/pom.xml b/osgi-tests/pom.xml deleted file mode 100644 index c2cc4d830f1..00000000000 --- a/osgi-tests/pom.xml +++ /dev/null @@ -1,303 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-osgi-tests - jar - Apache Cassandra Java Driver - OSGi tests - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-core - - - org.apache.cassandra - java-driver-query-builder - - - org.apache.cassandra - java-driver-mapper-processor - - - org.apache.cassandra - java-driver-mapper-runtime - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - ch.qos.logback - logback-classic - - - org.apache.cassandra - java-driver-guava-shaded - - - org.xerial.snappy - snappy-java - - - at.yawk.lz4 - lz4-java - - - org.reactivestreams - reactive-streams - - - com.esri.geometry - esri-geometry-api - - - org.apache.tinkerpop - gremlin-core - - - org.apache.tinkerpop - tinkergraph-gremlin - - - org.osgi - org.osgi.core - provided - - - org.apache.cassandra - java-driver-test-infra - test - - - org.ops4j.pax.exam - pax-exam-junit4 - test - - - org.ops4j.pax.exam - pax-exam-container-forked - test - - - org.ops4j.pax.exam - pax-exam-link-mvn - test - - - org.ops4j.pax.url - pax-url-wrap - test - - - org.ops4j.pax.url - pax-url-reference - test - - - javax.inject - javax.inject - test - - - org.apache.felix - org.apache.felix.framework - test - - - org.assertj - assertj-core - test - - - org.apache.commons - commons-exec - test - - - io.reactivex.rxjava2 - rxjava - test - - - org.awaitility - awaitility - test - - - - - - org.apache.servicemix.tooling - depends-maven-plugin - 1.4.0 - - - generate-depends-file - - generate-depends-file - - - - - - - org.ops4j - maven-pax-plugin - 1.6.0 - - felix - true - - --platform=felix - --version=${felix.version} - --log=debug - 
--bootDelegation=sun.misc - - - - - org.apache.felix - maven-bundle-plugin - - - com.datastax.oss.driver.osgi - com.datastax.oss.driver.internal.osgi.MailboxActivator - com.datastax.oss.driver.api.osgi.* - com.datastax.oss.driver.internal.osgi.* - !net.jcip.annotations.*,!edu.umd.cs.findbugs.annotations.*,org.apache.tinkerpop.*;resolution:=optional,* - <_include>-osgi.bnd - - - - - bundle-manifest - process-classes - - manifest - - - - - - maven-surefire-plugin - - ${testing.jvm}/bin/java - - ${project.basedir}/src/test/resources/logback-test.xml - - - - - maven-failsafe-plugin - - - osgi-tests - - integration-test - verify - - - - - ${testing.jvm}/bin/java - - ${project.basedir}/src/test/resources/logback-test.xml - - classes - 1 - - - - org.revapi - revapi-maven-plugin - - true - - - - maven-jar-plugin - - true - - - - maven-javadoc-plugin - - true - - - - maven-source-plugin - - true - - - - maven-install-plugin - - true - - - - maven-deploy-plugin - - true - - - - org.sonatype.plugins - nexus-staging-maven-plugin - - true - - - - - diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/CustomRetryPolicy.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/CustomRetryPolicy.java deleted file mode 100644 index 4e6b4e1394c..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/CustomRetryPolicy.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.osgi; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; - -public class CustomRetryPolicy extends DefaultRetryPolicy { - - public CustomRetryPolicy(DriverContext context, String profileName) { - super(context, profileName); - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxException.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxException.java deleted file mode 100644 index 112becb2e6d..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxException.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.osgi.service; - -public class MailboxException extends Exception { - - public MailboxException(Throwable cause) { - super("Failure interacting with Mailbox", cause); - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxMessage.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxMessage.java deleted file mode 100644 index 426399da98f..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxMessage.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.osgi.service; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.time.Instant; -import java.util.Objects; - -@Entity -@CqlName("messages_by_recipient") -public class MailboxMessage { - - public static final CqlIdentifier MAILBOX_TABLE = - CqlIdentifier.fromInternal("messages_by_recipient"); - - @PartitionKey private String recipient; - - @ClusteringColumn private Instant timestamp; - - private String sender; - - private String body; - - public MailboxMessage() {} - - public MailboxMessage( - @NonNull String recipient, - @NonNull Instant timestamp, - @NonNull String sender, - @NonNull String body) { - this.recipient = recipient; - this.timestamp = timestamp; - this.sender = sender; - this.body = body; - } - - public String getRecipient() { - return recipient; - } - - public void setRecipient(String recipient) { - this.recipient = recipient; - } - - public Instant getTimestamp() { - return timestamp; - } - - public void setTimestamp(Instant timestamp) { - this.timestamp = timestamp; - } - - public String getSender() { - return sender; - } - - public void setSender(String sender) { - this.sender = sender; - } - - public String getBody() { - return body; - } - - public void setBody(String body) { - this.body = body; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof MailboxMessage)) { - return false; - } - MailboxMessage that = (MailboxMessage) o; - return Objects.equals(recipient, that.recipient) - && Objects.equals(timestamp, that.timestamp) - && Objects.equals(sender, that.sender) - && Objects.equals(body, that.body); - } - - 
@Override - public int hashCode() { - return Objects.hash(recipient, timestamp, sender, body); - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxService.java deleted file mode 100644 index 732a05e6a85..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxService.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.osgi.service; - -public interface MailboxService { - - /** - * Retrieve all messages for a given recipient. - * - * @param recipient User whose mailbox is being read. - * @return All messages in the mailbox. - */ - Iterable getMessages(String recipient) throws MailboxException; - - /** - * Stores the given message in the appropriate mailbox. - * - * @param message Message to send. - */ - void sendMessage(MailboxMessage message) throws MailboxException; - - /** - * Deletes all mail for the given recipient. - * - * @param recipient User whose mailbox will be cleared. 
- */ - void clearMailbox(String recipient) throws MailboxException; -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxMessage.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxMessage.java deleted file mode 100644 index 9b0b52cfa09..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxMessage.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.osgi.service.geo; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; -import com.datastax.oss.driver.api.mapper.annotations.CqlName; -import com.datastax.oss.driver.api.mapper.annotations.Entity; -import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; - -@Entity -@CqlName("messages_by_location") -public class GeoMailboxMessage { - - public static final CqlIdentifier MAILBOX_TABLE = - CqlIdentifier.fromInternal("messages_by_location"); - - @PartitionKey private String recipient; - - @ClusteringColumn private Point location; - - private String sender; - - private String body; - - public GeoMailboxMessage() {} - - public GeoMailboxMessage( - @NonNull String recipient, - @NonNull Point location, - @NonNull String sender, - @NonNull String body) { - this.location = location; - this.recipient = recipient; - this.sender = sender; - this.body = body; - } - - public String getRecipient() { - return recipient; - } - - public void setRecipient(String recipient) { - this.recipient = recipient; - } - - public Point getLocation() { - return location; - } - - public void setLocation(Point location) { - this.location = location; - } - - public String getSender() { - return sender; - } - - public void setSender(String sender) { - this.sender = sender; - } - - public String getBody() { - return body; - } - - public void setBody(String body) { - this.body = body; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof GeoMailboxMessage)) { - return false; - } - GeoMailboxMessage that = (GeoMailboxMessage) o; - return Objects.equals(recipient, that.recipient) - && Objects.equals(location, that.location) - && Objects.equals(sender, that.sender) - && 
Objects.equals(body, that.body); - } - - @Override - public int hashCode() { - return Objects.hash(recipient, location, sender, body); - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxService.java deleted file mode 100644 index dcb7963ccc3..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxService.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.osgi.service.geo; - -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxService; - -public interface GeoMailboxService extends MailboxService { - - void sendGeoMessage(GeoMailboxMessage message) throws MailboxException; - - Iterable getGeoMessages(String recipient) throws MailboxException; - - void clearGeoMailbox(String recipient) throws MailboxException; -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/graph/GraphMailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/graph/GraphMailboxService.java deleted file mode 100644 index 65999957066..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/graph/GraphMailboxService.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.osgi.service.graph; - -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.MailboxService; - -public interface GraphMailboxService extends MailboxService { - - void sendGraphMessage(MailboxMessage message) throws MailboxException; - - Iterable getGraphMessages(String recipient) throws MailboxException; -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/reactive/ReactiveMailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/reactive/ReactiveMailboxService.java deleted file mode 100644 index 226db1b06d9..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/reactive/ReactiveMailboxService.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.osgi.service.reactive; - -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.MailboxService; - -public interface ReactiveMailboxService extends MailboxService { - - MappedReactiveResultSet getMessagesReactive(String recipient) - throws MailboxException; -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/MailboxActivator.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/MailboxActivator.java deleted file mode 100644 index 8dff11520af..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/MailboxActivator.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi; - -import com.datastax.dse.driver.api.core.config.DseDriverOption; -import com.datastax.dse.driver.internal.core.graph.GraphProtocol; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; -import com.datastax.oss.driver.internal.osgi.service.geo.GeoMailboxServiceImpl; -import com.datastax.oss.driver.internal.osgi.service.graph.GraphMailboxServiceImpl; -import com.datastax.oss.driver.internal.osgi.service.reactive.ReactiveMailboxServiceImpl; -import java.net.InetSocketAddress; -import java.util.Dictionary; -import java.util.Hashtable; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import org.osgi.framework.Bundle; -import org.osgi.framework.BundleActivator; -import org.osgi.framework.BundleContext; -import org.osgi.framework.wiring.BundleWiring; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MailboxActivator implements BundleActivator { - - private static final Logger LOGGER = LoggerFactory.getLogger(MailboxActivator.class); - - private CqlSession session; - private CqlIdentifier keyspace; - private String graphName; - - @Override - public void start(BundleContext context) { - buildSession(context); - registerService(context); - } - - private void buildSession(BundleContext context) { - - Bundle bundle = context.getBundle(); - BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); - ClassLoader classLoader = bundleWiring.getClassLoader(); - - 
LOGGER.info("Application class loader: {}", classLoader); - - // Use the application bundle class loader to load classes by reflection when - // they are located in the application bundle. This is not strictly required - // as the driver has a "Dynamic-Import:*" directive which makes it capable - // of loading classes outside its bundle. - CqlSessionBuilder builder = CqlSession.builder().withClassLoader(classLoader); - - // Use the application bundle class loader to load configuration resources located - // in the application bundle. This is required, otherwise these resources will - // not be found. - ProgrammaticDriverConfigLoaderBuilder configLoaderBuilder = - DriverConfigLoader.programmaticBuilder(classLoader); - - String contactPointsStr = context.getProperty("cassandra.contactpoints"); - if (contactPointsStr == null) { - contactPointsStr = "127.0.0.1"; - } - LOGGER.info("Contact points: {}", contactPointsStr); - - String portStr = context.getProperty("cassandra.port"); - if (portStr == null) { - portStr = "9042"; - } - LOGGER.info("Port: {}", portStr); - int port = Integer.parseInt(portStr); - - List contactPoints = - Stream.of(contactPointsStr.split(",")) - .map((String host) -> InetSocketAddress.createUnresolved(host, port)) - .collect(Collectors.toList()); - builder.addContactPoints(contactPoints); - - String keyspaceStr = context.getProperty("cassandra.keyspace"); - if (keyspaceStr == null) { - keyspaceStr = "mailbox"; - } - LOGGER.info("Keyspace: {}", keyspaceStr); - keyspace = CqlIdentifier.fromCql(keyspaceStr); - - String lbp = context.getProperty("cassandra.lbp"); - if (lbp != null) { - LOGGER.info("Custom LBP: " + lbp); - configLoaderBuilder.withString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, lbp); - } else { - LOGGER.info("Custom LBP: NO"); - } - - String datacenter = context.getProperty("cassandra.datacenter"); - if (datacenter != null) { - LOGGER.info("Custom datacenter: " + datacenter); - configLoaderBuilder.withString( - 
DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, datacenter); - } else { - LOGGER.info("Custom datacenter: NO"); - } - - String compression = context.getProperty("cassandra.compression"); - if (compression != null) { - LOGGER.info("Compression: {}", compression); - configLoaderBuilder.withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compression); - } else { - LOGGER.info("Compression: NONE"); - } - - graphName = context.getProperty("cassandra.graph.name"); - if (graphName != null) { - LOGGER.info("Graph name: {}", graphName); - configLoaderBuilder.withString(DseDriverOption.GRAPH_NAME, graphName); - configLoaderBuilder.withString( - DseDriverOption.GRAPH_SUB_PROTOCOL, GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); - } else { - LOGGER.info("Graph: NONE"); - } - - builder.withConfigLoader(configLoaderBuilder.build()); - - LOGGER.info("Initializing session"); - session = builder.build(); - LOGGER.info("Session initialized"); - } - - private void registerService(BundleContext context) { - MailboxServiceImpl mailbox; - if ("true".equalsIgnoreCase(context.getProperty("cassandra.reactive"))) { - mailbox = new ReactiveMailboxServiceImpl(session, keyspace); - } else if ("true".equalsIgnoreCase(context.getProperty("cassandra.geo"))) { - mailbox = new GeoMailboxServiceImpl(session, keyspace); - } else if ("true".equalsIgnoreCase(context.getProperty("cassandra.graph"))) { - mailbox = new GraphMailboxServiceImpl(session, keyspace, graphName); - } else { - mailbox = new MailboxServiceImpl(session, keyspace); - } - mailbox.init(); - @SuppressWarnings("JdkObsolete") - Dictionary properties = new Hashtable<>(); - context.registerService(MailboxService.class.getName(), mailbox, properties); - LOGGER.info("Mailbox Service successfully initialized"); - } - - @Override - public void stop(BundleContext context) { - if (session != null) { - LOGGER.info("Closing session"); - session.close(); - session = null; - LOGGER.info("Session closed"); - } - } -} diff --git 
a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMapper.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMapper.java deleted file mode 100644 index a67df807e2f..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMapper.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi.service; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; - -@Mapper -public interface MailboxMapper { - - @DaoFactory - MailboxMessageDao mailboxMessageDao(@DaoKeyspace CqlIdentifier keyspace); -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMessageDao.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMessageDao.java deleted file mode 100644 index 9f6363d90a4..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMessageDao.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi.service; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; - -@Dao -@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) -public interface MailboxMessageDao { - - @Insert - void save(MailboxMessage message); - - @Select - PagingIterable findByRecipient(String recipient); -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxServiceImpl.java deleted file mode 100644 index 1da97d7d611..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxServiceImpl.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi.service; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.relation.Relation.column; - -import com.codahale.metrics.Timer; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import java.util.Optional; -import net.jcip.annotations.GuardedBy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MailboxServiceImpl implements MailboxService { - - private static final Logger LOGGER = LoggerFactory.getLogger(MailboxServiceImpl.class); - - protected final CqlSession session; - protected final CqlIdentifier keyspace; - - @GuardedBy("this") - protected boolean initialized = false; - - private PreparedStatement deleteStatement; - - protected MailboxMessageDao dao; - - public MailboxServiceImpl(CqlSession session, CqlIdentifier keyspace) { - this.session = session; - this.keyspace = keyspace; - } - - public synchronized void init() { - if (initialized) { - return; - } - createSchema(); - prepareStatements(); - createDaos(); - printMetrics(); - initialized = true; - } - - protected void createSchema() { - session.execute("DROP KEYSPACE IF EXISTS test_osgi"); - session.execute( - "CREATE KEYSPACE IF NOT EXISTS test_osgi with replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}"); - session.execute( - "CREATE TABLE " - + 
keyspace - + "." - + MailboxMessage.MAILBOX_TABLE - + " (" - + "recipient text," - + "timestamp timestamp," - + "sender text," - + "body text," - + "PRIMARY KEY (recipient, timestamp))"); - } - - protected void prepareStatements() { - deleteStatement = - session.prepare( - deleteFrom(keyspace, MailboxMessage.MAILBOX_TABLE) - .where(column("recipient").isEqualTo(bindMarker())) - .build()); - } - - protected void createDaos() { - MailboxMapper mapper = new MailboxMapperBuilder(session).build(); - dao = mapper.mailboxMessageDao(keyspace); - } - - protected void printMetrics() { - // Exercise metrics - if (session.getMetrics().isPresent()) { - Metrics metrics = session.getMetrics().get(); - Optional cqlRequests = metrics.getSessionMetric(DefaultSessionMetric.CQL_REQUESTS); - cqlRequests.ifPresent( - counter -> LOGGER.info("Number of CQL requests: {}", counter.getCount())); - } - } - - @Override - public Iterable getMessages(String recipient) throws MailboxException { - try { - return dao.findByRecipient(recipient); - } catch (Exception e) { - throw new MailboxException(e); - } - } - - @Override - public void sendMessage(MailboxMessage message) throws MailboxException { - try { - dao.save(message); - } catch (Exception e) { - throw new MailboxException(e); - } - } - - @Override - public void clearMailbox(String recipient) throws MailboxException { - try { - BoundStatement statement = deleteStatement.bind(recipient); - session.execute(statement); - } catch (Exception e) { - throw new MailboxException(e); - } - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMapper.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMapper.java deleted file mode 100644 index 3beb990c1c9..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMapper.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under 
one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.geo; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; - -@Mapper -public interface GeoMailboxMapper { - - @DaoFactory - GeoMailboxMessageDao mailboxMessageDao(@DaoKeyspace CqlIdentifier keyspace); -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMessageDao.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMessageDao.java deleted file mode 100644 index 1ea255fbe1d..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMessageDao.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.geo; - -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Insert; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage; -import com.datastax.oss.driver.internal.osgi.service.MailboxMessageDao; - -@Dao -@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) -public interface GeoMailboxMessageDao extends MailboxMessageDao { - - @Insert - void save(GeoMailboxMessage message); - - @Select - PagingIterable findGeoByRecipient(String recipient); -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxServiceImpl.java deleted file mode 100644 index 415ffaa35f4..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxServiceImpl.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.geo; - -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; -import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; -import static com.datastax.oss.driver.api.querybuilder.relation.Relation.column; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.BoundStatement; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage; -import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService; -import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; - -public class GeoMailboxServiceImpl extends MailboxServiceImpl implements GeoMailboxService { - - private PreparedStatement deleteGeoStatement; - private GeoMailboxMessageDao geoDao; - - public GeoMailboxServiceImpl(CqlSession session, CqlIdentifier keyspace) { - super(session, keyspace); - } - - @Override - protected void createSchema() { - super.createSchema(); - session.execute( - "CREATE TABLE " - + keyspace - + "." 
- + GeoMailboxMessage.MAILBOX_TABLE - + " (" - + "recipient text," - + "location 'PointType'," - + "sender text," - + "body text," - + "PRIMARY KEY (recipient, location))"); - } - - @Override - protected void prepareStatements() { - super.prepareStatements(); - deleteGeoStatement = - session.prepare( - deleteFrom(keyspace, GeoMailboxMessage.MAILBOX_TABLE) - .where(column("recipient").isEqualTo(bindMarker())) - .build()); - } - - @Override - protected void createDaos() { - super.createDaos(); - GeoMailboxMapper mapper = new GeoMailboxMapperBuilder(session).build(); - geoDao = mapper.mailboxMessageDao(keyspace); - } - - @Override - public void sendGeoMessage(GeoMailboxMessage message) throws MailboxException { - try { - geoDao.save(message); - } catch (Exception e) { - throw new MailboxException(e); - } - } - - @Override - public Iterable getGeoMessages(String recipient) throws MailboxException { - try { - return geoDao.findGeoByRecipient(recipient); - } catch (Exception e) { - throw new MailboxException(e); - } - } - - @Override - public void clearGeoMailbox(String recipient) throws MailboxException { - try { - BoundStatement statement = deleteGeoStatement.bind(recipient); - session.execute(statement); - } catch (Exception e) { - throw new MailboxException(e); - } - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/graph/GraphMailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/graph/GraphMailboxServiceImpl.java deleted file mode 100644 index b4637a27258..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/graph/GraphMailboxServiceImpl.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.graph; - -import static com.datastax.dse.driver.api.core.graph.DseGraph.g; -import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.unfold; - -import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; -import com.datastax.dse.driver.api.core.graph.GraphNode; -import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.graph.GraphMailboxService; -import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; -import java.time.Instant; -import java.util.stream.Collectors; - -public class GraphMailboxServiceImpl extends MailboxServiceImpl implements GraphMailboxService { - - private final String graphName; - - public GraphMailboxServiceImpl(CqlSession session, CqlIdentifier keyspace, String graphName) { - super(session, keyspace); - this.graphName = graphName; - } - - @Override - protected void createSchema() { - super.createSchema(); - session.execute( - ScriptGraphStatement.newInstance( - String.format("system.graph('%s').ifExists().drop()", graphName)) - .setSystemQuery(true), - 
ScriptGraphStatement.SYNC); - session.execute( - ScriptGraphStatement.newInstance( - String.format("system.graph('%s').ifNotExists().coreEngine().create()", graphName)) - .setSystemQuery(true), - ScriptGraphStatement.SYNC); - session.execute( - ScriptGraphStatement.newInstance( - "schema.vertexLabel('message')" - + ".partitionBy('recipient', Text)" - + ".clusterBy('timestamp', Timestamp)" - + ".property('sender', Text)" - + ".property('body', Text)" - + ".create();")); - } - - @Override - public Iterable getGraphMessages(String recipient) throws MailboxException { - FluentGraphStatement statement = - FluentGraphStatement.newInstance( - g.V().hasLabel("message").has("recipient", recipient).valueMap().by(unfold())); - try { - return session.execute(statement).all().stream() - .map(GraphNode::asMap) - .map( - vertex -> { - Instant timestamp = (Instant) vertex.get("timestamp"); - String sender = (String) vertex.get("sender"); - String body = (String) vertex.get("body"); - return new MailboxMessage(recipient, timestamp, sender, body); - }) - .collect(Collectors.toList()); - } catch (Exception e) { - throw new MailboxException(e); - } - } - - @Override - public void sendGraphMessage(MailboxMessage message) throws MailboxException { - FluentGraphStatement insertVertex = - FluentGraphStatement.newInstance( - g.addV("message") - .property("recipient", message.getRecipient()) - .property("timestamp", message.getTimestamp()) - .property("sender", message.getSender()) - .property("body", message.getBody())); - try { - session.execute(insertVertex); - } catch (Exception e) { - throw new MailboxException(e); - } - } -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMapper.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMapper.java deleted file mode 100644 index 7a1678c1ac8..00000000000 --- 
a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMapper.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.reactive; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; -import com.datastax.oss.driver.api.mapper.annotations.Mapper; - -@Mapper -public interface ReactiveMailboxMapper { - - @DaoFactory - ReactiveMailboxMessageDao mailboxMessageDao(@DaoKeyspace CqlIdentifier keyspace); -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMessageDao.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMessageDao.java deleted file mode 100644 index fe6f34a839c..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMessageDao.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.reactive; - -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.mapper.annotations.Dao; -import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; -import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.internal.osgi.service.MailboxMessageDao; - -@Dao -@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) -public interface ReactiveMailboxMessageDao extends MailboxMessageDao { - - @Select - MappedReactiveResultSet findByRecipientReactive(String recipient); -} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxServiceImpl.java deleted file mode 100644 index 5333524e884..00000000000 --- a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxServiceImpl.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under 
one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.service.reactive; - -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.reactive.ReactiveMailboxService; -import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; - -public class ReactiveMailboxServiceImpl extends MailboxServiceImpl - implements ReactiveMailboxService { - - private ReactiveMailboxMessageDao reactiveDao; - - public ReactiveMailboxServiceImpl(CqlSession session, CqlIdentifier keyspace) { - super(session, keyspace); - } - - @Override - protected void createDaos() { - super.createDaos(); - ReactiveMailboxMapper mapper = new ReactiveMailboxMapperBuilder(session).build(); - reactiveDao = mapper.mailboxMessageDao(keyspace); - } - - @Override - public MappedReactiveResultSet getMessagesReactive(String recipient) - throws MailboxException { - try { - return reactiveDao.findByRecipientReactive(recipient); - } catch (Exception e) { - throw new 
MailboxException(e); - } - } -} diff --git a/osgi-tests/src/main/resources/application.conf b/osgi-tests/src/main/resources/application.conf deleted file mode 100644 index 0c3e8e76c98..00000000000 --- a/osgi-tests/src/main/resources/application.conf +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# Configuration overrides for integration tests -datastax-java-driver { - basic { - load-balancing-policy.class = DcInferringLoadBalancingPolicy - request.timeout = 10 seconds - graph.timeout = 10 seconds - } - advanced { - retry-policy.class = com.datastax.oss.driver.api.osgi.CustomRetryPolicy - connection { - init-query-timeout = 5 seconds - set-keyspace-timeout = 5 seconds - } - heartbeat.timeout = 5 seconds - control-connection.timeout = 5 seconds - request { - trace.interval = 1 second - warn-if-set-keyspace = false - } - graph { - name = "demo" - } - continuous-paging.timeout { - first-page = 10 seconds - other-pages = 10 seconds - } - metrics { - session.enabled = [cql-requests] - // Raise histogram bounds because the tests execute DDL queries with a higher timeout - session.cql_requests.highest_latency = 30 seconds - } - // adjust quiet period to 0 seconds to speed up tests - netty { - io-group { - shutdown {quiet-period = 0, timeout = 15, unit = SECONDS} - } - admin-group { - shutdown {quiet-period = 0, timeout = 15, unit = SECONDS} - } - } - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiCustomLoadBalancingPolicyIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiCustomLoadBalancingPolicyIT.java deleted file mode 100644 index 99bd7294934..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiCustomLoadBalancingPolicyIT.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -/** - * Test that uses a policy from a separate bundle from the core driver to ensure that the driver is - * able to load that policy via Reflection. To support this, the driver uses - * DynamicImport-Package: *. 
- */ -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -public class OsgiCustomLoadBalancingPolicyIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.testBundles(), - CoreOptions.systemProperty("cassandra.lbp") - // This LBP resides in test-infra bundle and will be loaded the driver - // class loader, thanks to the "Dynamic-Import:*" directive - .value(SortingLoadBalancingPolicy.class.getName())); - } - - @Test - public void test_custom_lbp() throws Exception { - DefaultServiceChecks.checkService(service); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiDefaultIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiDefaultIT.java deleted file mode 100644 index a4dec25d96f..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiDefaultIT.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -public class OsgiDefaultIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - // this configuration purposely excludes bundles whose resolution should be optional: - // ESRI, Reactive Streams and Tinkerpop. This allows to validate that the driver can still - // work properly in an OSGi container as long as the missing packages are not accessed. 
- return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.testBundles()); - } - - @Test - public void test_default() throws Exception { - DefaultServiceChecks.checkService(service); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGeoTypesIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGeoTypesIT.java deleted file mode 100644 index c5ca962a66b..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGeoTypesIT.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.checks.GeoServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "5.0", - description = "Requires geo types") -public class OsgiGeoTypesIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.esriBundles(), - BundleOptions.testBundles()); - } - - @Test - public void test_geo_types() throws Exception { - DefaultServiceChecks.checkService(service); - assertThat(service).isInstanceOf(GeoMailboxService.class); - GeoServiceChecks.checkServiceGeo((GeoMailboxService) service); - } -} diff --git 
a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGraphIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGraphIT.java deleted file mode 100644 index be6997b9d02..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGraphIT.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.api.osgi.service.graph.GraphMailboxService; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.checks.GraphServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -@BackendRequirement( - type = BackendType.DSE, - minInclusive = "6.8", - description = "Requires Core Graph") -public class OsgiGraphIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.tinkerpopBundles(), - BundleOptions.testBundles()); - } - - @Test - public void test_graph() throws Exception { - DefaultServiceChecks.checkService(service); - assertThat(service).isInstanceOf(GraphMailboxService.class); - GraphServiceChecks.checkGraphService((GraphMailboxService) service); - } -} diff --git 
a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiLz4IT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiLz4IT.java deleted file mode 100644 index e8f470d3fdc..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiLz4IT.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -public class OsgiLz4IT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.lz4Bundle(), - BundleOptions.testBundles()); - } - - @Test - public void test_lz4_compression() throws Exception { - DefaultServiceChecks.checkService(service); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiReactiveIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiReactiveIT.java deleted file mode 100644 index 1710414b67d..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiReactiveIT.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.api.osgi.service.reactive.ReactiveMailboxService; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.checks.ReactiveServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -public class OsgiReactiveIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.reactiveBundles(), 
- BundleOptions.testBundles()); - } - - @Test - public void test_reactive() throws Exception { - DefaultServiceChecks.checkService(service); - assertThat(service).isInstanceOf(ReactiveMailboxService.class); - ReactiveServiceChecks.checkServiceReactive((ReactiveMailboxService) service); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiShadedIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiShadedIT.java deleted file mode 100644 index 780ed30874d..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiShadedIT.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -public class OsgiShadedIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreShadedBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - // Netty and Jackson are shaded - BundleOptions.testBundles()); - } - - @Test - public void test_shaded() throws Exception { - DefaultServiceChecks.checkService(service); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiSnappyIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiSnappyIT.java deleted file mode 100644 index 37abceeed7a..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiSnappyIT.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi; - -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; -import com.datastax.oss.driver.internal.osgi.support.BundleOptions; -import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; -import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; -import javax.inject.Inject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.Option; -import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; - -@RunWith(CcmPaxExam.class) -@ExamReactorStrategy(CcmExamReactorFactory.class) -@BackendRequirement(type = BackendType.CASSANDRA, maxExclusive = "4.0.0") -public class OsgiSnappyIT { - - @Inject MailboxService service; - - @Configuration - public Option[] config() { - return CoreOptions.options( - BundleOptions.applicationBundle(), - BundleOptions.driverCoreBundle(), - BundleOptions.driverQueryBuilderBundle(), - BundleOptions.driverMapperRuntimeBundle(), - BundleOptions.commonBundles(), - BundleOptions.nettyBundles(), - BundleOptions.jacksonBundles(), - BundleOptions.snappyBundle(), 
- BundleOptions.testBundles()); - } - - @Test - public void test_snappy_compression() throws Exception { - DefaultServiceChecks.checkService(service); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/DefaultServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/DefaultServiceChecks.java deleted file mode 100644 index 90a6a2e4c8b..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/DefaultServiceChecks.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.checks; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.MailboxService; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -public class DefaultServiceChecks { - - /** - * Exercises an OSGi service provided by an OSGi bundle that depends on the driver. Ensures that - * queries can be made through the service with the current given configuration. 
- */ - public static void checkService(MailboxService service) throws Exception { - // Insert some data into mailbox for a particular user. - String recipient = "user@datastax.com"; - try { - List insertedMessages = new ArrayList<>(); - for (int i = 0; i < 30; i++) { - Instant timestamp = Instant.ofEpochMilli(i); - MailboxMessage message = new MailboxMessage(recipient, timestamp, "sender" + i, "body" + i); - insertedMessages.add(message); - service.sendMessage(message); - } - Iterable retrievedMessages = service.getMessages(recipient); - assertThat(retrievedMessages).containsExactlyElementsOf(insertedMessages); - } finally { - service.clearMailbox(recipient); - } - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GeoServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GeoServiceChecks.java deleted file mode 100644 index a0fb35e2df5..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GeoServiceChecks.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi.checks; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.core.data.geometry.Point; -import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage; -import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService; -import java.util.ArrayList; -import java.util.List; - -public class GeoServiceChecks { - - public static void checkServiceGeo(GeoMailboxService service) throws Exception { - // Insert some data into mailbox for a particular user. - String recipient = "user@datastax.com"; - try { - List insertedMessages = new ArrayList<>(); - for (int i = 0; i < 30; i++) { - Point location = Point.fromCoordinates(i, i); - GeoMailboxMessage message = - new GeoMailboxMessage(recipient, location, "sender" + i, "body" + i); - insertedMessages.add(message); - service.sendGeoMessage(message); - } - Iterable retrievedMessages = service.getGeoMessages(recipient); - assertThat(retrievedMessages).containsExactlyInAnyOrderElementsOf(insertedMessages); - } finally { - service.clearGeoMailbox(recipient); - } - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GraphServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GraphServiceChecks.java deleted file mode 100644 index 40bda10900b..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GraphServiceChecks.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.checks; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.graph.GraphMailboxService; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -public class GraphServiceChecks { - - public static void checkGraphService(GraphMailboxService service) throws MailboxException { - // Insert some data into mailbox for a particular user. 
- String recipient = "user@datastax.com"; - List insertedMessages = new ArrayList<>(); - for (int i = 0; i < 30; i++) { - Instant timestamp = Instant.ofEpochMilli(i); - MailboxMessage message = new MailboxMessage(recipient, timestamp, "sender" + i, "body" + i); - insertedMessages.add(message); - service.sendGraphMessage(message); - } - Iterable retrievedMessages = service.getGraphMessages(recipient); - assertThat(retrievedMessages).containsExactlyElementsOf(insertedMessages); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/ReactiveServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/ReactiveServiceChecks.java deleted file mode 100644 index fc4aa3448af..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/ReactiveServiceChecks.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.osgi.checks; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; -import com.datastax.oss.driver.api.osgi.service.MailboxException; -import com.datastax.oss.driver.api.osgi.service.MailboxMessage; -import com.datastax.oss.driver.api.osgi.service.reactive.ReactiveMailboxService; -import io.reactivex.Flowable; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -public class ReactiveServiceChecks { - - public static void checkServiceReactive(ReactiveMailboxService service) throws MailboxException { - // Insert some data into mailbox for a particular user. - String recipient = "user@datastax.com"; - try { - List insertedMessages = new ArrayList<>(); - for (int i = 0; i < 30; i++) { - Instant timestamp = Instant.ofEpochMilli(i); - MailboxMessage message = new MailboxMessage(recipient, timestamp, "sender" + i, "body" + i); - insertedMessages.add(message); - service.sendMessage(message); - } - MappedReactiveResultSet retrievedMessages = - service.getMessagesReactive(recipient); - List messageList = - Flowable.fromPublisher(retrievedMessages).toList().blockingGet(); - assertThat(messageList).containsExactlyElementsOf(insertedMessages); - } finally { - service.clearMailbox(recipient); - } - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/BundleOptions.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/BundleOptions.java deleted file mode 100644 index 63d11f8ee08..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/BundleOptions.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.support; - -import static org.ops4j.pax.exam.CoreOptions.bundle; -import static org.ops4j.pax.exam.CoreOptions.junitBundles; -import static org.ops4j.pax.exam.CoreOptions.mavenBundle; -import static org.ops4j.pax.exam.CoreOptions.options; -import static org.ops4j.pax.exam.CoreOptions.systemProperty; -import static org.ops4j.pax.exam.CoreOptions.systemTimeout; -import static org.ops4j.pax.exam.CoreOptions.vmOption; - -import org.ops4j.pax.exam.CoreOptions; -import org.ops4j.pax.exam.options.CompositeOption; -import org.ops4j.pax.exam.options.UrlProvisionOption; -import org.ops4j.pax.exam.options.WrappedUrlProvisionOption; - -public class BundleOptions { - - public static CompositeOption commonBundles() { - return () -> - options( - mavenBundle("org.apache.cassandra", "java-driver-guava-shaded").versionAsInProject(), - mavenBundle("io.dropwizard.metrics", "metrics-core").versionAsInProject(), - mavenBundle("org.slf4j", "slf4j-api").versionAsInProject(), - mavenBundle("org.hdrhistogram", "HdrHistogram").versionAsInProject(), - mavenBundle("com.typesafe", "config").versionAsInProject(), - mavenBundle("com.datastax.oss", "native-protocol").versionAsInProject(), - logbackBundles(), - debugOptions()); - } - - public static CompositeOption applicationBundle() { - return () -> - options( - 
systemProperty("cassandra.contactpoints").value("127.0.0.1"), - systemProperty("cassandra.port").value("9042"), - systemProperty("cassandra.keyspace").value("test_osgi"), - bundle("reference:file:target/classes")); - } - - public static UrlProvisionOption driverCoreBundle() { - return bundle("reference:file:../core/target/classes"); - } - - public static UrlProvisionOption driverCoreShadedBundle() { - return bundle("reference:file:../core-shaded/target/classes"); - } - - public static UrlProvisionOption driverQueryBuilderBundle() { - return bundle("reference:file:../query-builder/target/classes"); - } - - public static UrlProvisionOption driverMapperRuntimeBundle() { - return bundle("reference:file:../mapper-runtime/target/classes"); - } - - public static UrlProvisionOption driverTestInfraBundle() { - return bundle("reference:file:../test-infra/target/classes"); - } - - public static CompositeOption testBundles() { - return () -> - options( - driverTestInfraBundle(), - mavenBundle("org.apache.commons", "commons-exec").versionAsInProject(), - mavenBundle("org.assertj", "assertj-core").versionAsInProject(), - mavenBundle("org.awaitility", "awaitility").versionAsInProject(), - mavenBundle("org.hamcrest", "hamcrest").versionAsInProject(), - junitBundles()); - } - - public static CompositeOption nettyBundles() { - return () -> - options( - mavenBundle("io.netty", "netty-handler").versionAsInProject(), - mavenBundle("io.netty", "netty-buffer").versionAsInProject(), - mavenBundle("io.netty", "netty-codec").versionAsInProject(), - mavenBundle("io.netty", "netty-common").versionAsInProject(), - mavenBundle("io.netty", "netty-transport").versionAsInProject(), - mavenBundle("io.netty", "netty-transport-native-unix-common").versionAsInProject(), - mavenBundle("io.netty", "netty-resolver").versionAsInProject()); - } - - public static CompositeOption logbackBundles() { - return () -> - options( - mavenBundle("ch.qos.logback", "logback-classic").versionAsInProject(), - 
mavenBundle("ch.qos.logback", "logback-core").versionAsInProject(), - - // slf4j 2.x requires spifly in order to operate in an OSGi context - mavenBundle("org.apache.aries.spifly", "org.apache.aries.spifly.dynamic.bundle") - .version("1.3.7"), - mavenBundle("org.apache.aries", "org.apache.aries.util").version("1.1.1"), - mavenBundle("org.ow2.asm", "asm").version("9.6"), - mavenBundle("org.ow2.asm", "asm-commons").version("9.6"), - mavenBundle("org.ow2.asm", "asm-util").version("9.6"), - mavenBundle("org.ow2.asm", "asm-tree").version("9.6"), - mavenBundle("org.ow2.asm", "asm-analysis").version("9.6"), - systemProperty("logback.configurationFile") - .value("file:src/test/resources/logback-test.xml")); - } - - public static CompositeOption jacksonBundles() { - return () -> - options( - mavenBundle("com.fasterxml.jackson.core", "jackson-databind").versionAsInProject(), - mavenBundle("com.fasterxml.jackson.core", "jackson-core").versionAsInProject(), - mavenBundle("com.fasterxml.jackson.core", "jackson-annotations").versionAsInProject()); - } - - public static CompositeOption lz4Bundle() { - return () -> - options( - mavenBundle("at.yawk.lz4", "lz4-java").versionAsInProject(), - // at.yawk.lz4 requires sun.misc package - mavenBundle("com.diffplug.osgi", "com.diffplug.osgi.extension.sun.misc") - .version("0.0.0"), - systemProperty("cassandra.compression").value("LZ4")); - } - - public static CompositeOption snappyBundle() { - return () -> - options( - mavenBundle("org.xerial.snappy", "snappy-java").versionAsInProject(), - systemProperty("cassandra.compression").value("SNAPPY")); - } - - public static CompositeOption tinkerpopBundles() { - return () -> - options( - CoreOptions.wrappedBundle( - mavenBundle("org.apache.tinkerpop", "gremlin-core").versionAsInProject()) - .exports( - // avoid exporting 'org.apache.tinkerpop.gremlin.*' as other Tinkerpop jars have - // this root package as well - "org.apache.tinkerpop.gremlin.jsr223.*", - 
"org.apache.tinkerpop.gremlin.process.*", - "org.apache.tinkerpop.gremlin.structure.*", - "org.apache.tinkerpop.gremlin.util.*") - .bundleSymbolicName("org.apache.tinkerpop.gremlin-core") - .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), - CoreOptions.wrappedBundle( - mavenBundle("org.apache.tinkerpop", "tinkergraph-gremlin").versionAsInProject()) - .exports("org.apache.tinkerpop.gremlin.tinkergraph.*") - .bundleSymbolicName("org.apache.tinkerpop.tinkergraph-gremlin") - .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), - CoreOptions.wrappedBundle( - mavenBundle("org.apache.tinkerpop", "gremlin-shaded").versionAsInProject()) - .exports("org.apache.tinkerpop.shaded.*") - .bundleSymbolicName("org.apache.tinkerpop.gremlin-shaded") - .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), - // Note: the versions below are hard-coded because they shouldn't change very often, - // but if the tests fail because of them, we should consider parameterizing them - mavenBundle("com.sun.activation", "jakarta.activation", "2.0.1"), - mavenBundle("com.sun.mail", "mailapi", "2.0.1"), - mavenBundle("org.apache.commons", "commons-text", "1.8"), - mavenBundle("org.apache.commons", "commons-configuration2", "2.9.0"), - CoreOptions.wrappedBundle(mavenBundle("commons-logging", "commons-logging", "1.1.1")) - .exports("org.apache.commons.logging.*") - .bundleVersion("1.1.1") - .bundleSymbolicName("org.apache.commons.commons-logging") - .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), - mavenBundle("commons-collections", "commons-collections", "3.2.2"), - mavenBundle("org.apache.commons", "commons-lang3", "3.8.1"), - mavenBundle("commons-lang", "commons-lang", "2.6"), - CoreOptions.wrappedBundle(mavenBundle("org.javatuples", "javatuples", "1.2")) - .exports("org.javatuples.*") - .bundleVersion("1.2") - .bundleSymbolicName("org.javatuples") - .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), - 
systemProperty("cassandra.graph").value("true"), - systemProperty("cassandra.graph.name").value("test_osgi_graph")); - } - - public static CompositeOption esriBundles() { - return () -> - options( - CoreOptions.wrappedBundle( - mavenBundle("com.esri.geometry", "esri-geometry-api").versionAsInProject()) - .exports("com.esri.core.geometry.*") - .imports("org.json", "org.codehaus.jackson") - .bundleSymbolicName("com.esri.core.geometry") - .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), - mavenBundle("org.json", "json").versionAsInProject(), - mavenBundle("org.codehaus.jackson", "jackson-core-asl").versionAsInProject(), - systemProperty("cassandra.geo").value("true")); - } - - public static CompositeOption reactiveBundles() { - return () -> - options( - mavenBundle("org.reactivestreams", "reactive-streams").versionAsInProject(), - mavenBundle("io.reactivex.rxjava2", "rxjava").versionAsInProject(), - systemProperty("cassandra.reactive").value("true")); - } - - private static CompositeOption debugOptions() { - boolean debug = Boolean.getBoolean("osgi.debug"); - if (debug) { - return () -> - options( - vmOption("-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"), - systemTimeout(Long.MAX_VALUE)); - } else { - return CoreOptions::options; - } - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmExamReactorFactory.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmExamReactorFactory.java deleted file mode 100644 index eb9e71a76d9..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmExamReactorFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.support; - -import java.util.List; -import org.ops4j.pax.exam.TestContainer; -import org.ops4j.pax.exam.TestProbeBuilder; -import org.ops4j.pax.exam.spi.StagedExamReactor; -import org.ops4j.pax.exam.spi.StagedExamReactorFactory; - -public class CcmExamReactorFactory implements StagedExamReactorFactory { - - @Override - public StagedExamReactor create(List containers, List mProbes) { - return new CcmStagedReactor(containers, mProbes); - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmPaxExam.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmPaxExam.java deleted file mode 100644 index d872acfa2b5..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmPaxExam.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.support; - -import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; -import org.junit.AssumptionViolatedException; -import org.junit.runner.Description; -import org.junit.runner.notification.Failure; -import org.junit.runner.notification.RunNotifier; -import org.junit.runners.model.InitializationError; -import org.ops4j.pax.exam.junit.PaxExam; - -public class CcmPaxExam extends PaxExam { - - public CcmPaxExam(Class klass) throws InitializationError { - super(klass); - } - - @Override - public void run(RunNotifier notifier) { - Description description = getDescription(); - if (BackendRequirementRule.meetsDescriptionRequirements(description)) { - super.run(notifier); - } else { - // requirements not met, throw reasoning assumption to skip test - AssumptionViolatedException e = - new AssumptionViolatedException(BackendRequirementRule.buildReasonString(description)); - notifier.fireTestAssumptionFailed(new Failure(description, e)); - } - } -} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmStagedReactor.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmStagedReactor.java deleted file mode 100644 index ce4d9095361..00000000000 --- a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmStagedReactor.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.osgi.support; - -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.requirement.BackendType; -import java.util.List; -import java.util.Objects; -import net.jcip.annotations.GuardedBy; -import org.ops4j.pax.exam.TestContainer; -import org.ops4j.pax.exam.TestProbeBuilder; -import org.ops4j.pax.exam.spi.reactors.AllConfinedStagedReactor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class CcmStagedReactor extends AllConfinedStagedReactor { - - private static final Logger LOGGER = LoggerFactory.getLogger(CcmStagedReactor.class); - - public static final CcmBridge CCM_BRIDGE; - - public static final Version DSE_5_0 = Objects.requireNonNull(Version.parse("5.0")); - - static { - CcmBridge.Builder builder = CcmBridge.builder().withNodes(1); - if (CcmBridge.isDistributionOf(BackendType.DSE, (dist, cass) -> dist.compareTo(DSE_5_0) >= 0)) { - builder.withDseWorkloads("graph"); - } - CCM_BRIDGE = builder.build(); - } - - @GuardedBy("this") - private boolean running = false; - - public CcmStagedReactor(List containers, List mProbes) { - super(containers, mProbes); - } - - @Override - public synchronized void 
beforeSuite() { - if (!running) { - LOGGER.info( - "Starting CCM, running {} version {}", - CcmBridge.DISTRIBUTION, - CcmBridge.getDistributionVersion()); - CCM_BRIDGE.create(); - CCM_BRIDGE.start(); - LOGGER.info("CCM started"); - running = true; - Runtime.getRuntime() - .addShutdownHook( - new Thread( - () -> { - try { - afterSuite(); - } catch (Exception e) { - // silently remove as may have already been removed. - } - })); - } - } - - @Override - public synchronized void afterSuite() { - if (running) { - LOGGER.info("Stopping CCM"); - CCM_BRIDGE.stop(); - CCM_BRIDGE.close(); - running = false; - LOGGER.info("CCM stopped"); - } - } -} diff --git a/osgi-tests/src/test/resources/exam.properties b/osgi-tests/src/test/resources/exam.properties deleted file mode 100644 index ad702b0672c..00000000000 --- a/osgi-tests/src/test/resources/exam.properties +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -pax.exam.system=test -pax.exam.logging=none \ No newline at end of file diff --git a/osgi-tests/src/test/resources/logback-test.xml b/osgi-tests/src/test/resources/logback-test.xml deleted file mode 100644 index 6c2a3f70250..00000000000 --- a/osgi-tests/src/test/resources/logback-test.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - - - - - diff --git a/performance/README.md b/performance/README.md deleted file mode 100644 index ff66a453e9b..00000000000 --- a/performance/README.md +++ /dev/null @@ -1,95 +0,0 @@ - - -# How to run the Driver duration tests - -Note: the procedure described in this page is currently only accessible to DataStax employees. - -## Overview - -A duration test applies a constant, pre-defined load to the cluster for an extended period of time, -typically 2 or 3 days, while also generating some chaos by randomly restarting nodes. The load is -a mix of reads, writes, and deletes. - -Duration tests are useful to detect performance regressions between 2 different driver versions. - -The Java Driver duration tests are stored in a [private -repository](https://github.com/riptano/driver-examples/tree/java-driver-4.x/java/durationTest/) -accessible only to DataStax employees. - -A duration test executes in an infinite loop the following actions: - -1. Confirm row does not exist -2. Write row -3. Confirm read of row -4. Delete row -5. Confirm row does not exist - -The actions are performed randomly via SimpleStatements, BatchStatements (except on reads), and -PreparedStatements. - -## Running the duration tests on DataStax Fallout - -DataStax internal Fallout server has modules that allow to automate running and monitoring duration -tests. - -### Step 0: Set up a Graphite server - -1. If you haven't done this yet, create a new Fallout test based on the [graphite-setup.yaml] - template. -2. Run the test and wait for its successful completion. 
- * Choose a `keep_alive` parameter that is large enough to run all the planned duration tests. - E.g. if you intend to run duration tests for 10 days, set this parameter to a value greater - than or equal to `10d`. The default is 15 days. -3. Obtain the IP of the Graphite server: - * Navigate to the test artifacts. The IP can be found in the `ctool-cluster-info.txt` file of - the server group: - ![ctool-cluster-info](ctool-cluster-info.png) - * Log in to the Graphite server to check that the server was correctly set up: - `http://:3000` (VPN required). - The username/password is Graphite's default: `admin/admin`. - -Two Grafana dashboards should be loaded automatically: - -* `Java Driver 4 Duration Test Metrics (aggregate)`: provides high-level information such as - the number of completed tests per minute. Useful to compare different test runs. -* `Java Driver 4 Duration Test Metrics (focus)`: provides detailed information for one specific - test run. Can be useful to drill down on issues encountered during the test, or to inspect - latencies, throughput, etc. - -If the above Grafana dashboards are not loaded for some reason, they can be found in this [private -repository](https://github.com/riptano/testeng-devtools/tree/master/duration-tests/java/grafana). - -### Steps 1 to N: Run duration tests and compare results - -1. If you haven't done this yet, create a new Fallout test based on the [duration-test.yaml] - template. -2. 
For each combination of server and driver that you wish to test, launch a distinct test run and - modify its parameters to match the desired scenario: - * Change `server_type` and`server_version` to match the exact server you plan on testing - against; - * Change `driver_rev` and `driver_label` to be whatever driver revision you are using ( - `driver_label` is merely for reporting purposes); - * Don't forget to change the `graphite_host` parameter to match the Graphite server IP obtained - in the previous step; - * Finally, choose the desired duration (default is 2 days). -3. Run the test and monitor the performance on the Graphite server. - -Once a test run is finished, the cluster and the client VMs are destroyed, but their logs are -conserved as test artifacts in Fallout. diff --git a/performance/ctool-cluster-info.png b/performance/ctool-cluster-info.png deleted file mode 100644 index 550b077b7eb..00000000000 Binary files a/performance/ctool-cluster-info.png and /dev/null differ diff --git a/performance/duration-test.yaml b/performance/duration-test.yaml deleted file mode 100644 index 6e718f2add8..00000000000 --- a/performance/duration-test.yaml +++ /dev/null @@ -1,99 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# Possible values: cassandra or dse -server_type: cassandra -# Server version (e.g. 3.11.7 or 6.8.8) -server_version: 3.11.7 -# The driver Git revision to checkout and build (can be a branch name, a tag name or a commit SHA) -driver_rev: 4.x -# A distinctive driver label to use, for reporting purposes (will appear in Graphite metric names) -driver_label: 4.10.0 -# The IP of a running Graphite server, see graphite-setup.yaml -graphite_host: 1.2.3.4 -# How long to run the duration test, default: 2 days -duration: 2d -# Cloud-specific settings -cloud_provider: nebula -cloud_tenant: drivers-automation -instance_type: m4.4xlarge - ---- - -ensemble: - server: - node.count: 3 - provisioner: - name: ctool - properties: - mark_for_reuse: false - cloud.provider: {{cloud_provider}} - cloud.tenant: {{cloud_tenant}} - cloud.instance.type: {{instance_type}} - configuration_manager: - - name: ctool - properties: - java.version: openjdk8 - product.install.type: tarball - product.type: {{server_type}} - product.version: {{server_version}} - cassandra.yaml: - hinted_handoff_enabled: false - datacenters: - datacenter1: - size: 3 - workload: cassandra - client: - node.count: 1 - provisioner: - name: ctool - properties: - mark_for_reuse: false - cloud.provider: {{cloud_provider}} - cloud.tenant: {{cloud_tenant}} - cloud.instance.type: {{instance_type}} - configuration_manager: - - name: ctool - properties: - java.version: openjdk8 - install.maven: true - - name: java_driver - properties: - oss.git.repository: git@github.com:datastax/java-driver.git - oss.git.branch: {{driver_rev}} - type: FOUR_X_OSS - - name: java_driver_duration_test - properties: - git.branch: java-driver-4.x -workload: - phases: - - run-duration-test: - module: java_driver_duration_test - properties: - is.four: true - duration: {{duration}} - graphite.host: {{graphite_host}} - graphite.prefix: duration-test-java-driver-{{driver_label}}-{{server_type}}-{{server_version}} - kill-nodes: - module: killnode_rhino - 
properties: - target.strategy: whitelist - target.number_of_nodes: 1 - target.selector: "*:*" - repeat.delay: 120 - repeat.iterations: 0 - graceful: true diff --git a/performance/graphite-setup.yaml b/performance/graphite-setup.yaml deleted file mode 100644 index 99bb8ecc8cc..00000000000 --- a/performance/graphite-setup.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# How long should the Graphite server be kept alive, default: 15 days -keep_alive: 15d -# Cloud-specific settings -cloud_provider: nebula -cloud_tenant: drivers-automation -instance_type: m4.2xlarge - ---- - -ensemble: - server: - node.count: 1 - provisioner: - name: ctool - properties: - mark_for_reuse: true - cluster_ttl: {{keep_alive}} - cloud.provider: {{cloud_provider}} - cloud.tenant: {{cloud_tenant}} - cloud.instance.type: {{instance_type}} - configuration_manager: - - name: ctool_monitoring - properties: - graphite.create_server: true - client: - node.count: 1 - provisioner: - name: ctool - properties: - mark_for_reuse: false - cloud.provider: {{cloud_provider}} - cloud.tenant: {{cloud_tenant}} - cloud.instance.type: {{instance_type}} -workload: - phases: - - upload-dashboards-to-grafana: - module: bash - properties: - script: | - echo "Graphite server IP: ${FALLOUT_SERVER_NODE0_MONITORING_GRAPHITE_HOST}" - git clone git@github.com:riptano/testeng-devtools.git ${FALLOUT_SCRATCH_DIR}/dashboard - curl --user admin:admin -d "@${FALLOUT_SCRATCH_DIR}/dashboard/duration-tests/java/grafana/aggregate4.json" -X POST -H "Content-Type: application/json" http://${FALLOUT_SERVER_NODE0_MONITORING_GRAPHITE_HOST}:3000/api/dashboards/db/ - curl --user admin:admin -d "@${FALLOUT_SCRATCH_DIR}/dashboard/duration-tests/java/grafana/focus4.json" -X POST -H "Content-Type: application/json" http://${FALLOUT_SERVER_NODE0_MONITORING_GRAPHITE_HOST}:3000/api/dashboards/db/ - target.group: client diff --git a/pom.xml b/pom.xml deleted file mode 100644 index 467373ae7d1..00000000000 --- a/pom.xml +++ /dev/null @@ -1,1066 +0,0 @@ - - - - 4.0.0 - - org.apache - apache - 23 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - pom - Apache Cassandra Java Driver - https://github.com/datastax/java-driver - 2017 - - core - core-shaded - query-builder - mapper-runtime - mapper-processor - metrics/micrometer - metrics/microprofile - guava-shaded - test-infra - integration-tests - 
osgi-tests - distribution-source - distribution - distribution-tests - examples - bom - - - UTF-8 - UTF-8 - 1.4.1 - - 2.1.12 - 4.1.18 - 4.1.130.Final - 1.2.1 - - 3.5.6 - - 2.0.16 - - 1.0.3 - 20230227 - 2.20.1 - ${jackson.version} - - 1.1.10.1 - 1.10.2 - - 3.19.0 - 1.3 - 4.13.2 - 1.3.15 - 6.0.0 - 7.0.1 - 4.13.4 - 2.6.4 - 0.11.0 - 1.1.4 - 2.31 - 2.5.0 - 2.1.1 - 1.1.4 - 2.2.2 - 4.0.3 - 2.0.0-M19 - 3.0.0 - 22.0.0.2 - false - ${skipTests} - - - - - org.apache.cassandra - java-driver-core - ${project.version} - test-jar - - - io.netty - netty-handler - ${netty.version} - - - - com.google.guava - guava - 33.3.1-jre - - - com.typesafe - config - ${config.version} - - - org.slf4j - slf4j-api - ${slf4j.version} - - - ch.qos.logback - logback-classic - ${logback.version} - - - org.xerial.snappy - snappy-java - ${snappy.version} - - - at.yawk.lz4 - lz4-java - ${lz4.version} - - - com.github.jnr - jnr-posix - - 3.1.15 - - - io.dropwizard.metrics - metrics-core - ${metrics.version} - - - org.hdrhistogram - HdrHistogram - ${hdrhistogram.version} - - - com.esri.geometry - esri-geometry-api - ${esri.version} - - - org.json - json - ${json.version} - - - org.apache.tinkerpop - gremlin-core - ${tinkerpop.version} - - - org.yaml - snakeyaml - - - com.carrotsearch - hppc - - - com.jcabi - * - - - net.objecthunter - exp4j - - - - - org.apache.tinkerpop - tinkergraph-gremlin - ${tinkerpop.version} - - - org.reactivestreams - reactive-streams - ${reactive-streams.version} - - - org.reactivestreams - reactive-streams-tck - ${reactive-streams.version} - - - com.github.stephenc.jcip - jcip-annotations - 1.0-1 - - - com.github.spotbugs - spotbugs-annotations - 3.1.12 - - - com.squareup - javapoet - 1.13.0 - - - junit - junit - ${junit.version} - - - com.tngtech.java - junit-dataprovider - 1.13.1 - - - org.assertj - assertj-core - ${assertj.version} - - - org.mockito - mockito-core - 2.28.2 - - - io.reactivex.rxjava2 - rxjava - ${rxjava.version} - - - com.datastax.oss.simulacron - 
simulacron-native-server - ${simulacron.version} - - - org.apache.commons - commons-exec - ${commons-exec.version} - - - org.osgi - org.osgi.core - ${osgi.version} - - - org.apache.felix - org.apache.felix.framework - ${felix.version} - - - org.ops4j.pax.exam - pax-exam-junit4 - ${pax-exam.version} - - - org.ops4j.pax.exam - pax-exam-container-forked - ${pax-exam.version} - - - org.ops4j.pax.exam - pax-exam-link-mvn - ${pax-exam.version} - - - org.ops4j.pax.url - pax-url-wrap - ${pax-url.version} - - - org.ops4j.pax.url - pax-url-reference - ${pax-url.version} - - - org.ops4j.pax.tinybundles - tinybundles - 3.0.0 - - - org.glassfish - javax.json - ${jsr353-ri.version} - - - javax.json - javax.json-api - ${jsr353-api.version} - - - javax.ws.rs - javax.ws.rs-api - ${jax-rs.version} - - - org.glassfish.jersey.core - jersey-server - ${jersey.version} - - - org.glassfish.jersey.media - jersey-media-json-jackson - ${jersey.version} - - - org.glassfish.jersey.containers - jersey-container-jdk-http - ${jersey.version} - - - org.glassfish.jersey.inject - jersey-hk2 - ${jersey.version} - - - org.glassfish.hk2 - hk2-api - ${hk2.version} - - - javax.inject - javax.inject - 1 - - - javax.annotation - javax.annotation-api - 1.3.2 - - - com.fasterxml.jackson.core - jackson-core - ${jackson.version} - - - com.fasterxml.jackson.core - jackson-databind - ${jackson-databind.version} - - - com.google.testing.compile - compile-testing - 0.19 - - - org.awaitility - awaitility - ${awaitility.version} - - - org.testng - testng - 7.3.0 - - - org.apache.directory.server - apacheds-core - ${apacheds.version} - - - org.slf4j - slf4j-log4j12 - - - - - org.apache.directory.server - apacheds-protocol-kerberos - ${apacheds.version} - - - org.apache.directory.server - apacheds-interceptor-kerberos - ${apacheds.version} - - - org.apache.directory.server - apacheds-protocol-ldap - ${apacheds.version} - - - org.apache.directory.server - apacheds-ldif-partition - ${apacheds.version} - - - 
org.apache.directory.server - apacheds-jdbm-partition - ${apacheds.version} - - - org.apache.directory.api - api-ldap-codec-standalone - 1.0.0-M26 - - - com.github.tomakehurst - wiremock - 2.25.0 - - - org.graalvm.sdk - graal-sdk - ${graalapi.version} - - - org.graalvm.nativeimage - svm - ${graalapi.version} - - - io.micrometer - micrometer-core - 1.6.5 - - - org.eclipse.microprofile.metrics - microprofile-metrics-api - 3.0 - - - io.smallrye - smallrye-metrics - 3.0.3 - - - io.projectreactor - reactor-bom - 2020.0.5 - pom - import - - - io.projectreactor.tools - blockhound - 1.0.8.RELEASE - - - io.projectreactor.tools - blockhound-junit-platform - 1.0.8.RELEASE - - - - - - - - maven-compiler-plugin - 3.8.1 - - - com.coveo - fmt-maven-plugin - 2.9 - - - au.com.acegi - xml-format-maven-plugin - 3.1.1 - - - com.mycila - license-maven-plugin - 3.0 - - - maven-surefire-plugin - ${surefire.version} - - - maven-failsafe-plugin - ${surefire.version} - - - maven-shade-plugin - 3.2.3 - - - maven-assembly-plugin - 3.3.0 - - - - net.alchim31.maven - scala-maven-plugin - 3.2.1 - - 2.11 - - -i - console.scala - - - - - maven-source-plugin - 3.1.0 - - - maven-javadoc-plugin - 3.2.0 - - - maven-jar-plugin - 3.2.0 - - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.8 - - - maven-gpg-plugin - 1.6 - - - maven-release-plugin - 2.5.3 - - - maven-install-plugin - 2.5.2 - - - maven-deploy-plugin - 2.8.2 - - - maven-dependency-plugin - 3.1.2 - - - org.jacoco - jacoco-maven-plugin - 0.8.10 - - - org.apache.felix - maven-bundle-plugin - 5.1.1 - - - org.revapi - revapi-maven-plugin - 0.15.1 - - false - \d+\.\d+\.\d+ - - - ${project.groupId}:${project.artifactId}:RELEASE - - - revapi.json - - - - - org.revapi - revapi-java - 0.28.4 - - - - - org.codehaus.mojo - versions-maven-plugin - 2.7 - - - org.codehaus.mojo - flatten-maven-plugin - 1.2.1 - - - org.apache.maven.plugins - maven-enforcer-plugin - 3.5.0 - - - - - - maven-enforcer-plugin - - - enforce-maven - - enforce - - - - - - 
[3.8.1,) - - - - - - - - maven-compiler-plugin - - javac-with-errorprone - true - 1.8 - 1.8 - - -Xep:FutureReturnValueIgnored:OFF - -Xep:PreferJavaTimeOverload:OFF - -Xep:AnnotateFormatMethod:OFF - -Xep:WildcardImport:WARN - -XepExcludedPaths:.*/target/(?:generated-sources|generated-test-sources)/.* - - true - true - false - - - - org.codehaus.plexus - plexus-compiler-javac-errorprone - 2.8.6 - - - com.google.errorprone - error_prone_core - 2.3.4 - - - - - com.coveo - fmt-maven-plugin - - - - check - - process-sources - - - - - au.com.acegi - xml-format-maven-plugin - - - - xml-check - - - - - - .idea/** - **/target/** - **/dependency-reduced-pom.xml - **/.flattened-pom.xml - docs/** - - - - - com.mycila - license-maven-plugin - - - - src/**/*.java - src/**/*.xml - src/**/*.properties - **/pom.xml - - - src/**/native-image.properties - **/src/main/config/ide/** - - - SLASHSTAR_STYLE - SCRIPT_STYLE - - true - - - - check-license - initialize - - check - - - - - - org.jacoco - jacoco-maven-plugin - - - - prepare-agent - - - - report - prepare-package - - report - - - - - - maven-surefire-plugin - - ${testing.jvm}/bin/java - - ${project.basedir}/src/test/resources/logback-test.xml - - - - usedefaultlisteners - false - - - ${skipUnitTests} - - - - maven-failsafe-plugin - - - ${project.basedir}/src/test/resources/logback-test.xml - - - - - org.sonatype.plugins - nexus-staging-maven-plugin - true - - ossrh - https://repository.apache.org/ - false - true - - - - maven-source-plugin - - - attach-sources - - jar-no-fork - - - - LICENSE_binary - NOTICE_binary.txt - - - - - - - maven-javadoc-plugin - - false - true - all,-missing - com.datastax.*.driver.internal* - - - apiNote - a - API note: - - - - leaks - X - - - - - - - check-api-leaks - - javadoc - - process-classes - - com.datastax.oss.doclet.ApiPlumber - - com.datastax.oss - api-plumber-doclet - 1.0.0 - - - - -preventleak - com.datastax.oss.driver.internal - com.datastax.dse.driver.internal - - -preventleak - 
com.datastax.oss.driver.shaded - - -preventleak - com.typesafe.config - - -preventleak - com.codahale.metrics - - -preventleak - org.HdrHistogram - - -preventleak - io.netty - - -preventleak - jnr - -preventleak - com.kenai.constantine - -preventleak - com.kenai.jffi - -preventleak - com.kenai.jnr - - -preventleak - net.jpountz - -preventleak - org.xerial.snappy - - false - - - - - - maven-release-plugin - - @{project.version} - true - false - release - deploy - - -DskipITs - - - - org.apache.felix - maven-bundle-plugin - true - - - ${project.version} - <_include>-osgi.bnd - - - jar - bundle - pom - - - - - org.revapi - revapi-maven-plugin - - - - check - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - 1.7.0 - - true - - - - - - - release - - - - maven-gpg-plugin - - - sign-artifacts - verify - - sign - - - - - - - - - - fast - - true - true - true - true - true - true - true - true - - - - - test-jdk-environment - - - !testJavaHome - - - - ${env.JAVA_HOME} - - - - - test-jdk-specified - - - testJavaHome - - - - ${testJavaHome} - - - - - test-jdk-8 - - [8,) - - - - - test-jdk-11 - - [11,) - - - - - test-jdk-14 - - [14,) - - - - -XX:+AllowRedefinitionToAddDeleteMethods - - - - - test-jdk-17 - - [17,) - - - - -XX:+AllowRedefinitionToAddDeleteMethods - - --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED - - - - - test-jdk-21 - - [21,) - - - - -XX:+AllowRedefinitionToAddDeleteMethods - - --add-opens=java.base/jdk.internal.util.random=ALL-UNNAMED - - - - - - Apache 2 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - Apache License Version 2.0 - - - - scm:git:git@github.com:datastax/java-driver.git - scm:git:git@github.com:datastax/java-driver.git - https://github.com/datastax/java-driver - HEAD - - - - Various - DataStax - - - diff --git a/pre-commit.sh b/pre-commit.sh deleted file mode 100755 index 912564ae81e..00000000000 --- a/pre-commit.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software 
Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# STASH_NAME="pre-commit-$(date +%s)" -# git stash save --keep-index $STASH_NAME - -mvn clean test -RESULT=$? - -# STASHES=$(git stash list) -# if [[ $STASHES == *$STASH_NAME* ]]; then -# git stash pop -# fi - -[ $RESULT -ne 0 ] && exit 1 -exit 0 diff --git a/query-builder/pom.xml b/query-builder/pom.xml deleted file mode 100644 index 2bfe1bee8f5..00000000000 --- a/query-builder/pom.xml +++ /dev/null @@ -1,156 +0,0 @@ - - - - 4.0.0 - - org.apache.cassandra - java-driver-parent - 4.19.3-SNAPSHOT - - java-driver-query-builder - bundle - Apache Cassandra Java Driver - query builder - - - - ${project.groupId} - java-driver-bom - ${project.version} - pom - import - - - - - - org.apache.cassandra - java-driver-core - - - org.apache.cassandra - java-driver-guava-shaded - - - com.github.stephenc.jcip - jcip-annotations - provided - - - com.github.spotbugs - spotbugs-annotations - provided - - - junit - junit - test - - - com.tngtech.java - junit-dataprovider - test - - - org.assertj - assertj-core - test - - - org.apache.cassandra - java-driver-core - test - test-jar - - - - - - src/main/resources - - - ${project.basedir}/.. 
- - LICENSE - NOTICE_binary.txt - NOTICE.txt - - META-INF - - - - - src/test/resources - - project.properties - - true - - - src/test/resources - - project.properties - - false - - - - - maven-jar-plugin - - - - com.datastax.oss.driver.querybuilder - - - - - - org.apache.felix - maven-bundle-plugin - - - com.datastax.oss.driver.querybuilder - !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, * - com.datastax.oss.driver.*.querybuilder.*, com.datastax.dse.driver.*.querybuilder.* - - - - - maven-dependency-plugin - - - generate-dependency-list - - list - - generate-resources - - runtime - true - com.datastax.cassandra,com.datastax.dse - ${project.build.outputDirectory}/com/datastax/dse/driver/internal/querybuilder/deps.txt - - - - - - - diff --git a/query-builder/revapi.json b/query-builder/revapi.json deleted file mode 100644 index 870924ba474..00000000000 --- a/query-builder/revapi.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "revapi": { - "java": { - "filter": { - "packages": { - "regex": true, - "exclude": [ - "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", - "org\\.assertj(\\..+)?", - "// Don't re-check sibling modules that this module depends on", - "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.core(\\..+)?" 
- ] - } - } - }, - "ignore": [ - { - "code": "java.method.varargOverloadsOnlyDifferInVarargParameter", - "justification": "CASSJAVA-102: Suppress newly-supported varargs check" - } - ] - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.java deleted file mode 100644 index 24e606897e5..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder; - -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; - -/** - * A DSE extension of the Cassandra driver's {@linkplain QueryBuilder query builder}. - * - *

    Note that, at this time, this class acts a simple pass-through: there is no DSE-specific - * syntax for DML queries, therefore it just inherits all of {@link QueryBuilder}'s methods, without - * adding any of its own. - * - *

    However, it is a good idea to use it as the entry point to the DSL in your DSE application, to - * avoid changing all your imports if specialized methods get added here in the future. - */ -public class DseQueryBuilder extends QueryBuilder { - // nothing to do -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.java deleted file mode 100644 index 456746204b5..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.java +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder; - -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseKeyspaceStart; -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseKeyspaceStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTableStart; -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultAlterDseKeyspace; -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultAlterDseTable; -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseAggregate; -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseFunction; -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseKeyspace; -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseTable; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import com.datastax.oss.driver.api.querybuilder.schema.CreateAggregateStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionStart; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** - * An extension of {@link com.datastax.oss.driver.api.querybuilder.SchemaBuilder} for building - * schema entities that have DSE specific functionality. - */ -public class DseSchemaBuilder extends SchemaBuilder { - - /** - * Starts a CREATE AGGREGATE query with the given aggregate name. This assumes the keyspace name - * is already qualified for the Session or Statement. 
- */ - @NonNull - public static CreateDseAggregateStart createDseAggregate(@NonNull CqlIdentifier aggregateId) { - return new DefaultCreateDseAggregate(aggregateId); - } - - /** Starts a CREATE AGGREGATE query with the given aggregate name for the given keyspace name. */ - @NonNull - public static CreateDseAggregateStart createDseAggregate( - @Nullable CqlIdentifier keyspaceId, @NonNull CqlIdentifier aggregateId) { - return new DefaultCreateDseAggregate(keyspaceId, aggregateId); - } - - /** - * Shortcut for {@link #createDseAggregate(CqlIdentifier) - * createDseAggregate(CqlIdentifier.fromCql(aggregateName))}. - */ - @NonNull - public static CreateDseAggregateStart createDseAggregate(@NonNull String aggregateName) { - return new DefaultCreateDseAggregate(CqlIdentifier.fromCql(aggregateName)); - } - - /** - * Shortcut for {@link #createAggregate(CqlIdentifier, CqlIdentifier) - * createDseAggregate(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(aggregateName))}. - */ - @NonNull - public static CreateDseAggregateStart createDseAggregate( - @Nullable String keyspaceName, @NonNull String aggregateName) { - return new DefaultCreateDseAggregate( - keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), - CqlIdentifier.fromCql(aggregateName)); - } - - /** - * Starts a CREATE AGGREGATE query with the given aggregate name. This assumes the keyspace name - * is already qualified for the Session or Statement. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link - * #createDseAggregate(CqlIdentifier)}. - */ - @NonNull - public static CreateAggregateStart createAggregate(@NonNull CqlIdentifier aggregateName) { - return SchemaBuilder.createAggregate(aggregateName); - } - - /** - * Starts a CREATE AGGREGATE query with the given aggregate name for the given keyspace name. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link - * #createDseAggregate(CqlIdentifier, CqlIdentifier)}. - */ - @NonNull - public static CreateAggregateStart createAggregate( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier aggregateName) { - return SchemaBuilder.createAggregate(keyspace, aggregateName); - } - - /** - * Shortcut for {@link #createAggregate(CqlIdentifier) - * createAggregate(CqlIdentifier.fromCql(aggregateName)}. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link - * #createDseAggregate(String)}. - */ - @NonNull - public static CreateAggregateStart createAggregate(@NonNull String aggregateName) { - return SchemaBuilder.createAggregate(aggregateName); - } - - /** - * Shortcut for {@link #createAggregate(CqlIdentifier, CqlIdentifier) - * createAggregate(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(aggregateName)}. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link - * #createDseAggregate(String, String)}. - */ - @NonNull - public static CreateAggregateStart createAggregate( - @Nullable String keyspace, @NonNull String aggregateName) { - return SchemaBuilder.createAggregate(keyspace, aggregateName); - } - - /** - * Starts a CREATE FUNCTION query with the given function name. This assumes the keyspace name is - * already qualified for the Session or Statement. - */ - @NonNull - public static CreateDseFunctionStart createDseFunction(@NonNull CqlIdentifier functionId) { - return new DefaultCreateDseFunction(functionId); - } - - /** Starts a CREATE FUNCTION query with the given function name for the given keyspace name. */ - @NonNull - public static CreateDseFunctionStart createDseFunction( - @Nullable CqlIdentifier keyspaceId, @NonNull CqlIdentifier functionId) { - return new DefaultCreateDseFunction(keyspaceId, functionId); - } - - /** - * Shortcut for {@link #createFunction(CqlIdentifier) - * createFunction(CqlIdentifier.fromCql(functionName)} - */ - @NonNull - public static CreateDseFunctionStart createDseFunction(@NonNull String functionName) { - return new DefaultCreateDseFunction(CqlIdentifier.fromCql(functionName)); - } - - /** - * Shortcut for {@link #createFunction(CqlIdentifier, CqlIdentifier) - * createFunction(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(functionName)} - */ - @NonNull - public static CreateDseFunctionStart createDseFunction( - @Nullable String keyspaceName, @NonNull String functionName) { - return new DefaultCreateDseFunction( - keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), - CqlIdentifier.fromCql(functionName)); - } - - /** - * Starts a CREATE FUNCTION query with the given function name. This assumes the keyspace name is - * already qualified for the Session or Statement. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use - * {@link #createDseFunction(CqlIdentifier)}. - */ - @NonNull - public static CreateFunctionStart createFunction(@NonNull CqlIdentifier functionName) { - return SchemaBuilder.createFunction(functionName); - } - - /** - * Starts a CREATE FUNCTION query with the given function name for the given keyspace name. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use - * {@link #createDseFunction(CqlIdentifier,CqlIdentifier)}. - */ - @NonNull - public static CreateFunctionStart createFunction( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { - return SchemaBuilder.createFunction(keyspace, functionName); - } - - /** - * Shortcut for {@link #createFunction(CqlIdentifier, CqlIdentifier) - * createFunction(CqlIdentifier.fromCql(keyspace, functionName)} - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use - * {@link #createDseFunction(String)}. - */ - @NonNull - public static CreateFunctionStart createFunction(@NonNull String functionName) { - return SchemaBuilder.createFunction(functionName); - } - - /** - * Shortcut for {@link #createFunction(CqlIdentifier) - * createFunction(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(functionName)}. - * - *

    Note that this method only covers open-source Cassandra syntax. If you want to use - * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use - * {@link #createDseFunction(String, String)}. - */ - @NonNull - public static CreateFunctionStart createFunction( - @Nullable String keyspace, @NonNull String functionName) { - return SchemaBuilder.createFunction(keyspace, functionName); - } - - /** Starts a CREATE KEYSPACE query. */ - @NonNull - public static CreateDseKeyspaceStart createDseKeyspace(@NonNull CqlIdentifier keyspaceName) { - return new DefaultCreateDseKeyspace(keyspaceName); - } - - /** - * Shortcut for {@link #createDseKeyspace(CqlIdentifier) - * createKeyspace(CqlIdentifier.fromCql(keyspaceName))} - */ - @NonNull - public static CreateDseKeyspaceStart createDseKeyspace(@NonNull String keyspaceName) { - return createDseKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** Starts an ALTER KEYSPACE query. */ - @NonNull - public static AlterDseKeyspaceStart alterDseKeyspace(@NonNull CqlIdentifier keyspaceName) { - return new DefaultAlterDseKeyspace(keyspaceName); - } - - /** - * Shortcut for {@link #alterDseKeyspace(CqlIdentifier) - * alterKeyspace(CqlIdentifier.fromCql(keyspaceName)}. - */ - @NonNull - public static AlterDseKeyspaceStart alterDseKeyspace(@NonNull String keyspaceName) { - return DseSchemaBuilder.alterDseKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * Starts a CREATE TABLE query with the given table name. This assumes the keyspace name is - * already qualified for the Session or Statement. - */ - @NonNull - public static CreateDseTableStart createDseTable(@NonNull CqlIdentifier tableName) { - return createDseTable(null, tableName); - } - - /** Starts a CREATE TABLE query with the given table name for the given keyspace name. 
*/ - @NonNull - public static CreateDseTableStart createDseTable( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - return new DefaultCreateDseTable(keyspace, tableName); - } - - /** - * Shortcut for {@link #createDseTable(CqlIdentifier) - * createDseTable(CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static CreateDseTableStart createDseTable(@NonNull String tableName) { - return createDseTable(CqlIdentifier.fromCql(tableName)); - } - - /** - * Shortcut for {@link #createDseTable(CqlIdentifier,CqlIdentifier) - * createDseTable(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static CreateDseTableStart createDseTable( - @Nullable String keyspace, @NonNull String tableName) { - return createDseTable( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(tableName)); - } - - /** - * Starts an ALTER TABLE query with the given table name. This assumes the keyspace name is - * already qualified for the Session or Statement. - */ - @NonNull - public static AlterDseTableStart alterDseTable(@NonNull CqlIdentifier tableName) { - return new DefaultAlterDseTable(tableName); - } - - /** Starts an ALTER TABLE query with the given table name for the given keyspace name. 
*/ - @NonNull - public static AlterDseTableStart alterDseTable( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - return new DefaultAlterDseTable(keyspace, tableName); - } - - /** - * Shortcut for {@link #alterDseTable(CqlIdentifier) - * alterDseTable(CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static AlterDseTableStart alterDseTable(@NonNull String tableName) { - return alterDseTable(CqlIdentifier.fromCql(tableName)); - } - - /** - * Shortcut for {@link #alterDseTable(CqlIdentifier,CqlIdentifier) - * alterDseTable(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static AlterDseTableStart alterDseTable( - @Nullable String keyspace, @NonNull String tableName) { - return alterDseTable( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(tableName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/package-info.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/package-info.java deleted file mode 100644 index 05f9d4e6912..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * This package effectively mirrors the Cassandra OSS Query Builder package to allow DSE extended - * schema and query building for the DSE driver. In general, a class in this package should simply - * extend the equivalent class in the OSS driver and add extended functionality. - */ -package com.datastax.dse.driver.api.querybuilder; diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspace.java deleted file mode 100644 index c7aa795ae24..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspace.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceOptions; -import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceReplicationOptions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface AlterDseKeyspace - extends BuildableQuery, - KeyspaceOptions, - KeyspaceReplicationOptions { - - @NonNull - @Override - AlterDseKeyspace withOption(@NonNull String name, @NonNull Object value); - - /** - * Adjusts durable writes configuration for this keyspace. If set to false, data written to the - * keyspace will bypass the commit log. - */ - @NonNull - @Override - AlterDseKeyspace withDurableWrites(boolean durableWrites); - - /** Adjusts the graph engine that will be used to interpret this keyspace. */ - @NonNull - AlterDseKeyspace withGraphEngine(String graphEngine); - - /** - * Adds 'replication' options. One should only use this when they have a custom replication - * strategy, otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - @Override - AlterDseKeyspace withReplicationOptions(@NonNull Map replicationOptions); - - /** - * Adds SimpleStrategy replication options with the given replication factor. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - @Override - AlterDseKeyspace withSimpleStrategy(int replicationFactor); - - /** - * Adds NetworkTopologyStrategy replication options with the given data center replication - * factors. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withSimpleStrategy(int)}. - * - * @param replications Mapping of data center name to replication factor to use for that data - * center. - */ - @NonNull - @Override - AlterDseKeyspace withNetworkTopologyStrategy(@NonNull Map replications); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceStart.java deleted file mode 100644 index 6a36d4b4d46..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceStart.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceOptions; -import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceReplicationOptions; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface AlterDseKeyspaceStart - extends KeyspaceOptions, KeyspaceReplicationOptions { - - @NonNull - @Override - AlterDseKeyspace withOption(@NonNull String name, @NonNull Object value); - - /** - * Adjusts durable writes configuration for this keyspace. If set to false, data written to the - * keyspace will bypass the commit log. - */ - @NonNull - @Override - AlterDseKeyspace withDurableWrites(boolean durableWrites); - - /** Adjusts the graph engine that will be used to interpret this keyspace. */ - @NonNull - AlterDseKeyspace withGraphEngine(String graphEngine); - - /** - * Adds 'replication' options. One should only use this when they have a custom replication - * strategy, otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - @Override - AlterDseKeyspace withReplicationOptions(@NonNull Map replicationOptions); - - /** - * Adds SimpleStrategy replication options with the given replication factor. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - @Override - AlterDseKeyspace withSimpleStrategy(int replicationFactor); - - /** - * Adds NetworkTopologyStrategy replication options with the given data center replication - * factors. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withSimpleStrategy(int)}. - * - * @param replications Mapping of data center name to replication factor to use for that data - * center. - */ - @NonNull - @Override - AlterDseKeyspace withNetworkTopologyStrategy(@NonNull Map replications); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumn.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumn.java deleted file mode 100644 index c5f05a661b9..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumn.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterDseTableAddColumn { - /** - * Adds a column definition in the ALTER TABLE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - AlterDseTableAddColumnEnd addColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #addColumn(CqlIdentifier, DataType) - * addColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default AlterDseTableAddColumnEnd addColumn( - @NonNull String columnName, @NonNull DataType dataType) { - return addColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** - * Adds a static column definition in the ALTER TABLE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - AlterDseTableAddColumnEnd addStaticColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #addStaticColumn(CqlIdentifier, DataType) - * addStaticColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default AlterDseTableAddColumnEnd addStaticColumn( - @NonNull String columnName, @NonNull DataType dataType) { - return addStaticColumn(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumnEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumnEnd.java deleted file mode 100644 index 80d3cc2a665..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumnEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterDseTableAddColumnEnd extends AlterDseTableAddColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumn.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumn.java deleted file mode 100644 index 50e672f8e6e..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumn.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterDseTableDropColumn { - /** - * Adds column(s) to drop to ALTER TABLE specification. This may be repeated with successive calls - * to drop columns. - */ - @NonNull - AlterDseTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... columnNames); - - /** Shortcut for {@link #dropColumns(CqlIdentifier...)}. 
*/ - @NonNull - default AlterDseTableDropColumnEnd dropColumns(@NonNull String... columnNames) { - CqlIdentifier ids[] = new CqlIdentifier[columnNames.length]; - for (int i = 0; i < columnNames.length; i++) { - ids[i] = CqlIdentifier.fromCql(columnNames[i]); - } - return dropColumns(ids); - } - - /** - * Adds a column to drop to ALTER TABLE specification. This may be repeated with successive calls - * to drop columns. Shortcut for {@link #dropColumns(CqlIdentifier...) #dropColumns(columnName)}. - */ - @NonNull - default AlterDseTableDropColumnEnd dropColumn(@NonNull CqlIdentifier columnName) { - return dropColumns(columnName); - } - - /** - * Shortcut for {@link #dropColumn(CqlIdentifier) dropColumn(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default AlterDseTableDropColumnEnd dropColumn(@NonNull String columnName) { - return dropColumns(CqlIdentifier.fromCql(columnName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumnEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumnEnd.java deleted file mode 100644 index 7e3d424eb31..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumnEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterDseTableDropColumnEnd extends AlterDseTableDropColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumn.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumn.java deleted file mode 100644 index 7a24a76f4ab..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumn.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterDseTableRenameColumn { - - /** - * Adds a column rename to ALTER TABLE specification. This may be repeated with successive calls - * to rename columns. - */ - @NonNull - AlterDseTableRenameColumnEnd renameColumn(@NonNull CqlIdentifier from, @NonNull CqlIdentifier to); - - /** - * Shortcut for {@link #renameColumn(CqlIdentifier, CqlIdentifier) - * renameField(CqlIdentifier.fromCql(from),CqlIdentifier.fromCql(to))}. - */ - @NonNull - default AlterDseTableRenameColumnEnd renameColumn(@NonNull String from, @NonNull String to) { - return renameColumn(CqlIdentifier.fromCql(from), CqlIdentifier.fromCql(to)); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumnEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumnEnd.java deleted file mode 100644 index db2890b844b..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumnEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterDseTableRenameColumnEnd extends AlterDseTableRenameColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableStart.java deleted file mode 100644 index bb34bb3fb38..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableStart.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public interface AlterDseTableStart - extends AlterDseTableWithOptions, - AlterDseTableAddColumn, - AlterDseTableDropColumn, - AlterDseTableRenameColumn, - DseTableGraphOptions { - - /** Completes ALTER TABLE specifying that compact storage should be removed from the table. */ - @NonNull - BuildableQuery dropCompactStorage(); - - /** - * Completes ALTER TABLE specifying the the type of a column should be changed. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #alterColumn(CqlIdentifier, DataType) - * alterColumn(CqlIdentifier.fromCql(columnName,dataType)}. - */ - @NonNull - default BuildableQuery alterColumn(@NonNull String columnName, @NonNull DataType dataType) { - return alterColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** Removes the named vertex label from this table. */ - @NonNull - BuildableQuery withoutVertexLabel(@Nullable CqlIdentifier vertexLabelId); - - /** - * Shortcut for {@link #withoutVertexLabel(CqlIdentifier) - * withoutVertexLabel(CqlIdentifier.fromCql(vertexLabelName))}. - */ - @NonNull - default BuildableQuery withoutVertexLabel(@NonNull String vertexLabelName) { - return withoutVertexLabel(CqlIdentifier.fromCql(vertexLabelName)); - } - - /** - * Removes the anonymous vertex label from this table. - * - *

    This is a shortcut for {@link #withoutVertexLabel(CqlIdentifier) withoutVertexLabel(null)}. - */ - @NonNull - default BuildableQuery withoutVertexLabel() { - return withoutVertexLabel((CqlIdentifier) null); - } - - /** Removes the named edge label from this table. */ - @NonNull - BuildableQuery withoutEdgeLabel(@Nullable CqlIdentifier edgeLabelId); - - /** - * Shortcut for {@link #withoutEdgeLabel(CqlIdentifier) - * withoutEdgeLabel(CqlIdentifier.fromCql(edgeLabelName))}. - */ - @NonNull - default BuildableQuery withoutEdgeLabel(@NonNull String edgeLabelName) { - return withoutEdgeLabel(CqlIdentifier.fromCql(edgeLabelName)); - } - - /** - * Removes the anonymous edge label from this table. - * - *

    This is a shortcut for {@link #withoutVertexLabel(CqlIdentifier) withoutEdgeLabel(null)}. - */ - @NonNull - default BuildableQuery withoutEdgeLabel() { - return withoutEdgeLabel((CqlIdentifier) null); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptions.java deleted file mode 100644 index 5713c3f25d6..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptions.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -public interface AlterDseTableWithOptions extends DseRelationOptions {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptionsEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptionsEnd.java deleted file mode 100644 index ef63881caa8..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptionsEnd.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterDseTableWithOptionsEnd - extends DseRelationOptions, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateEnd.java deleted file mode 100644 index e28c887cd22..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateEnd.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseAggregateEnd extends BuildableQuery { - - /** - * Adds INITCOND to the aggregate query. Defines the initial condition, values, of the first - * parameter in the SFUNC. 
- */ - @NonNull - CreateDseAggregateEnd withInitCond(@NonNull Term term); - - /** - * Adds FINALFUNC to the create aggregate query. This is used to specify what type is returned - * from the state function. - */ - @NonNull - CreateDseAggregateEnd withFinalFunc(@NonNull CqlIdentifier finalFunc); - - /** - * Shortcut for {@link #withFinalFunc(CqlIdentifier) - * withFinalFunc(CqlIdentifier.fromCql(finalFuncName))}. - */ - @NonNull - default CreateDseAggregateEnd withFinalFunc(@NonNull String finalFuncName) { - return withFinalFunc(CqlIdentifier.fromCql(finalFuncName)); - } - - /** - * Adds "DETERMINISTIC" to create aggregate specification. This is used to specify that this - * aggregate always returns the same output for a given input. Requires an initial condition and - * returns a single value. - */ - @NonNull - CreateDseAggregateEnd deterministic(); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStart.java deleted file mode 100644 index 76bece6ca5f..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStart.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseAggregateStart { - /** - * Adds IF NOT EXISTS to the create aggregate specification. This indicates that the aggregate - * should not be created if it already exists. - */ - @NonNull - CreateDseAggregateStart ifNotExists(); - - /** - * Adds OR REPLACE to the create aggregate specification. This indicates that the aggregate should - * replace an existing aggregate with the same name if it exists. - */ - @NonNull - CreateDseAggregateStart orReplace(); - - /** - * Adds a parameter definition in the CREATE AGGREGATE statement. - * - *

    Parameter keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseAggregateStart withParameter(@NonNull DataType paramType); - - /** Adds SFUNC to the create aggregate specification. This is the state function for each row. */ - @NonNull - CreateDseAggregateStateFunc withSFunc(@NonNull CqlIdentifier sfuncName); - - /** Shortcut for {@link #withSFunc(CqlIdentifier) withSFunc(CqlIdentifier.fromCql(sfuncName))}. */ - @NonNull - default CreateDseAggregateStateFunc withSFunc(@NonNull String sfuncName) { - return withSFunc(CqlIdentifier.fromCql(sfuncName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStateFunc.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStateFunc.java deleted file mode 100644 index deb3b49a34a..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStateFunc.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseAggregateStateFunc { - - /** - * Adds STYPE to the create aggregate query. This is used to specify what type is returned from - * the state function. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseAggregateEnd withSType(@NonNull DataType dataType); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionEnd.java deleted file mode 100644 index 901eb1705ab..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface CreateDseFunctionEnd extends BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionStart.java deleted file mode 100644 index 64a741d62a9..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionStart.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseFunctionStart { - - /** - * Adds IF NOT EXISTS to the create function specification. This indicates that the function - * should not be created if it already exists. 
- */ - @NonNull - CreateDseFunctionStart ifNotExists(); - - /** - * Adds OR REPLACE to the create function specification. This indicates that the function should - * replace an existing function with the same name if it exists. - */ - @NonNull - CreateDseFunctionStart orReplace(); - - /** - * Adds a parameter definition in the CREATE FUNCTION statement. - * - *

    Parameter keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseFunctionStart withParameter( - @NonNull CqlIdentifier paramName, @NonNull DataType paramType); - - /** - * Shortcut for {@link #withParameter(CqlIdentifier, DataType) - * withParameter(CqlIdentifier.asCql(paramName), dataType)}. - */ - @NonNull - default CreateDseFunctionStart withParameter( - @NonNull String paramName, @NonNull DataType paramType) { - return withParameter(CqlIdentifier.fromCql(paramName), paramType); - } - - /** - * Adds RETURNS NULL ON NULL to the create function specification. This indicates that the body of - * the function should be skipped when null input is provided. - */ - @NonNull - CreateDseFunctionWithNullOption returnsNullOnNull(); - - /** - * Adds CALLED ON NULL to the create function specification. This indicates that the body of the - * function not be skipped when null input is provided. - */ - @NonNull - CreateDseFunctionWithNullOption calledOnNull(); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithLanguage.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithLanguage.java deleted file mode 100644 index 10935061404..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithLanguage.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseFunctionWithLanguage { - - /** - * Adds AS to the create function specification. This is used to specify the body of the function. - * Note that it is expected that the provided body is properly quoted as this method does not make - * that decision for the user. For simple cases, one should wrap the input in single quotes, i.e. - * 'myBody'. If the body itself contains single quotes, one could use a - * postgres-style string literal, which is surrounded in two dollar signs, i.e. $$ myBody $$ - * . - */ - @NonNull - CreateDseFunctionEnd as(@NonNull String functionBody); - - /** - * Adds AS to the create function specification and quotes the function body. Assumes that if the - * input body contains at least one single quote, to quote the body with two dollar signs, i.e. - * $$ myBody $$, otherwise the body is quoted with single quotes, i.e. - * ' myBody '. If the function body is already quoted {@link #as(String)} should be used - * instead. 
- */ - @NonNull - default CreateDseFunctionEnd asQuoted(@NonNull String functionBody) { - if (functionBody.contains("'")) { - return as("$$ " + functionBody + " $$"); - } else { - return as('\'' + functionBody + '\''); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithNullOption.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithNullOption.java deleted file mode 100644 index 2a44c002852..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithNullOption.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseFunctionWithNullOption { - /** - * Adds RETURNS to the create function specification. 
This is used to specify what type is - * returned from the function. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseFunctionWithType returnsType(@NonNull DataType dataType); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithType.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithType.java deleted file mode 100644 index b70facf51a1..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithType.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseFunctionWithType { - /** - * Adds LANGUAGE to the create function specification. This is used to specify what language is - * used in the function body. - */ - @NonNull - CreateDseFunctionWithLanguage withLanguage(@NonNull String language); - - /** - * Adds "LANGUAGE java" to create function specification. 
Shortcut for {@link - * #withLanguage(String) withLanguage("java")}. - */ - @NonNull - default CreateDseFunctionWithLanguage withJavaLanguage() { - return withLanguage("java"); - } - - /** - * Adds "LANGUAGE javascript" to create function specification. Shortcut for {@link - * #withLanguage(String) withLanguage("javascript")}. - */ - @NonNull - default CreateDseFunctionWithLanguage withJavaScriptLanguage() { - return withLanguage("javascript"); - } - - /** - * Adds "DETERMINISTIC" to create function specification. This is used to specify that this - * function always returns the same output for a given input. - */ - @NonNull - CreateDseFunctionWithType deterministic(); - - /** - * Adds "MONOTONIC" to create function specification. This is used to specify that this function - * is either entirely non-increasing, or entirely non-decreasing. - */ - @NonNull - CreateDseFunctionWithType monotonic(); - - /** - * Adds "MONOTONIC ON" to create function specification. This is used to specify that this - * function has only a single column that is monotonic. If the function is fully monotonic, use - * {@link #monotonic()} instead. - */ - @NonNull - CreateDseFunctionWithType monotonicOn(@NonNull CqlIdentifier monotonicColumn); - - /** - * Shortcut for {@link #monotonicOn(CqlIdentifier) - * monotonicOn(CqlIdentifier.fromCql(monotonicColumn))}. 
- */ - @NonNull - default CreateDseFunctionWithType monotonicOn(@NonNull String monotonicColumn) { - return monotonicOn(CqlIdentifier.fromCql(monotonicColumn)); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspace.java deleted file mode 100644 index 0fcb87bafbd..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspace.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceOptions; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseKeyspace extends BuildableQuery, KeyspaceOptions { - - @NonNull - @Override - CreateDseKeyspace withOption(@NonNull String name, @NonNull Object value); - - /** - * Adjusts durable writes configuration for this keyspace. If set to false, data written to the - * keyspace will bypass the commit log. 
- */ - @NonNull - @Override - default CreateDseKeyspace withDurableWrites(boolean durableWrites) { - return withOption("durable_writes", durableWrites); - } - - /** Adjusts the graph engine that will be used to interpret this keyspace. */ - @NonNull - default CreateDseKeyspace withGraphEngine(String graphEngine) { - return this.withOption("graph_engine", graphEngine); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceStart.java deleted file mode 100644 index c0ee240c8ff..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceStart.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceReplicationOptions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface CreateDseKeyspaceStart extends KeyspaceReplicationOptions { - /** - * Adds IF NOT EXISTS to the create keyspace specification. This indicates that the keyspace - * should not be created it already exists. - */ - @NonNull - CreateDseKeyspaceStart ifNotExists(); - - /** - * Adds 'replication' options. One should only use this when they have a custom replication - * strategy, otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - @Override - CreateDseKeyspace withReplicationOptions(@NonNull Map replicationOptions); - - /** - * Adds SimpleStrategy replication options with the given replication factor. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - @Override - default CreateDseKeyspace withSimpleStrategy(int replicationFactor) { - ImmutableMap replication = - ImmutableMap.builder() - .put("class", "SimpleStrategy") - .put("replication_factor", replicationFactor) - .build(); - - return withReplicationOptions(replication); - } - - /** - * Adds NetworkTopologyStrategy replication options with the given data center replication - * factors. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withSimpleStrategy(int)}. - * - * @param replications Mapping of data center name to replication factor to use for that data - * center. - */ - @NonNull - @Override - default CreateDseKeyspace withNetworkTopologyStrategy( - @NonNull Map replications) { - ImmutableMap.Builder replicationBuilder = - ImmutableMap.builder().put("class", "NetworkTopologyStrategy"); - - for (Map.Entry replication : replications.entrySet()) { - replicationBuilder.put(replication.getKey(), replication.getValue()); - } - - return withReplicationOptions(replicationBuilder.build()); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTable.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTable.java deleted file mode 100644 index fa6d008c114..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTable.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseTable - extends BuildableQuery, OngoingDsePartitionKey, CreateDseTableWithOptions { - - /** - * Adds a clustering column definition in the CREATE TABLE statement. - * - *

    This includes the column declaration (you don't need an additional {@link - * #withColumn(CqlIdentifier, DataType) addColumn} call). - * - *

    Clustering key columns are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseTable withClusteringColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withClusteringColumn(CqlIdentifier, DataType) - * withClusteringColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateDseTable withClusteringColumn( - @NonNull String columnName, @NonNull DataType dataType) { - return withClusteringColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** - * Adds a column definition in the CREATE TABLE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseTable withColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withColumn(CqlIdentifier, DataType) - * withColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateDseTable withColumn(@NonNull String columnName, @NonNull DataType dataType) { - return withColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** - * Adds a static column definition in the CREATE TABLE statement. - * - *

    This includes the column declaration (you don't need an additional {@link - * #withColumn(CqlIdentifier, DataType) addColumn} call). - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseTable withStaticColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withStaticColumn(CqlIdentifier, DataType) - * withStaticColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateDseTable withStaticColumn(@NonNull String columnName, @NonNull DataType dataType) { - return withStaticColumn(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableStart.java deleted file mode 100644 index 030668262df..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableStart.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseTableStart extends OngoingDsePartitionKey { - - /** - * Adds IF NOT EXISTS to the create table specification. This indicates that the table should not - * be created if it already exists. - */ - @NonNull - CreateDseTableStart ifNotExists(); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableWithOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableWithOptions.java deleted file mode 100644 index 3d7b44ef905..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableWithOptions.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateDseTableWithOptions - extends BuildableQuery, - DseRelationStructure, - DseTableGraphOptions { - - /** Enables COMPACT STORAGE in the CREATE TABLE statement. 
*/ - @NonNull - CreateDseTableWithOptions withCompactStorage(); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseGraphEdgeSide.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseGraphEdgeSide.java deleted file mode 100644 index 7cfe8285919..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseGraphEdgeSide.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.dse.driver.internal.querybuilder.schema.DefaultDseGraphEdgeSide; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -public interface DseGraphEdgeSide { - - /** Starts the definition of a graph edge side by designating the from/to table. */ - @NonNull - static DseGraphEdgeSide table(@NonNull CqlIdentifier tableId) { - return new DefaultDseGraphEdgeSide(tableId); - } - - /** Shortcut for {@link #table(CqlIdentifier) table(CqlIdentifier.fromCql(tableName))}. 
*/ - @NonNull - static DseGraphEdgeSide table(@NonNull String tableName) { - return table(CqlIdentifier.fromCql(tableName)); - } - - /** - * Adds a partition key column to the primary key definition of this edge side. - * - *

    Call this method multiple times if the partition key is composite. - */ - @NonNull - DseGraphEdgeSide withPartitionKey(@NonNull CqlIdentifier columnId); - - /** - * Shortcut for {@link #withPartitionKey(CqlIdentifier) - * withPartitionKey(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default DseGraphEdgeSide withPartitionKey(@NonNull String columnName) { - return withPartitionKey(CqlIdentifier.fromCql(columnName)); - } - - /** - * Adds a clustering column to the primary key definition of this edge side. - * - *

    Call this method multiple times to add more than one clustering column. - */ - @NonNull - DseGraphEdgeSide withClusteringColumn(@NonNull CqlIdentifier columnId); - - /** - * Shortcut for {@link #withClusteringColumn(CqlIdentifier) - * withClusteringColumn(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default DseGraphEdgeSide withClusteringColumn(@NonNull String columnName) { - return withClusteringColumn(CqlIdentifier.fromCql(columnName)); - } - - @NonNull - CqlIdentifier getTableId(); - - @NonNull - List getPartitionKeyColumns(); - - @NonNull - List getClusteringColumns(); -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationOptions.java deleted file mode 100644 index 170390c43d5..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationOptions.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.schema.RelationOptions; - -public interface DseRelationOptions> - extends RelationOptions {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationStructure.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationStructure.java deleted file mode 100644 index f26039e45b4..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationStructure.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface DseRelationStructure> - extends DseRelationOptions { - - /** - * Adds the provided CLUSTERING ORDER. - * - *

    They will be appended in the iteration order of the provided map. If an ordering was already - * defined for a given identifier, it will be removed and the new ordering will appear in its - * position in the provided map. - */ - @NonNull - SelfT withClusteringOrderByIds(@NonNull Map orderings); - - /** - * Shortcut for {@link #withClusteringOrderByIds(Map)} with the columns specified as - * case-insensitive names. They will be wrapped with {@link CqlIdentifier#fromCql(String)}. - * - *

    Note that it's possible for two different case-sensitive names to resolve to the same - * identifier, for example "foo" and "Foo"; if this happens, a runtime exception will be thrown. - */ - @NonNull - default SelfT withClusteringOrder(@NonNull Map orderings) { - return withClusteringOrderByIds(CqlIdentifiers.wrapKeys(orderings)); - } - - /** - * Adds the provided clustering order. - * - *

    If clustering order was already defined for this identifier, it will be removed and the new - * clause will be appended at the end of the current clustering order. - */ - @NonNull - SelfT withClusteringOrder(@NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order); - - /** - * Shortcut for {@link #withClusteringOrder(CqlIdentifier, ClusteringOrder) - * withClusteringOrder(CqlIdentifier.fromCql(columnName), order)}. - */ - @NonNull - default SelfT withClusteringOrder(@NonNull String columnName, @NonNull ClusteringOrder order) { - return withClusteringOrder(CqlIdentifier.fromCql(columnName), order); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseTableGraphOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseTableGraphOptions.java deleted file mode 100644 index df1008ff053..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseTableGraphOptions.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public interface DseTableGraphOptions { - - /** Adds a vertex label to this table. */ - @NonNull - NextT withVertexLabel(@Nullable CqlIdentifier vertexLabelId); - - /** - * Shortcut for {@link #withVertexLabel(CqlIdentifier) - * withVertexLabel(CqlIdentifier.fromCql(vertexLabel))}. - */ - @NonNull - default NextT withVertexLabel(@NonNull String vertexLabelName) { - return withVertexLabel(CqlIdentifier.fromCql(vertexLabelName)); - } - - /** - * Adds an anonymous vertex label to this table. - * - *

    This is a shortcut for {@link #withVertexLabel(CqlIdentifier) withVertexLabel(null)}. - */ - @NonNull - default NextT withVertexLabel() { - return withVertexLabel((CqlIdentifier) null); - } - - /** - * Adds an edge label to this table. - * - *

    Use {@link DseGraphEdgeSide#table(CqlIdentifier)} to build the definitions of both sides, - * for example: - * - *

    {@code
    -   * withEdgeLabel("contrib",
    -   *               table("person")
    -   *                 .withPartitionKey("contributor"),
    -   *               table("soft")
    -   *                 .withPartitionKey("company_name"),
    -   *                 .withPartitionKey("software_name"),
    -   *                 .withClusteringColumn("software_version"))
    -   * }
    - */ - @NonNull - NextT withEdgeLabel( - @Nullable CqlIdentifier edgeLabelId, - @NonNull DseGraphEdgeSide from, - @NonNull DseGraphEdgeSide to); - - /** - * Shortcut for {@link #withEdgeLabel(CqlIdentifier, DseGraphEdgeSide, DseGraphEdgeSide) - * withEdgeLabel(CqlIdentifier.fromCql(edgeLabelName), from, to)}. - */ - @NonNull - default NextT withEdgeLabel( - @NonNull String edgeLabelName, @NonNull DseGraphEdgeSide from, @NonNull DseGraphEdgeSide to) { - return withEdgeLabel(CqlIdentifier.fromCql(edgeLabelName), from, to); - } - - /** - * Adds an anonymous edge label to this table. - * - *

    This is a shortcut for {@link #withEdgeLabel(CqlIdentifier, DseGraphEdgeSide, - * DseGraphEdgeSide) withEdgeLabel(null, from, to)}. - */ - @NonNull - default NextT withEdgeLabel(@NonNull DseGraphEdgeSide from, @NonNull DseGraphEdgeSide to) { - return withEdgeLabel((CqlIdentifier) null, from, to); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/OngoingDsePartitionKey.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/OngoingDsePartitionKey.java deleted file mode 100644 index d535939994b..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/OngoingDsePartitionKey.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTable; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface OngoingDsePartitionKey { - - /** - * Adds a partition key column definition. - * - *

    This includes the column declaration (you don't need an additional {@link - * CreateTable#withColumn(CqlIdentifier, DataType) addColumn} call). - * - *

    Partition keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateDseTable withPartitionKey(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withPartitionKey(CqlIdentifier, DataType) - * withPartitionKey(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateDseTable withPartitionKey(@NonNull String columnName, @NonNull DataType dataType) { - return withPartitionKey(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/package-info.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/package-info.java deleted file mode 100644 index 5e647ee3a4d..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * This package effectively mirrors the Cassandra OSS Schema interfaces to allow extended schema and - * query building for the DSE driver. 
NOTE: Changes made to the OSS driver will need to be mirrored - * here if the OSS driver changes affect an extended schema build strategy for the DSE driver. - */ -package com.datastax.dse.driver.api.querybuilder.schema; diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseKeyspace.java deleted file mode 100644 index 6fa2a64eaf3..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseKeyspace.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseKeyspace; -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseKeyspaceStart; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAlterDseKeyspace implements AlterDseKeyspaceStart, AlterDseKeyspace { - - private final CqlIdentifier keyspaceName; - private final ImmutableMap options; - - public DefaultAlterDseKeyspace(@NonNull CqlIdentifier keyspaceName) { - this(keyspaceName, ImmutableMap.of()); - } - - public DefaultAlterDseKeyspace( - @NonNull CqlIdentifier keyspaceName, @NonNull ImmutableMap options) { - this.keyspaceName = keyspaceName; - this.options = options; - } - - @NonNull - @Override - public AlterDseKeyspace withDurableWrites(boolean durableWrites) { - return withOption("durable_writes", durableWrites); - } - - @NonNull - @Override - public AlterDseKeyspace withGraphEngine(String graphEngine) { - return this.withOption("graph_engine", graphEngine); - } - - @NonNull - @Override - public AlterDseKeyspace withReplicationOptions(@NonNull Map replicationOptions) { - return withOption("replication", replicationOptions); - } - - @NonNull - @Override - public AlterDseKeyspace withSimpleStrategy(int replicationFactor) { - ImmutableMap replication = - ImmutableMap.builder() - .put("class", "SimpleStrategy") - .put("replication_factor", replicationFactor) - .build(); - - return withReplicationOptions(replication); - } - - @NonNull - @Override - public AlterDseKeyspace withNetworkTopologyStrategy(@NonNull Map replications) { - ImmutableMap.Builder 
replicationBuilder = - ImmutableMap.builder().put("class", "NetworkTopologyStrategy"); - - for (Map.Entry replication : replications.entrySet()) { - replicationBuilder.put(replication.getKey(), replication.getValue()); - } - - return withReplicationOptions(replicationBuilder.build()); - } - - @NonNull - @Override - public AlterDseKeyspace withOption(@NonNull String name, @NonNull Object value) { - return new DefaultAlterDseKeyspace( - keyspaceName, ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() { - return "ALTER KEYSPACE " + keyspaceName.asCql(true) + OptionsUtils.buildOptions(options, true); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - public CqlIdentifier getKeyspace() { - return keyspaceName; - } - - @Override - public String toString() { - return asCql(); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseTable.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseTable.java deleted file mode 100644 index 5f2ad10b7d1..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseTable.java +++ /dev/null @@ -1,485 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import static com.datastax.oss.driver.internal.querybuilder.schema.Utils.appendSet; - -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableAddColumnEnd; -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableDropColumnEnd; -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableRenameColumnEnd; -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableStart; -import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableWithOptionsEnd; -import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultAlterDseTable - implements AlterDseTableStart, - AlterDseTableAddColumnEnd, - AlterDseTableDropColumnEnd, - AlterDseTableRenameColumnEnd, - AlterDseTableWithOptionsEnd, - BuildableQuery { - - 
private final CqlIdentifier keyspace; - private final CqlIdentifier tableName; - - private final ImmutableMap columnsToAddInOrder; - private final ImmutableSet columnsToAdd; - private final ImmutableSet columnsToAddStatic; - private final ImmutableSet columnsToDrop; - private final ImmutableMap columnsToRename; - private final CqlIdentifier columnToAlter; - private final DataType columnToAlterType; - private final DseTableVertexOperation vertexOperation; - private final DseTableEdgeOperation edgeOperation; - private final ImmutableMap options; - private final boolean dropCompactStorage; - - public DefaultAlterDseTable(@NonNull CqlIdentifier tableName) { - this(null, tableName); - } - - public DefaultAlterDseTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - this( - keyspace, - tableName, - false, - ImmutableMap.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableMap.of(), - null, - null, - null, - null, - ImmutableMap.of()); - } - - public DefaultAlterDseTable( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier tableName, - boolean dropCompactStorage, - @NonNull ImmutableMap columnsToAddInOrder, - @NonNull ImmutableSet columnsToAdd, - @NonNull ImmutableSet columnsToAddStatic, - @NonNull ImmutableSet columnsToDrop, - @NonNull ImmutableMap columnsToRename, - @Nullable CqlIdentifier columnToAlter, - @Nullable DataType columnToAlterType, - @Nullable DseTableVertexOperation vertexOperation, - @Nullable DseTableEdgeOperation edgeOperation, - @NonNull ImmutableMap options) { - this.keyspace = keyspace; - this.tableName = tableName; - this.dropCompactStorage = dropCompactStorage; - this.columnsToAddInOrder = columnsToAddInOrder; - this.columnsToAdd = columnsToAdd; - this.columnsToAddStatic = columnsToAddStatic; - this.columnsToDrop = columnsToDrop; - this.columnsToRename = columnsToRename; - this.columnToAlter = columnToAlter; - this.columnToAlterType = columnToAlterType; - this.vertexOperation = 
vertexOperation; - this.edgeOperation = edgeOperation; - this.options = options; - } - - @NonNull - @Override - public AlterDseTableAddColumnEnd addColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - ImmutableCollections.append(columnsToAddInOrder, columnName, dataType), - appendSet(columnsToAdd, columnName), - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public AlterDseTableAddColumnEnd addStaticColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - ImmutableCollections.append(columnsToAddInOrder, columnName, dataType), - columnsToAdd, - appendSet(columnsToAddStatic, columnName), - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public BuildableQuery dropCompactStorage() { - return new DefaultAlterDseTable( - keyspace, - tableName, - true, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public AlterDseTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... 
columnNames) { - ImmutableSet.Builder builder = - ImmutableSet.builder().addAll(columnsToDrop); - for (CqlIdentifier columnName : columnNames) { - builder = builder.add(columnName); - } - - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - builder.build(), - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public AlterDseTableRenameColumnEnd renameColumn( - @NonNull CqlIdentifier from, @NonNull CqlIdentifier to) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - ImmutableCollections.append(columnsToRename, from, to), - columnToAlter, - columnToAlterType, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnName, - dataType, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public BuildableQuery withVertexLabel(@Nullable CqlIdentifier vertexLabelId) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - new DseTableVertexOperation(DseTableGraphOperationType.WITH, vertexLabelId), - edgeOperation, - options); - } - - @NonNull - @Override - public BuildableQuery withoutVertexLabel(@Nullable CqlIdentifier vertexLabelId) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - 
columnsToRename, - columnToAlter, - columnToAlterType, - new DseTableVertexOperation(DseTableGraphOperationType.WITHOUT, vertexLabelId), - edgeOperation, - options); - } - - @NonNull - @Override - public BuildableQuery withEdgeLabel( - @Nullable CqlIdentifier edgeLabelId, - @NonNull DseGraphEdgeSide from, - @NonNull DseGraphEdgeSide to) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - new DseTableEdgeOperation(DseTableGraphOperationType.WITH, edgeLabelId, from, to), - options); - } - - @NonNull - @Override - public BuildableQuery withoutEdgeLabel(@Nullable CqlIdentifier edgeLabelId) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - new DseTableEdgeOperation(DseTableGraphOperationType.WITHOUT, edgeLabelId, null, null), - options); - } - - @NonNull - @Override - public AlterDseTableWithOptionsEnd withOption(@NonNull String name, @NonNull Object value) { - return new DefaultAlterDseTable( - keyspace, - tableName, - dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, - columnsToAddStatic, - columnsToDrop, - columnsToRename, - columnToAlter, - columnToAlterType, - vertexOperation, - edgeOperation, - ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder("ALTER TABLE "); - - CqlHelper.qualify(keyspace, tableName, builder); - - if (columnToAlter != null) { - return builder - .append(" ALTER ") - .append(columnToAlter.asCql(true)) - .append(" TYPE ") - .append(columnToAlterType.asCql(true, true)) - .toString(); - } else if (!columnsToAdd.isEmpty()) { - builder.append(" ADD "); - if 
(columnsToAdd.size() > 1) { - builder.append('('); - } - boolean first = true; - for (Map.Entry column : columnsToAddInOrder.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(column.getKey().asCql(true)) - .append(' ') - .append(column.getValue().asCql(true, true)); - - if (columnsToAddStatic.contains(column.getKey())) { - builder.append(" STATIC"); - } - } - if (columnsToAdd.size() > 1) { - builder.append(')'); - } - return builder.toString(); - } else if (!columnsToDrop.isEmpty()) { - boolean moreThanOneDrop = columnsToDrop.size() > 1; - CqlHelper.appendIds( - columnsToDrop, - builder, - moreThanOneDrop ? " DROP (" : " DROP ", - ",", - moreThanOneDrop ? ")" : ""); - return builder.toString(); - } else if (!columnsToRename.isEmpty()) { - builder.append(" RENAME "); - boolean first = true; - for (Map.Entry entry : columnsToRename.entrySet()) { - if (first) { - first = false; - } else { - builder.append(" AND "); - } - builder - .append(entry.getKey().asCql(true)) - .append(" TO ") - .append(entry.getValue().asCql(true)); - } - return builder.toString(); - } else if (vertexOperation != null) { - builder.append(' ').append(vertexOperation.getType()).append(' '); - vertexOperation.append(builder); - } else if (edgeOperation != null) { - builder.append(' ').append(edgeOperation.getType()).append(' '); - edgeOperation.append(builder); - } else if (dropCompactStorage) { - return builder.append(" DROP COMPACT STORAGE").toString(); - } else if (!options.isEmpty()) { - return builder.append(OptionsUtils.buildOptions(options, true)).toString(); - } - - // While this is incomplete, we should return partially build query at this point for toString - // purposes. 
- return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return tableName; - } - - @NonNull - public ImmutableMap getColumnsToAddInOrder() { - return columnsToAddInOrder; - } - - @NonNull - public ImmutableSet getColumnsToAddRegular() { - return columnsToAdd; - } - - @NonNull - public ImmutableSet getColumnsToAddStatic() { - return columnsToAddStatic; - } - - @NonNull - public ImmutableSet getColumnsToDrop() { - return columnsToDrop; - } - - @NonNull - public ImmutableMap getColumnsToRename() { - return columnsToRename; - } - - @Nullable - public CqlIdentifier getColumnToAlter() { - return columnToAlter; - } - - @Nullable - public DataType getColumnToAlterType() { - return columnToAlterType; - } - - @Nullable - public DseTableVertexOperation getVertexOperation() { - return vertexOperation; - } - - @Nullable - public DseTableEdgeOperation getEdgeOperation() { - return edgeOperation; - } - - public boolean isDropCompactStorage() { - return dropCompactStorage; - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseAggregate.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseAggregate.java deleted file mode 100644 index 38c13f6e7d5..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseAggregate.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateEnd; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateStateFunc; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import net.jcip.annotations.Immutable; - -/** - * Implements DSE extended interfaces for creating aggregates. This class provides the same - * functionality as the Cassandra OSS {@link - * com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateAggregate} implementation, with - * the additional DSE specific extended functionality (DETERMINISTIC keyword). 
- */ -@Immutable -public class DefaultCreateDseAggregate - implements CreateDseAggregateEnd, CreateDseAggregateStart, CreateDseAggregateStateFunc { - - private final CqlIdentifier keyspace; - private final CqlIdentifier functionName; - private boolean orReplace; - private boolean ifNotExists; - private final ImmutableList parameters; - private final CqlIdentifier sFunc; - private final DataType sType; - private final CqlIdentifier finalFunc; - private final Term term; - private final boolean deterministic; - - public DefaultCreateDseAggregate(@NonNull CqlIdentifier functionName) { - this(null, functionName); - } - - public DefaultCreateDseAggregate( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { - this(keyspace, functionName, false, false, ImmutableList.of(), null, null, null, null, false); - } - - public DefaultCreateDseAggregate( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier functionName, - boolean orReplace, - boolean ifNotExists, - @NonNull ImmutableList parameters, - @Nullable CqlIdentifier sFunc, - @Nullable DataType sType, - @Nullable CqlIdentifier finalFunc, - @Nullable Term term, - boolean deterministic) { - this.keyspace = keyspace; - this.functionName = functionName; - this.orReplace = orReplace; - this.ifNotExists = ifNotExists; - this.parameters = parameters; - this.sFunc = sFunc; - this.sType = sType; - this.finalFunc = finalFunc; - this.term = term; - this.deterministic = deterministic; - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE "); - if (orReplace) { - builder.append("OR REPLACE "); - } - builder.append("AGGREGATE "); - - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - CqlHelper.qualify(keyspace, functionName, builder); - - builder.append(" ("); - boolean first = true; - for (DataType param : parameters) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder.append(param.asCql(false, 
true)); - } - builder.append(')'); - if (sFunc != null) { - builder.append(" SFUNC "); - builder.append(sFunc.asCql(true)); - } - if (sType != null) { - builder.append(" STYPE "); - builder.append(sType.asCql(false, true)); - } - if (finalFunc != null) { - builder.append(" FINALFUNC "); - builder.append(finalFunc.asCql(true)); - } - if (term != null) { - builder.append(" INITCOND "); - term.appendTo(builder); - } - // deterministic - if (deterministic) { - builder.append(" DETERMINISTIC"); - } - return builder.toString(); - } - - @NonNull - @Override - public CreateDseAggregateEnd withInitCond(@NonNull Term term) { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateStart ifNotExists() { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - orReplace, - true, - parameters, - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateStart orReplace() { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - true, - ifNotExists, - parameters, - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateStart withParameter(@NonNull DataType paramType) { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - orReplace, - ifNotExists, - ImmutableCollections.append(parameters, paramType), - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateStateFunc withSFunc(@NonNull CqlIdentifier sFunc) { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateEnd withSType(@NonNull DataType sType) { - return new DefaultCreateDseAggregate( - 
keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateEnd withFinalFunc(@NonNull CqlIdentifier finalFunc) { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - sFunc, - sType, - finalFunc, - term, - deterministic); - } - - @NonNull - @Override - public CreateDseAggregateEnd deterministic() { - return new DefaultCreateDseAggregate( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - sFunc, - sType, - finalFunc, - term, - true); - } - - @Override - public String toString() { - return asCql(); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getFunctionName() { - return functionName; - } - - public boolean isOrReplace() { - return orReplace; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - @NonNull - public ImmutableList getParameters() { - return parameters; - } - - @Nullable - public CqlIdentifier getsFunc() { - return sFunc; - } - - @Nullable - public DataType getsType() { - return sType; - } - - @Nullable - public CqlIdentifier getFinalFunc() { - return finalFunc; - } - - @Nullable - public Term getTerm() { - return term; - } - - public boolean isDeterministic() { - return deterministic; - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseFunction.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseFunction.java deleted file mode 100644 index 679629bf893..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseFunction.java +++ /dev/null @@ -1,444 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionEnd; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionWithLanguage; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionWithNullOption; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionWithType; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -/** - * Implements DSE extended interfaces for creating functions. 
This class provides the same - * functionality as the Cassandra OSS {@link - * com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateFunction} implementation, with - * the additional DSE specific extended functionality (DETERMINISTIC and MONOTONIC keywords). - */ -@Immutable -public class DefaultCreateDseFunction - implements CreateDseFunctionEnd, - CreateDseFunctionStart, - CreateDseFunctionWithLanguage, - CreateDseFunctionWithNullOption, - CreateDseFunctionWithType { - - private final CqlIdentifier keyspace; - private final CqlIdentifier functionName; - private boolean orReplace; - private boolean ifNotExists; - private final ImmutableMap parameters; - private boolean returnsNullOnNull; - private final DataType returnType; - private final String language; - private final String functionBody; - private final boolean deterministic; - private final boolean globallyMonotonic; - private final CqlIdentifier monotonicOn; - - public DefaultCreateDseFunction(CqlIdentifier functionName) { - this(null, functionName); - } - - public DefaultCreateDseFunction(CqlIdentifier keyspace, CqlIdentifier functionName) { - this( - keyspace, - functionName, - false, - false, - ImmutableMap.of(), - false, - null, - null, - null, - false, - false, - null); - } - - public DefaultCreateDseFunction( - CqlIdentifier keyspace, - CqlIdentifier functionName, - boolean orReplace, - boolean ifNotExists, - ImmutableMap parameters, - boolean returnsNullOnNull, - DataType returns, - String language, - String functionBody, - boolean deterministic, - boolean globallyMonotonic, - CqlIdentifier monotonicOn) { - this.keyspace = keyspace; - this.functionName = functionName; - this.orReplace = orReplace; - this.ifNotExists = ifNotExists; - this.parameters = parameters; - this.returnsNullOnNull = returnsNullOnNull; - this.returnType = returns; - this.language = language; - this.functionBody = functionBody; - this.deterministic = deterministic; - this.globallyMonotonic = globallyMonotonic; - 
this.monotonicOn = monotonicOn; - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE "); - if (orReplace) { - builder.append("OR REPLACE "); - } - builder.append("FUNCTION "); - - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - CqlHelper.qualify(keyspace, functionName, builder); - - builder.append(" ("); - - boolean first = true; - for (Map.Entry param : parameters.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(param.getKey().asCql(true)) - .append(' ') - .append(param.getValue().asCql(false, true)); - } - builder.append(')'); - if (returnsNullOnNull) { - builder.append(" RETURNS NULL"); - } else { - builder.append(" CALLED"); - } - - builder.append(" ON NULL INPUT"); - - if (returnType == null) { - // return type has not been provided yet. - return builder.toString(); - } - - builder.append(" RETURNS "); - builder.append(returnType.asCql(false, true)); - - // deterministic - if (deterministic) { - builder.append(" DETERMINISTIC"); - } - - // monotonic - if (globallyMonotonic) { - builder.append(" MONOTONIC"); - } else if (monotonicOn != null) { - builder.append(" MONOTONIC ON ").append(monotonicOn.asCql(true)); - } - - if (language == null) { - // language has not been provided yet. - return builder.toString(); - } - - builder.append(" LANGUAGE "); - builder.append(language); - - if (functionBody == null) { - // body has not been provided yet. 
- return builder.toString(); - } - - builder.append(" AS "); - builder.append(functionBody); - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public CreateDseFunctionEnd as(@NonNull String functionBody) { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionWithLanguage withLanguage(@NonNull String language) { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionStart ifNotExists() { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - true, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionStart orReplace() { - return new DefaultCreateDseFunction( - keyspace, - functionName, - true, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionStart withParameter( - @NonNull CqlIdentifier paramName, @NonNull DataType paramType) { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - ImmutableCollections.append(parameters, paramName, paramType), - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionWithNullOption returnsNullOnNull() { - return new DefaultCreateDseFunction( - keyspace, - 
functionName, - orReplace, - ifNotExists, - parameters, - true, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionWithNullOption calledOnNull() { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - false, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionWithType deterministic() { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - true, - globallyMonotonic, - monotonicOn); - } - - @NonNull - @Override - public CreateDseFunctionWithType monotonic() { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - true, - null); - } - - @NonNull - @Override - public CreateDseFunctionWithType monotonicOn(@NonNull CqlIdentifier monotonicColumn) { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - false, - monotonicColumn); - } - - @NonNull - @Override - public CreateDseFunctionWithType returnsType(@NonNull DataType returnType) { - return new DefaultCreateDseFunction( - keyspace, - functionName, - orReplace, - ifNotExists, - parameters, - returnsNullOnNull, - returnType, - language, - functionBody, - deterministic, - globallyMonotonic, - monotonicOn); - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getFunction() { - return functionName; - } - - public boolean isOrReplace() { - return orReplace; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - 
@NonNull - public ImmutableMap getParameters() { - return parameters; - } - - public boolean isReturnsNullOnNull() { - return returnsNullOnNull; - } - - @Nullable - public DataType getReturnType() { - return returnType; - } - - @Nullable - public String getLanguage() { - return language; - } - - @Nullable - public String getFunctionBody() { - return functionBody; - } - - public boolean isDeterministic() { - return deterministic; - } - - public boolean isGloballyMonotonic() { - return globallyMonotonic; - } - - @Nullable - public CqlIdentifier getMonotonicOn() { - return monotonicOn; - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseKeyspace.java deleted file mode 100644 index 3b542254dab..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseKeyspace.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseKeyspace; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseKeyspaceStart; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateDseKeyspace implements CreateDseKeyspace, CreateDseKeyspaceStart { - - private final CqlIdentifier keyspaceName; - private final boolean ifNotExists; - private final ImmutableMap options; - - public DefaultCreateDseKeyspace(@NonNull CqlIdentifier keyspaceName) { - this(keyspaceName, false, ImmutableMap.of()); - } - - public DefaultCreateDseKeyspace( - @NonNull CqlIdentifier keyspaceName, - boolean ifNotExists, - @NonNull ImmutableMap options) { - this.keyspaceName = keyspaceName; - this.ifNotExists = ifNotExists; - this.options = options; - } - - @NonNull - @Override - public CreateDseKeyspace withOption(@NonNull String name, @NonNull Object value) { - return new DefaultCreateDseKeyspace( - keyspaceName, ifNotExists, ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public CreateDseKeyspaceStart ifNotExists() { - return new DefaultCreateDseKeyspace(keyspaceName, true, options); - } - - @NonNull - @Override - public CreateDseKeyspace withReplicationOptions(@NonNull Map replicationOptions) { - return withOption("replication", replicationOptions); - } - - @NonNull - @Override - public String asCql() { - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE KEYSPACE "); - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - - 
builder.append(keyspaceName.asCql(true)); - builder.append(OptionsUtils.buildOptions(options, true)); - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @NonNull - public CqlIdentifier getKeyspace() { - return keyspaceName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseTable.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseTable.java deleted file mode 100644 index 86169cdd29b..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseTable.java +++ /dev/null @@ -1,479 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import static com.datastax.oss.driver.internal.querybuilder.schema.Utils.appendSet; - -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTable; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTableStart; -import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTableWithOptions; -import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; -import net.jcip.annotations.Immutable; - -@Immutable -public class DefaultCreateDseTable - implements CreateDseTableStart, CreateDseTable, CreateDseTableWithOptions { - - private final CqlIdentifier keyspace; - private final CqlIdentifier tableName; - - private final boolean ifNotExists; - private final boolean compactStorage; - - private final ImmutableMap options; - - private final ImmutableMap columnsInOrder; - - private final ImmutableSet partitionKeyColumns; - private final ImmutableSet clusteringKeyColumns; - private final ImmutableSet staticColumns; - private final ImmutableSet regularColumns; - - private final ImmutableMap orderings; - - private final DseTableVertexOperation vertexOperation; - private final DseTableEdgeOperation edgeOperation; - - public DefaultCreateDseTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) 
{ - this( - keyspace, - tableName, - false, - false, - ImmutableMap.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableSet.of(), - ImmutableMap.of(), - null, - null, - ImmutableMap.of()); - } - - public DefaultCreateDseTable( - @Nullable CqlIdentifier keyspace, - @NonNull CqlIdentifier tableName, - boolean ifNotExists, - boolean compactStorage, - @NonNull ImmutableMap columnsInOrder, - @NonNull ImmutableSet partitionKeyColumns, - @NonNull ImmutableSet clusteringKeyColumns, - @NonNull ImmutableSet staticColumns, - @NonNull ImmutableSet regularColumns, - @NonNull ImmutableMap orderings, - @Nullable DseTableVertexOperation vertexOperation, - @Nullable DseTableEdgeOperation edgeOperation, - @NonNull ImmutableMap options) { - this.keyspace = keyspace; - this.tableName = tableName; - this.ifNotExists = ifNotExists; - this.compactStorage = compactStorage; - this.columnsInOrder = columnsInOrder; - this.partitionKeyColumns = partitionKeyColumns; - this.clusteringKeyColumns = clusteringKeyColumns; - this.staticColumns = staticColumns; - this.regularColumns = regularColumns; - this.orderings = orderings; - this.options = options; - this.vertexOperation = vertexOperation; - this.edgeOperation = edgeOperation; - } - - @NonNull - @Override - public CreateDseTableStart ifNotExists() { - return new DefaultCreateDseTable( - keyspace, - tableName, - true, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTable withPartitionKey( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - appendSet(partitionKeyColumns, columnName), - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - 
vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTable withClusteringColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - partitionKeyColumns, - appendSet(clusteringKeyColumns, columnName), - staticColumns, - regularColumns, - orderings, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTable withColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - appendSet(regularColumns, columnName), - orderings, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTable withStaticColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - ImmutableCollections.append(columnsInOrder, columnName, dataType), - partitionKeyColumns, - clusteringKeyColumns, - appendSet(staticColumns, columnName), - regularColumns, - orderings, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTableWithOptions withCompactStorage() { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - true, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTableWithOptions withClusteringOrderByIds( - @NonNull Map orderings) { - return withClusteringOrders(ImmutableCollections.concat(this.orderings, orderings)); - } - - @NonNull - 
@Override - public CreateDseTableWithOptions withClusteringOrder( - @NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order) { - return withClusteringOrders(ImmutableCollections.append(orderings, columnName, order)); - } - - @NonNull - public CreateDseTableWithOptions withClusteringOrders( - @NonNull ImmutableMap orderings) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - vertexOperation, - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTableWithOptions withVertexLabel(@Nullable CqlIdentifier vertexLabelId) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - new DseTableVertexOperation(DseTableGraphOperationType.WITH, vertexLabelId), - edgeOperation, - options); - } - - @NonNull - @Override - public CreateDseTableWithOptions withEdgeLabel( - @Nullable CqlIdentifier edgeLabelId, - @NonNull DseGraphEdgeSide from, - @NonNull DseGraphEdgeSide to) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - vertexOperation, - new DseTableEdgeOperation(DseTableGraphOperationType.WITH, edgeLabelId, from, to), - options); - } - - @NonNull - @Override - public CreateDseTable withOption(@NonNull String name, @NonNull Object value) { - return new DefaultCreateDseTable( - keyspace, - tableName, - ifNotExists, - compactStorage, - columnsInOrder, - partitionKeyColumns, - clusteringKeyColumns, - staticColumns, - regularColumns, - orderings, - vertexOperation, - edgeOperation, - ImmutableCollections.append(options, name, value)); - } - - @NonNull - @Override - public String asCql() 
{ - StringBuilder builder = new StringBuilder(); - - builder.append("CREATE TABLE "); - if (ifNotExists) { - builder.append("IF NOT EXISTS "); - } - - CqlHelper.qualify(keyspace, tableName, builder); - - if (columnsInOrder.isEmpty()) { - // no columns provided yet. - return builder.toString(); - } - - boolean singlePrimaryKey = partitionKeyColumns.size() == 1 && clusteringKeyColumns.size() == 0; - - builder.append(" ("); - - boolean first = true; - for (Map.Entry column : columnsInOrder.entrySet()) { - if (first) { - first = false; - } else { - builder.append(','); - } - builder - .append(column.getKey().asCql(true)) - .append(' ') - .append(column.getValue().asCql(true, true)); - - if (singlePrimaryKey && partitionKeyColumns.contains(column.getKey())) { - builder.append(" PRIMARY KEY"); - } else if (staticColumns.contains(column.getKey())) { - builder.append(" STATIC"); - } - } - - if (!singlePrimaryKey) { - builder.append(","); - CqlHelper.buildPrimaryKey(partitionKeyColumns, clusteringKeyColumns, builder); - } - - builder.append(')'); - - boolean firstOption = true; - - if (compactStorage) { - firstOption = false; - builder.append(" WITH COMPACT STORAGE"); - } - - if (!orderings.isEmpty()) { - if (firstOption) { - builder.append(" WITH "); - firstOption = false; - } else { - builder.append(" AND "); - } - builder.append("CLUSTERING ORDER BY ("); - boolean firstClustering = true; - - for (Map.Entry ordering : orderings.entrySet()) { - if (firstClustering) { - firstClustering = false; - } else { - builder.append(','); - } - builder - .append(ordering.getKey().asCql(true)) - .append(' ') - .append(ordering.getValue().toString()); - } - - builder.append(')'); - } - - if (vertexOperation != null) { - if (firstOption) { - builder.append(" WITH "); - firstOption = false; - } else { - builder.append(" AND "); - } - vertexOperation.append(builder); - } else if (edgeOperation != null) { - if (firstOption) { - builder.append(" WITH "); - firstOption = false; - } else { - 
builder.append(" AND "); - } - edgeOperation.append(builder); - } - - builder.append(OptionsUtils.buildOptions(options, firstOption)); - - return builder.toString(); - } - - @Override - public String toString() { - return asCql(); - } - - @NonNull - @Override - public Map getOptions() { - return options; - } - - @Nullable - public CqlIdentifier getKeyspace() { - return keyspace; - } - - @NonNull - public CqlIdentifier getTable() { - return tableName; - } - - public boolean isIfNotExists() { - return ifNotExists; - } - - public boolean isCompactStorage() { - return compactStorage; - } - - @NonNull - public ImmutableMap getColumnsInOrder() { - return columnsInOrder; - } - - @NonNull - public ImmutableSet getPartitionKeyColumns() { - return partitionKeyColumns; - } - - @NonNull - public ImmutableSet getClusteringKeyColumns() { - return clusteringKeyColumns; - } - - @NonNull - public ImmutableSet getStaticColumns() { - return staticColumns; - } - - @NonNull - public ImmutableSet getRegularColumns() { - return regularColumns; - } - - @NonNull - public ImmutableMap getOrderings() { - return orderings; - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultDseGraphEdgeSide.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultDseGraphEdgeSide.java deleted file mode 100644 index 32f43ab8ff2..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultDseGraphEdgeSide.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; - -public class DefaultDseGraphEdgeSide implements DseGraphEdgeSide { - - private final CqlIdentifier tableId; - private final ImmutableList partitionKeyColumns; - private final ImmutableList clusteringColumns; - - public DefaultDseGraphEdgeSide(CqlIdentifier tableId) { - this(tableId, ImmutableList.of(), ImmutableList.of()); - } - - private DefaultDseGraphEdgeSide( - CqlIdentifier tableId, - ImmutableList partitionKeyColumns, - ImmutableList clusteringColumns) { - this.tableId = tableId; - this.partitionKeyColumns = partitionKeyColumns; - this.clusteringColumns = clusteringColumns; - } - - @NonNull - @Override - public DseGraphEdgeSide withPartitionKey(@NonNull CqlIdentifier columnId) { - return new DefaultDseGraphEdgeSide( - tableId, ImmutableCollections.append(partitionKeyColumns, columnId), clusteringColumns); - } - - @NonNull - @Override - public DseGraphEdgeSide withClusteringColumn(@NonNull CqlIdentifier columnId) { - return new DefaultDseGraphEdgeSide( - tableId, partitionKeyColumns, ImmutableCollections.append(clusteringColumns, columnId)); - } - - @NonNull - @Override - public CqlIdentifier getTableId() { - return tableId; - } 
- - @NonNull - @Override - public List getPartitionKeyColumns() { - return partitionKeyColumns; - } - - @NonNull - @Override - public List getClusteringColumns() { - return clusteringColumns; - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableEdgeOperation.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableEdgeOperation.java deleted file mode 100644 index f514158e853..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableEdgeOperation.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.querybuilder.CqlHelper; -import java.util.List; - -public class DseTableEdgeOperation { - - private final DseTableGraphOperationType type; - private final CqlIdentifier label; - private final DseGraphEdgeSide from; - private final DseGraphEdgeSide to; - - public DseTableEdgeOperation( - DseTableGraphOperationType type, - CqlIdentifier label, - DseGraphEdgeSide from, - DseGraphEdgeSide to) { - this.type = type; - this.label = label; - this.from = from; - this.to = to; - } - - public DseTableGraphOperationType getType() { - return type; - } - - public CqlIdentifier getLabel() { - return label; - } - - public DseGraphEdgeSide getFrom() { - return from; - } - - public DseGraphEdgeSide getTo() { - return to; - } - - public void append(StringBuilder builder) { - builder.append("EDGE LABEL"); - if (label != null) { - builder.append(' ').append(label.asCql(true)); - } - if (type == DseTableGraphOperationType.WITH) { - builder.append(" FROM "); - append(from, builder); - builder.append(" TO "); - append(to, builder); - } - } - - private static void append(DseGraphEdgeSide side, StringBuilder builder) { - builder.append(side.getTableId().asCql(true)).append('('); - List pkColumns = side.getPartitionKeyColumns(); - if (pkColumns.size() == 1) { - builder.append(pkColumns.get(0).asCql(true)); - } else { - CqlHelper.appendIds(pkColumns, builder, "(", ",", ")"); - } - CqlHelper.appendIds(side.getClusteringColumns(), builder, ",", ",", null); - builder.append(')'); - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableGraphOperationType.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableGraphOperationType.java deleted file mode 100644 index 
35d5dd7c80b..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableGraphOperationType.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -public enum DseTableGraphOperationType { - WITH, - WITHOUT -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableVertexOperation.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableVertexOperation.java deleted file mode 100644 index 64a2d44c29a..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableVertexOperation.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; - -public class DseTableVertexOperation { - - private final DseTableGraphOperationType type; - private final CqlIdentifier label; - - public DseTableVertexOperation(DseTableGraphOperationType type, CqlIdentifier label) { - this.type = type; - this.label = label; - } - - public DseTableGraphOperationType getType() { - return type; - } - - public CqlIdentifier getLabel() { - return label; - } - - public void append(StringBuilder builder) { - builder.append("VERTEX LABEL"); - if (label != null) { - builder.append(' ').append(label.asCql(true)); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/package-info.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/package-info.java deleted file mode 100644 index de137d9f952..00000000000 --- a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * This package effectively mirrors the Cassandra OSS default query and schema implementations to - * allow extended schema and query building for the DSE driver. In general, a class in this package - * will need to implement the DSE equivalent interfaces for any DSE specific extensions. - */ -package com.datastax.dse.driver.internal.querybuilder.schema; diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java deleted file mode 100644 index 58ef2c88647..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.querybuilder.term.Term; - -/** - * A bind marker in the query. - * - *

    It can be anonymous or named, for example: - * - *

    {@code
    - * selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker())
    - * // SELECT * FROM foo WHERE k=?
    - *
    - * selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker("key"))
    - * // SELECT * FROM foo WHERE k=:key
    - * }
    - */ -public interface BindMarker extends Term {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java deleted file mode 100644 index 729b73deead..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; -import com.datastax.oss.driver.api.core.cql.Statement; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** - * End state for the query builder DSL, which allows the generation of a CQL query. - * - *

    The API returns this type as soon as there is enough information for a minimal query (in most - * cases, it's still possible to call more methods to keep building). - */ -public interface BuildableQuery { - - /** - * Builds the CQL query as a raw string. - * - *

    Use this if you plan to pass the query to {@link CqlSession#execute(String)} or {@link - * CqlSession#prepare(String)} without any further customization. - */ - @NonNull - String asCql(); - - /** - * Builds the CQL query and wraps it in a simple statement. - * - *

    This is a similar to: - * - *

    {@code
    -   * SimpleStatement.newInstance(asCql())
    -   * }
    - * - * In addition, some implementations might try to infer additional statement properties (such as - * {@link Statement#isIdempotent()}). - */ - @NonNull - default SimpleStatement build() { - return SimpleStatement.newInstance(asCql()); - } - - /** - * Builds the CQL query and wraps it in a simple statement, also providing positional values for - * bind markers. - * - *

    This is a similar to: - * - *

    {@code
    -   * SimpleStatement.newInstance(asCql(), values)
    -   * }
    - * - * In addition, some implementations might try to infer additional statement properties (such as - * {@link Statement#isIdempotent()}). - */ - @NonNull - default SimpleStatement build(@NonNull Object... values) { - return SimpleStatement.newInstance(asCql(), values); - } - - /** - * Builds the CQL query and wraps it in a simple statement, also providing named values for bind - * markers. - * - *

    This is a similar to: - * - *

    {@code
    -   * SimpleStatement.newInstance(asCql(), namedValues)
    -   * }
    - * - * In addition, some implementations might try to infer additional statement properties (such as - * {@link Statement#isIdempotent()}). - */ - @NonNull - default SimpleStatement build(@NonNull Map namedValues) { - return SimpleStatement.newInstance(asCql(), namedValues); - } - - /** - * Builds the CQL query and wraps it in a simple statement builder. - * - *

    This is equivalent to {@link #build()}, but the builder might be slightly more efficient if - * you plan to customize multiple properties on the statement, for example: - * - *

    {@code
    -   * SimpleStatementBuilder builder =
    -   *     selectFrom("foo")
    -   *         .all()
    -   *         .whereColumn("k").isEqualTo(bindMarker("k"))
    -   *         .whereColumn("c").isLessThan(bindMarker("c"))
    -   *         .builder();
    -   * SimpleStatement statement =
    -   *     builder.addNamedValue("k", 1).addNamedValue("c", 2).setTracing().build();
    -   * }
    - * - * In addition, some implementations might try to infer additional statement properties (such as - * {@link Statement#isIdempotent()}). - */ - @NonNull - default SimpleStatementBuilder builder() { - return SimpleStatement.builder(asCql()); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java deleted file mode 100644 index ba06391e628..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** An element in the query builder DSL, that will generate part of a CQL query. 
*/ -public interface CqlSnippet { - void appendTo(@NonNull StringBuilder builder); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java deleted file mode 100644 index 652e3e0de18..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.api.querybuilder.term.Term; - -/** - * A value that will be appended as a CQL literal. - * - *

    For convenience, it can be used both as a selector and a term. The only downside is that the - * {@link #as(CqlIdentifier)} method is only valid when used as a selector; make sure you don't use - * it elsewhere, or you will generate invalid CQL that will fail at execution time. - */ -public interface Literal extends Selector, Term {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java deleted file mode 100644 index 8df2b7efdd0..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java +++ /dev/null @@ -1,541 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.querybuilder.delete.DeleteSelection; -import com.datastax.oss.driver.api.querybuilder.insert.InsertInto; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.select.SelectFrom; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.api.querybuilder.truncate.Truncate; -import com.datastax.oss.driver.api.querybuilder.update.UpdateStart; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; -import com.datastax.oss.driver.internal.querybuilder.DefaultLiteral; -import com.datastax.oss.driver.internal.querybuilder.DefaultRaw; -import com.datastax.oss.driver.internal.querybuilder.delete.DefaultDelete; -import com.datastax.oss.driver.internal.querybuilder.insert.DefaultInsert; -import com.datastax.oss.driver.internal.querybuilder.select.DefaultBindMarker; -import 
com.datastax.oss.driver.internal.querybuilder.select.DefaultSelect; -import com.datastax.oss.driver.internal.querybuilder.term.BinaryArithmeticTerm; -import com.datastax.oss.driver.internal.querybuilder.term.FunctionTerm; -import com.datastax.oss.driver.internal.querybuilder.term.OppositeTerm; -import com.datastax.oss.driver.internal.querybuilder.term.TupleTerm; -import com.datastax.oss.driver.internal.querybuilder.term.TypeHintTerm; -import com.datastax.oss.driver.internal.querybuilder.truncate.DefaultTruncate; -import com.datastax.oss.driver.internal.querybuilder.update.DefaultUpdate; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; - -/** A Domain-Specific Language to build CQL queries using Java code. */ -public class QueryBuilder { - - /** Starts a SELECT query for a qualified table. */ - @NonNull - public static SelectFrom selectFrom( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - return new DefaultSelect(keyspace, table); - } - - /** - * Shortcut for {@link #selectFrom(CqlIdentifier, CqlIdentifier) - * selectFrom(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table))} - */ - @NonNull - public static SelectFrom selectFrom(@Nullable String keyspace, @NonNull String table) { - return selectFrom( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } - - /** Starts a SELECT query for an unqualified table. */ - @NonNull - public static SelectFrom selectFrom(@NonNull CqlIdentifier table) { - return selectFrom(null, table); - } - - /** Shortcut for {@link #selectFrom(CqlIdentifier) selectFrom(CqlIdentifier.fromCql(table))} */ - @NonNull - public static SelectFrom selectFrom(@NonNull String table) { - return selectFrom(CqlIdentifier.fromCql(table)); - } - - /** Starts an INSERT query for a qualified table. 
*/ - @NonNull - public static InsertInto insertInto( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - return new DefaultInsert(keyspace, table); - } - - /** - * Shortcut for {@link #insertInto(CqlIdentifier, CqlIdentifier) - * insertInto(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table))}. - */ - @NonNull - public static InsertInto insertInto(@Nullable String keyspace, @NonNull String table) { - return insertInto( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } - - /** Starts an INSERT query for an unqualified table. */ - @NonNull - public static InsertInto insertInto(@NonNull CqlIdentifier table) { - return insertInto(null, table); - } - - /** Shortcut for {@link #insertInto(CqlIdentifier) insertInto(CqlIdentifier.fromCql(table))}. */ - @NonNull - public static InsertInto insertInto(@NonNull String table) { - return insertInto(CqlIdentifier.fromCql(table)); - } - - /** Starts an UPDATE query for a qualified table. */ - @NonNull - public static UpdateStart update(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - return new DefaultUpdate(keyspace, table); - } - - /** - * Shortcut for {@link #update(CqlIdentifier, CqlIdentifier) - * update(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table))} - */ - @NonNull - public static UpdateStart update(@Nullable String keyspace, @NonNull String table) { - return update( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } - - /** Starts an UPDATE query for an unqualified table. */ - @NonNull - public static UpdateStart update(@NonNull CqlIdentifier table) { - return update(null, table); - } - - /** Shortcut for {@link #update(CqlIdentifier) update(CqlIdentifier.fromCql(table))} */ - @NonNull - public static UpdateStart update(@NonNull String table) { - return update(CqlIdentifier.fromCql(table)); - } - - /** Starts a DELETE query for a qualified table. 
*/ - @NonNull - public static DeleteSelection deleteFrom( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - return new DefaultDelete(keyspace, table); - } - - /** - * Shortcut for {@link #deleteFrom(CqlIdentifier, CqlIdentifier) - * deleteFrom(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table))} - */ - @NonNull - public static DeleteSelection deleteFrom(@Nullable String keyspace, @NonNull String table) { - return deleteFrom( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } - - /** Starts a DELETE query for an unqualified table. */ - @NonNull - public static DeleteSelection deleteFrom(@NonNull CqlIdentifier table) { - return deleteFrom(null, table); - } - - /** Shortcut for {@link #deleteFrom(CqlIdentifier) deleteFrom(CqlIdentifier.fromCql(table))} */ - @NonNull - public static DeleteSelection deleteFrom(@NonNull String table) { - return deleteFrom(CqlIdentifier.fromCql(table)); - } - - /** - * An ordered set of anonymous terms, as in {@code WHERE (a, b) = (1, 2)} (on the right-hand - * side). - * - *

    For example, this can be used as the right operand of {@link Relation#columns(String...)}. - */ - @NonNull - public static Term tuple(@NonNull Iterable components) { - return new TupleTerm(components); - } - - /** Var-arg equivalent of {@link #tuple(Iterable)}. */ - @NonNull - public static Term tuple(@NonNull Term... components) { - return tuple(Arrays.asList(components)); - } - - /** The sum of two terms, as in {@code WHERE k = left + right}. */ - @NonNull - public static Term add(@NonNull Term left, @NonNull Term right) { - return new BinaryArithmeticTerm(ArithmeticOperator.SUM, left, right); - } - - /** The difference of two terms, as in {@code WHERE k = left - right}. */ - @NonNull - public static Term subtract(@NonNull Term left, @NonNull Term right) { - return new BinaryArithmeticTerm(ArithmeticOperator.DIFFERENCE, left, right); - } - - /** The product of two terms, as in {@code WHERE k = left * right}. */ - @NonNull - public static Term multiply(@NonNull Term left, @NonNull Term right) { - return new BinaryArithmeticTerm(ArithmeticOperator.PRODUCT, left, right); - } - - /** The quotient of two terms, as in {@code WHERE k = left / right}. */ - @NonNull - public static Term divide(@NonNull Term left, @NonNull Term right) { - return new BinaryArithmeticTerm(ArithmeticOperator.QUOTIENT, left, right); - } - - /** The remainder of two terms, as in {@code WHERE k = left % right}. */ - @NonNull - public static Term remainder(@NonNull Term left, @NonNull Term right) { - return new BinaryArithmeticTerm(ArithmeticOperator.REMAINDER, left, right); - } - - /** The opposite of a term, as in {@code WHERE k = -argument}. */ - @NonNull - public static Term negate(@NonNull Term argument) { - return new OppositeTerm(argument); - } - - /** A function call as a term, as in {@code WHERE k = f(arguments)}. 
*/ - @NonNull - public static Term function( - @NonNull CqlIdentifier functionId, @NonNull Iterable arguments) { - return function(null, functionId, arguments); - } - - /** Var-arg equivalent of {@link #function(CqlIdentifier, Iterable)}. */ - @NonNull - public static Term function(@NonNull CqlIdentifier functionId, @NonNull Term... arguments) { - return function(functionId, Arrays.asList(arguments)); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, Iterable) - * function(CqlIdentifier.fromCql(functionName), arguments)}. - */ - @NonNull - public static Term function(@NonNull String functionName, @NonNull Iterable arguments) { - return function(CqlIdentifier.fromCql(functionName), arguments); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, Term...) - * function(CqlIdentifier.fromCql(functionName), arguments)}. - */ - @NonNull - public static Term function(@NonNull String functionName, @NonNull Term... arguments) { - return function(CqlIdentifier.fromCql(functionName), arguments); - } - - /** A function call as a term, as in {@code WHERE k = ks.f(arguments)}. */ - @NonNull - public static Term function( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Iterable arguments) { - return new FunctionTerm(keyspaceId, functionId, arguments); - } - - /** Var-arg equivalent of {@link #function(CqlIdentifier, CqlIdentifier, Iterable)}. */ - @NonNull - public static Term function( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Term... arguments) { - return function(keyspaceId, functionId, Arrays.asList(arguments)); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, CqlIdentifier, Iterable) - * function(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(functionName), arguments)}. 
- */ - @NonNull - public static Term function( - @Nullable String keyspaceName, - @NonNull String functionName, - @NonNull Iterable arguments) { - return function( - keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), - CqlIdentifier.fromCql(functionName), - arguments); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, CqlIdentifier, Term...) - * function(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(functionName), arguments)}. - */ - @NonNull - public static Term function( - @Nullable String keyspaceName, @NonNull String functionName, @NonNull Term... arguments) { - return function( - keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), - CqlIdentifier.fromCql(functionName), - arguments); - } - - /** - * Provides a type hint for an expression, as in {@code WHERE k = (double)1/3}. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link #udt(CqlIdentifier)}. - */ - @NonNull - public static Term typeHint(@NonNull Term term, @NonNull DataType targetType) { - return new TypeHintTerm(term, targetType); - } - - /** A call to the built-in {@code now} function as a term. */ - @NonNull - public static Term now() { - return function("now"); - } - - /** A call to the built-in {@code currentTimestamp} function as a term. */ - @NonNull - public static Term currentTimestamp() { - return function("currenttimestamp"); - } - - /** A call to the built-in {@code currentDate} function as a term. */ - @NonNull - public static Term currentDate() { - return function("currentdate"); - } - - /** A call to the built-in {@code currentTime} function as a term. */ - @NonNull - public static Term currentTime() { - return function("currenttime"); - } - - /** A call to the built-in {@code currentTimeUuid} function as a term. */ - @NonNull - public static Term currentTimeUuid() { - return function("currenttimeuuid"); - } - - /** A call to the built-in {@code minTimeUuid} function as a term. */ - @NonNull - public static Term minTimeUuid(@NonNull Term argument) { - return function("mintimeuuid", argument); - } - - /** A call to the built-in {@code maxTimeUuid} function as a term. */ - @NonNull - public static Term maxTimeUuid(@NonNull Term argument) { - return function("maxtimeuuid", argument); - } - - /** A call to the built-in {@code toDate} function as a term. */ - @NonNull - public static Term toDate(@NonNull Term argument) { - return function("todate", argument); - } - - /** A call to the built-in {@code toTimestamp} function as a term. */ - @NonNull - public static Term toTimestamp(@NonNull Term argument) { - return function("totimestamp", argument); - } - - /** A call to the built-in {@code toUnixTimestamp} function as a term. 
*/ - @NonNull - public static Term toUnixTimestamp(@NonNull Term argument) { - return function("tounixtimestamp", argument); - } - - /** - * A literal term, as in {@code WHERE k = 1}. - * - *

    This method can process any type for which there is a default Java to CQL mapping, namely: - * primitive types ({@code Integer=>int, Long=>bigint, String=>text, etc.}), and collections, - * tuples, and user defined types thereof. - * - *

    A null argument will be rendered as {@code NULL}. - * - *

    For custom mappings, use {@link #literal(Object, CodecRegistry)} or {@link #literal(Object, - * TypeCodec)}. - * - * @throws CodecNotFoundException if there is no default CQL mapping for the Java type of {@code - * value}. - */ - @NonNull - public static Literal literal(@Nullable Object value) { - return literal(value, CodecRegistry.DEFAULT); - } - - /** - * A literal term, as in {@code WHERE k = 1}. - * - *

    This is an alternative to {@link #literal(Object)} for custom type mappings. The provided - * registry should contain a codec that can format the value. Typically, this will be your - * session's registry, which is accessible via {@code session.getContext().getCodecRegistry()}. - * - * @see DriverContext#getCodecRegistry() - * @throws CodecNotFoundException if {@code codecRegistry} does not contain any codec that can - * handle {@code value}. - */ - @NonNull - public static Literal literal(@Nullable Object value, @NonNull CodecRegistry codecRegistry) { - if (value instanceof Murmur3Token) { - value = ((Murmur3Token) value).getValue(); - } else if (value instanceof ByteOrderedToken) { - value = ((ByteOrderedToken) value).getValue(); - } else if (value instanceof RandomToken) { - value = ((RandomToken) value).getValue(); - } else if (value instanceof Token) { - throw new IllegalArgumentException("Unsupported token type: " + value.getClass().getName()); - } - try { - return literal(value, (value == null) ? null : codecRegistry.codecFor(value)); - } catch (CodecNotFoundException e) { - assert value != null; - throw new IllegalArgumentException( - String.format( - "Could not inline literal of type %s. " - + "This happens because the driver doesn't know how to map it to a CQL type. " - + "Try passing a TypeCodec or CodecRegistry to literal().", - value.getClass().getName()), - e); - } - } - - /** - * A literal term, as in {@code WHERE k = 1}. - * - *

    This is an alternative to {@link #literal(Object)} for custom type mappings. The value will - * be turned into a string with {@link TypeCodec#format(Object)}, and inlined in the query. - */ - @NonNull - public static Literal literal(@Nullable T value, @Nullable TypeCodec codec) { - // Don't handle Token here, if the user calls this directly we assume they passed a codec that - // can handle the value - return new DefaultLiteral<>(value, codec); - } - - /** - * A raw CQL snippet. - * - *

    The contents will be appended to the query as-is, without any syntax checking or escaping. - * This method should be used with caution, as it's possible to generate invalid CQL that will - * fail at execution time; on the other hand, it can be used as a workaround to handle new CQL - * features that are not yet covered by the query builder. - */ - @NonNull - public static Raw raw(@NonNull String raw) { - return new DefaultRaw(raw); - } - - /** Creates an anonymous bind marker, which appears as {@code ?} in the generated CQL. */ - @NonNull - public static BindMarker bindMarker() { - return bindMarker((CqlIdentifier) null); - } - - /** Creates a named bind marker, which appears as {@code :id} in the generated CQL. */ - @NonNull - public static BindMarker bindMarker(@Nullable CqlIdentifier id) { - return new DefaultBindMarker(id); - } - - /** Shortcut for {@link #bindMarker(CqlIdentifier) bindMarker(CqlIdentifier.fromCql(name))} */ - @NonNull - public static BindMarker bindMarker(@Nullable String name) { - return bindMarker(name == null ? null : CqlIdentifier.fromCql(name)); - } - - /** - * Shortcut to reference a UDT in methods that use a {@link DataType}, such as {@link - * #typeHint(Term, DataType)} and {@link Selector#cast(Selector, DataType)}. - */ - @NonNull - public static UserDefinedType udt(@NonNull CqlIdentifier name) { - return new ShallowUserDefinedType(null, name, false); - } - - /** Shortcut for {@link #udt(CqlIdentifier) udt(CqlIdentifier.fromCql(name))}. */ - @NonNull - public static UserDefinedType udt(@NonNull String name) { - return udt(CqlIdentifier.fromCql(name)); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - * @param table the name of the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(@NonNull CqlIdentifier table) { - return truncate(null, table); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - *

    This is a shortcut for {@link #truncate(CqlIdentifier) - * truncate(CqlIdentifier.fromCql(table))}. - * - * @param table the name of the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(@NonNull String table) { - return truncate(null, CqlIdentifier.fromCql(table)); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - * @param keyspace the name of the keyspace to use. - * @param table the name of the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table) { - return new DefaultTruncate(keyspace, table); - } - - /** - * Creates a new {@code TRUNCATE} query. - * - *

    This is a shortcut for {@link #truncate(CqlIdentifier, CqlIdentifier) - * truncate(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table))}. - * - * @param keyspace the name of the keyspace to use. - * @param table the name of the table to truncate. - * @return the truncation query. - */ - public static Truncate truncate(@Nullable String keyspace, @NonNull String table) { - return truncate( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java deleted file mode 100644 index 0c551bfa557..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.condition.Condition; -import com.datastax.oss.driver.api.querybuilder.relation.Relation; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.api.querybuilder.term.Term; - -/** - * A raw CQL snippet that will be appended to the query as-is, without any syntax checking or - * escaping. - * - *

    To build an instance of this type, use {@link QueryBuilder#raw(String)}. - * - *

    It should be used with caution, as it's possible to generate invalid CQL that will fail at - * execution time; on the other hand, it can be used as a workaround to handle new CQL features that - * are not yet covered by the query builder. - * - *

    For convenience, there is a single raw element in the query builder; it can be used in several - * places: as a selector, relation, etc. The only downside is that the {@link #as(CqlIdentifier)} - * method is only valid when used as a selector; make sure you don't use it elsewhere, or you will - * generate invalid CQL that will fail at execution time. - */ -public interface Raw extends Selector, Relation, Condition, Term {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java deleted file mode 100644 index 27fabf29219..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java +++ /dev/null @@ -1,690 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.querybuilder.schema.AlterKeyspaceStart; -import com.datastax.oss.driver.api.querybuilder.schema.AlterMaterializedViewStart; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTableStart; -import com.datastax.oss.driver.api.querybuilder.schema.AlterTypeStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateAggregateStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateIndexStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateKeyspaceStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateMaterializedViewStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTable; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTableStart; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTableWithOptions; -import com.datastax.oss.driver.api.querybuilder.schema.CreateTypeStart; -import com.datastax.oss.driver.api.querybuilder.schema.Drop; -import com.datastax.oss.driver.api.querybuilder.schema.RelationOptions; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.CompactionStrategy; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.LeveledCompactionStrategy; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.SizeTieredCompactionStrategy; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy; -import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultAlterKeyspace; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultAlterMaterializedView; 
-import com.datastax.oss.driver.internal.querybuilder.schema.DefaultAlterTable; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultAlterType; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateAggregate; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateFunction; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateIndex; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateKeyspace; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateMaterializedView; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateTable; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateType; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultDrop; -import com.datastax.oss.driver.internal.querybuilder.schema.DefaultDropKeyspace; -import com.datastax.oss.driver.internal.querybuilder.schema.compaction.DefaultLeveledCompactionStrategy; -import com.datastax.oss.driver.internal.querybuilder.schema.compaction.DefaultSizeTieredCompactionStrategy; -import com.datastax.oss.driver.internal.querybuilder.schema.compaction.DefaultTimeWindowCompactionStrategy; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** A Domain-Specific Language to build CQL DDL queries using Java code. */ -public class SchemaBuilder { - - /** Starts a CREATE KEYSPACE query. */ - @NonNull - public static CreateKeyspaceStart createKeyspace(@NonNull CqlIdentifier keyspaceName) { - return new DefaultCreateKeyspace(keyspaceName); - } - - /** - * Shortcut for {@link #createKeyspace(CqlIdentifier) - * createKeyspace(CqlIdentifier.fromCql(keyspaceName))} - */ - @NonNull - public static CreateKeyspaceStart createKeyspace(@NonNull String keyspaceName) { - return createKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** Starts an ALTER KEYSPACE query. 
*/ - @NonNull - public static AlterKeyspaceStart alterKeyspace(@NonNull CqlIdentifier keyspaceName) { - return new DefaultAlterKeyspace(keyspaceName); - } - - /** - * Shortcut for {@link #alterKeyspace(CqlIdentifier) - * alterKeyspace(CqlIdentifier.fromCql(keyspaceName)}. - */ - @NonNull - public static AlterKeyspaceStart alterKeyspace(@NonNull String keyspaceName) { - return alterKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** Starts a DROP KEYSPACE query. */ - @NonNull - public static Drop dropKeyspace(@NonNull CqlIdentifier keyspaceName) { - return new DefaultDropKeyspace(keyspaceName); - } - - /** - * Shortcut for {@link #dropKeyspace(CqlIdentifier) - * dropKeyspace(CqlIdentifier.fromCql(keyspaceName)}. - */ - @NonNull - public static Drop dropKeyspace(@NonNull String keyspaceName) { - return dropKeyspace(CqlIdentifier.fromCql(keyspaceName)); - } - - /** - * Starts a CREATE TABLE query with the given table name. This assumes the keyspace name is - * already qualified for the Session or Statement. - */ - @NonNull - public static CreateTableStart createTable(@NonNull CqlIdentifier tableName) { - return new DefaultCreateTable(tableName); - } - - /** Starts a CREATE TABLE query with the given table name for the given keyspace name. 
*/ - @NonNull - public static CreateTableStart createTable( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - return new DefaultCreateTable(keyspace, tableName); - } - - /** - * Shortcut for {@link #createTable(CqlIdentifier) createTable(CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static CreateTableStart createTable(@NonNull String tableName) { - return createTable(CqlIdentifier.fromCql(tableName)); - } - - /** - * Shortcut for {@link #createTable(CqlIdentifier,CqlIdentifier) - * createTable(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static CreateTableStart createTable(@Nullable String keyspace, @NonNull String tableName) { - return createTable( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(tableName)); - } - - /** - * Starts an ALTER TABLE query with the given table name. This assumes the keyspace name is - * already qualified for the Session or Statement. - */ - @NonNull - public static AlterTableStart alterTable(@NonNull CqlIdentifier tableName) { - return new DefaultAlterTable(tableName); - } - - /** Starts an ALTER TABLE query with the given table name for the given keyspace name. */ - @NonNull - public static AlterTableStart alterTable( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - return new DefaultAlterTable(keyspace, tableName); - } - - /** Shortcut for {@link #alterTable(CqlIdentifier) alterTable(CqlIdentifier.fromCql(tableName)} */ - @NonNull - public static AlterTableStart alterTable(@NonNull String tableName) { - return alterTable(CqlIdentifier.fromCql(tableName)); - } - - /** - * Shortcut for {@link #alterTable(CqlIdentifier,CqlIdentifier) - * alterTable(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(tableName)} - */ - @NonNull - public static AlterTableStart alterTable(@Nullable String keyspace, @NonNull String tableName) { - return alterTable( - keyspace == null ? 
null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(tableName)); - } - - /** - * Starts a DROP TABLE query. This assumes the keyspace name is already qualified for the Session - * or Statement. - */ - @NonNull - public static Drop dropTable(@NonNull CqlIdentifier tableName) { - return new DefaultDrop(tableName, "TABLE"); - } - - /** Shortcut for {@link #dropTable(CqlIdentifier) dropTable(CqlIdentifier.fromCql(tableName)}. */ - @NonNull - public static Drop dropTable(@NonNull String tableName) { - return dropTable(CqlIdentifier.fromCql(tableName)); - } - - /** Starts a DROP TABLE query for the given table name for the given keyspace name. */ - @NonNull - public static Drop dropTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { - return new DefaultDrop(keyspace, tableName, "TABLE"); - } - - /** - * Shortcut for {@link #dropTable(CqlIdentifier,CqlIdentifier) - * dropTable(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(tableName)}. - */ - @NonNull - public static Drop dropTable(@Nullable String keyspace, @NonNull String tableName) { - return dropTable( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(tableName)); - } - - /** - * Starts a CREATE MATERIALIZED VIEW query with the given view name. This assumes the keyspace - * name is already qualified for the Session or Statement. - */ - @NonNull - public static CreateMaterializedViewStart createMaterializedView( - @NonNull CqlIdentifier viewName) { - return new DefaultCreateMaterializedView(viewName); - } - - /** - * Starts a CREATE MATERIALIZED VIEW query with the given view name for the given keyspace name. 
- */ - @NonNull - public static CreateMaterializedViewStart createMaterializedView( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier viewName) { - return new DefaultCreateMaterializedView(keyspace, viewName); - } - - /** - * Shortcut for {@link #createMaterializedView(CqlIdentifier) - * createMaterializedView(CqlIdentifier.fromCql(viewName)} - */ - @NonNull - public static CreateMaterializedViewStart createMaterializedView(@NonNull String viewName) { - return createMaterializedView(CqlIdentifier.fromCql(viewName)); - } - - /** - * Shortcut for {@link #createMaterializedView(CqlIdentifier,CqlIdentifier) - * createMaterializedView(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(viewName)} - */ - @NonNull - public static CreateMaterializedViewStart createMaterializedView( - @Nullable String keyspace, @NonNull String viewName) { - return createMaterializedView( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(viewName)); - } - - /** - * Starts an ALTER MATERIALIZED VIEW query with the given view name. This assumes the keyspace - * name is already qualified for the Session or Statement. - */ - @NonNull - public static AlterMaterializedViewStart alterMaterializedView(@NonNull CqlIdentifier viewName) { - return new DefaultAlterMaterializedView(viewName); - } - - /** - * Starts an ALTER MATERIALIZED VIEW query with the given view name for the given keyspace name. 
- */ - @NonNull - public static AlterMaterializedViewStart alterMaterializedView( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier viewName) { - return new DefaultAlterMaterializedView(keyspace, viewName); - } - - /** - * Shortcut for {@link #alterMaterializedView(CqlIdentifier) - * alterMaterializedView(CqlIdentifier.fromCql(viewName)} - */ - @NonNull - public static AlterMaterializedViewStart alterMaterializedView(@NonNull String viewName) { - return alterMaterializedView(CqlIdentifier.fromCql(viewName)); - } - - /** - * Shortcut for {@link #alterMaterializedView(CqlIdentifier,CqlIdentifier) - * alterMaterializedView(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(viewName)} - */ - @NonNull - public static AlterMaterializedViewStart alterMaterializedView( - @Nullable String keyspace, @NonNull String viewName) { - return alterMaterializedView( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(viewName)); - } - - /** - * Starts a DROP MATERIALIZED VIEW query. This assumes the keyspace name is already qualified for - * the Session or Statement. - */ - @NonNull - public static Drop dropMaterializedView(@NonNull CqlIdentifier viewName) { - return new DefaultDrop(viewName, "MATERIALIZED VIEW"); - } - - /** - * Shortcut for {@link #dropMaterializedView(CqlIdentifier) - * dropMaterializedView(CqlIdentifier.fromCql(viewName)}. - */ - @NonNull - public static Drop dropMaterializedView(@NonNull String viewName) { - return dropMaterializedView(CqlIdentifier.fromCql(viewName)); - } - - /** Starts a DROP MATERIALIZED VIEW query for the given view name for the given keyspace name. 
*/ - @NonNull - public static Drop dropMaterializedView( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier viewName) { - return new DefaultDrop(keyspace, viewName, "MATERIALIZED VIEW"); - } - - /** - * Shortcut for {@link #dropMaterializedView(CqlIdentifier,CqlIdentifier) - * dropMaterializedView(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(viewName)}. - */ - @NonNull - public static Drop dropMaterializedView(@Nullable String keyspace, @NonNull String viewName) { - return dropMaterializedView( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(viewName)); - } - - /** - * Starts a CREATE TYPE query with the given type name. This assumes the keyspace name is already - * qualified for the Session or Statement. - */ - @NonNull - public static CreateTypeStart createType(@NonNull CqlIdentifier typeName) { - return new DefaultCreateType(typeName); - } - - /** Starts a CREATE TYPE query with the given type name for the given keyspace name. */ - @NonNull - public static CreateTypeStart createType( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier typeName) { - return new DefaultCreateType(keyspace, typeName); - } - - /** Shortcut for {@link #createType(CqlIdentifier) createType(CqlIdentifier.fromCql(typeName)}. */ - @NonNull - public static CreateTypeStart createType(@NonNull String typeName) { - return createType(CqlIdentifier.fromCql(typeName)); - } - - /** - * Shortcut for {@link #createType(CqlIdentifier,CqlIdentifier) - * createType(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(typeName)}. - */ - @NonNull - public static CreateTypeStart createType(@Nullable String keyspace, @NonNull String typeName) { - return createType( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(typeName)); - } - - /** - * Starts an ALTER TYPE query with the given type name. This assumes the keyspace name is already - * qualified for the Session or Statement. 
- */ - @NonNull - public static AlterTypeStart alterType(@NonNull CqlIdentifier typeName) { - return new DefaultAlterType(typeName); - } - - /** Starts an ALTER TYPE query with the given type name for the given keyspace name. */ - @NonNull - public static AlterTypeStart alterType( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier typeName) { - return new DefaultAlterType(keyspace, typeName); - } - - /** Shortcut for {@link #alterType(CqlIdentifier) alterType(CqlIdentifier.fromCql(typeName)} */ - @NonNull - public static AlterTypeStart alterType(@NonNull String typeName) { - return alterType(CqlIdentifier.fromCql(typeName)); - } - - /** - * Shortcut for {@link #alterType(CqlIdentifier,CqlIdentifier) - * alterType(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(typeName)} - */ - @NonNull - public static AlterTypeStart alterType(@Nullable String keyspace, @NonNull String typeName) { - return alterType( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(typeName)); - } - - /** - * Starts a DROP TYPE query. This assumes the keyspace name is already qualified for the Session - * or Statement. - */ - @NonNull - public static Drop dropType(@NonNull CqlIdentifier typeName) { - return new DefaultDrop(typeName, "TYPE"); - } - - /** Shortcut for {@link #dropType(CqlIdentifier) dropType(CqlIdentifier.fromCql(typeName)}. */ - @NonNull - public static Drop dropType(@NonNull String typeName) { - return dropType(CqlIdentifier.fromCql(typeName)); - } - - /** Starts a DROP TYPE query for the given view name for the given type name. */ - @NonNull - public static Drop dropType(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier typeName) { - return new DefaultDrop(keyspace, typeName, "TYPE"); - } - - /** - * Shortcut for {@link #dropType(CqlIdentifier,CqlIdentifier) - * dropType(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(typeName)}. 
- */ - @NonNull - public static Drop dropType(@Nullable String keyspace, @NonNull String typeName) { - return dropType( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(typeName)); - } - - /** - * Starts a CREATE INDEX query with no name. When this is used a name will be generated on the - * server side, usually with a format of tableName_idx_columnName. - */ - @NonNull - public static CreateIndexStart createIndex() { - return new DefaultCreateIndex(); - } - - /** Starts a CREATE INDEX query with the given name. */ - @NonNull - public static CreateIndexStart createIndex(@Nullable CqlIdentifier indexName) { - return new DefaultCreateIndex(indexName); - } - - /** - * Shortcut for {@link #createIndex(CqlIdentifier) createIndex(CqlIdentifier.fromCql(indexName)}. - */ - @NonNull - public static CreateIndexStart createIndex(@Nullable String indexName) { - return createIndex(indexName == null ? null : CqlIdentifier.fromCql(indexName)); - } - - /** - * Starts a DROP INDEX query. This assumes the keyspace name is already qualified for the Session - * or Statement. - */ - @NonNull - public static Drop dropIndex(@NonNull CqlIdentifier indexName) { - return new DefaultDrop(indexName, "INDEX"); - } - - /** Shortcut for {@link #dropIndex(CqlIdentifier) dropIndex(CqlIdentifier.fromCql(indexName)}. */ - @NonNull - public static Drop dropIndex(@NonNull String indexName) { - return dropIndex(CqlIdentifier.fromCql(indexName)); - } - - /** Starts a DROP INDEX query for the given index for the given keyspace name. */ - @NonNull - public static Drop dropIndex(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier indexName) { - return new DefaultDrop(keyspace, indexName, "INDEX"); - } - - /** - * Shortcut for {@link #dropIndex(CqlIdentifier, CqlIdentifier)} - * dropIndex(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(indexName)}. 
- */ - @NonNull - public static Drop dropIndex(@Nullable String keyspace, @NonNull String indexName) { - return dropIndex( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(indexName)); - } - - /** - * Starts a CREATE FUNCTION query with the given function name. This assumes the keyspace name is - * already qualified for the Session or Statement. - */ - @NonNull - public static CreateFunctionStart createFunction(@NonNull CqlIdentifier functionName) { - return new DefaultCreateFunction(functionName); - } - - /** Starts a CREATE FUNCTION query with the given function name for the given keyspace name. */ - @NonNull - public static CreateFunctionStart createFunction( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { - return new DefaultCreateFunction(keyspace, functionName); - } - /** - * Shortcut for {@link #createFunction(CqlIdentifier) - * createFunction(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(functionName)} - */ - @NonNull - public static CreateFunctionStart createFunction(@NonNull String functionName) { - return new DefaultCreateFunction(CqlIdentifier.fromCql(functionName)); - } - /** - * Shortcut for {@link #createFunction(CqlIdentifier, CqlIdentifier) - * createFunction(CqlIdentifier.fromCql(keyspace, functionName)} - */ - @NonNull - public static CreateFunctionStart createFunction( - @Nullable String keyspace, @NonNull String functionName) { - return new DefaultCreateFunction( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(functionName)); - } - - /** - * Starts a DROP FUNCTION query. This assumes the keyspace name is already qualified for the - * Session or Statement. - */ - @NonNull - public static Drop dropFunction(@NonNull CqlIdentifier functionName) { - return new DefaultDrop(functionName, "FUNCTION"); - } - - /** Starts a DROP FUNCTION query for the given function name for the given keyspace name. 
*/ - @NonNull - public static Drop dropFunction( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { - return new DefaultDrop(keyspace, functionName, "FUNCTION"); - } - - /** - * Shortcut for {@link #dropFunction(CqlIdentifier) - * dropFunction(CqlIdentifier.fromCql(functionName)}. - */ - @NonNull - public static Drop dropFunction(@NonNull String functionName) { - return new DefaultDrop(CqlIdentifier.fromCql(functionName), "FUNCTION"); - } - - /** - * Shortcut for {@link #dropFunction(CqlIdentifier, CqlIdentifier) - * dropFunction(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(functionName)}. - */ - @NonNull - public static Drop dropFunction(@Nullable String keyspace, @NonNull String functionName) { - return new DefaultDrop( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(functionName), - "FUNCTION"); - } - - /** - * Starts a CREATE AGGREGATE query with the given aggregate name. This assumes the keyspace name - * is already qualified for the Session or Statement. - */ - @NonNull - public static CreateAggregateStart createAggregate(@NonNull CqlIdentifier aggregateName) { - return new DefaultCreateAggregate(aggregateName); - } - - /** Starts a CREATE AGGREGATE query with the given aggregate name for the given keyspace name. 
*/ - @NonNull - public static CreateAggregateStart createAggregate( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier aggregateName) { - return new DefaultCreateAggregate(keyspace, aggregateName); - } - - /** - * Shortcut for {@link #createAggregate(CqlIdentifier) - * CreateAggregateStart(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(aggregateName)} - */ - @NonNull - public static CreateAggregateStart createAggregate(@NonNull String aggregateName) { - return new DefaultCreateAggregate(CqlIdentifier.fromCql(aggregateName)); - } - - /** - * Shortcut for {@link #createAggregate(CqlIdentifier, CqlIdentifier) - * CreateAggregateStart(CqlIdentifier.fromCql(keyspace, aggregateName)} - */ - @NonNull - public static CreateAggregateStart createAggregate( - @Nullable String keyspace, @NonNull String aggregateName) { - return new DefaultCreateAggregate( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(aggregateName)); - } - - /** - * Starts an DROP AGGREGATE query. This assumes the keyspace name is already qualified for the - * Session or Statement. - */ - @NonNull - public static Drop dropAggregate(@NonNull CqlIdentifier aggregateName) { - return new DefaultDrop(aggregateName, "AGGREGATE"); - } - - /** Starts an DROP AGGREGATE query for the given aggregate name for the given keyspace name. */ - @NonNull - public static Drop dropAggregate( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier aggregateName) { - return new DefaultDrop(keyspace, aggregateName, "AGGREGATE"); - } - - /** - * Shortcut for {@link #dropAggregate(CqlIdentifier) - * dropAggregate(CqlIdentifier.fromCql(aggregateName)}. 
- */ - @NonNull - public static Drop dropAggregate(@NonNull String aggregateName) { - return new DefaultDrop(CqlIdentifier.fromCql(aggregateName), "AGGREGATE"); - } - - /** - * Shortcut for {@link #dropAggregate(CqlIdentifier, CqlIdentifier) - * dropAggregate(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(aggregateName)}. - */ - @NonNull - public static Drop dropAggregate(@Nullable String keyspace, @NonNull String aggregateName) { - return new DefaultDrop( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), - CqlIdentifier.fromCql(aggregateName), - "AGGREGATE"); - } - - /** - * Compaction options for Size Tiered Compaction Strategy (STCS). - * - * @see CreateTableWithOptions#withCompaction(CompactionStrategy) - */ - @NonNull - public static SizeTieredCompactionStrategy sizeTieredCompactionStrategy() { - return new DefaultSizeTieredCompactionStrategy(); - } - - /** - * Compaction options for Leveled Compaction Strategy (LCS). - * - * @see CreateTableWithOptions#withCompaction(CompactionStrategy) - */ - @NonNull - public static LeveledCompactionStrategy leveledCompactionStrategy() { - return new DefaultLeveledCompactionStrategy(); - } - - /** - * Compaction options for Time Window Compaction Strategy (TWCS). - * - * @see CreateTableWithOptions#withCompaction(CompactionStrategy) - */ - @NonNull - public static TimeWindowCompactionStrategy timeWindowCompactionStrategy() { - return new DefaultTimeWindowCompactionStrategy(); - } - - /** - * Shortcut for creating a user-defined {@link DataType} for use in UDT and Table builder - * definitions, such as {@link CreateTable#withColumn(CqlIdentifier, DataType)}. - */ - @NonNull - public static UserDefinedType udt(@NonNull CqlIdentifier name, boolean frozen) { - return new ShallowUserDefinedType(null, name, frozen); - } - - /** Shortcut for {@link #udt(CqlIdentifier,boolean) udt(CqlIdentifier.fromCql(name),frozen)}. 
*/ - @NonNull - public static UserDefinedType udt(@NonNull String name, boolean frozen) { - return udt(CqlIdentifier.fromCql(name), frozen); - } - - /** - * Specifies the rows_per_partition configuration for table caching options. - * - * @see RelationOptions#withCaching(boolean, SchemaBuilder.RowsPerPartition) - */ - public static class RowsPerPartition { - - private final String value; - - private RowsPerPartition(String value) { - this.value = value; - } - - @NonNull public static RowsPerPartition ALL = new RowsPerPartition("ALL"); - - @NonNull public static RowsPerPartition NONE = new RowsPerPartition("NONE"); - - @NonNull - public static RowsPerPartition rows(int rowNumber) { - return new RowsPerPartition(Integer.toString(rowNumber)); - } - - @NonNull - public String getValue() { - return value; - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java deleted file mode 100644 index 01b50166426..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.condition; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.condition.DefaultConditionBuilder; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnComponentLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.lhs.FieldLeftOperand; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * A condition in a {@link ConditionalStatement}. - * - *

    To build instances of this type, use the factory methods, such as {@link #column(String) - * column}, {@link #field(String, String)} field}, etc. - * - *

    They are used as arguments to the {@link ConditionalStatement#if_(Iterable)} method, for - * example: - * - *

    {@code
    - * deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker())
    - *     .if_(Condition.column("v").isEqualTo(literal(1)))
    - * // DELETE FROM foo WHERE k=? IF v=1
    - * }
    - * - * There are also shortcuts in the fluent API when you build a statement, for example: - * - *
    {@code
    - * deleteFrom("foo").whereColumn("k").isEqualTo(bindMarker())
    - *     .ifColumn("v").isEqualTo(literal(1))
    - * // DELETE FROM foo WHERE k=? IF v=1
    - * }
    - */ -public interface Condition extends CqlSnippet { - - /** Builds a condition on a column for a conditional statement, as in {@code DELETE... IF k=1}. */ - @NonNull - static ConditionBuilder column(@NonNull CqlIdentifier columnId) { - return new DefaultConditionBuilder(new ColumnLeftOperand(columnId)); - } - - /** Shortcut for {@link #column(CqlIdentifier) column(CqlIdentifier.fromCql(columnName))}. */ - @NonNull - static ConditionBuilder column(@NonNull String columnName) { - return column(CqlIdentifier.fromCql(columnName)); - } - - /** - * Builds a condition on a field in a UDT column for a conditional statement, as in {@code - * DELETE... IF address.street='test'}. - */ - @NonNull - static ConditionBuilder field( - @NonNull CqlIdentifier columnId, @NonNull CqlIdentifier fieldId) { - return new DefaultConditionBuilder(new FieldLeftOperand(columnId, fieldId)); - } - - /** - * Shortcut for {@link #field(CqlIdentifier, CqlIdentifier) - * field(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName))}. - */ - @NonNull - static ConditionBuilder field(@NonNull String columnName, @NonNull String fieldName) { - return field(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName)); - } - - /** - * Builds a condition on an element in a collection column for a conditional statement, as in - * {@code DELETE... IF m[0]=1}. - */ - @NonNull - static ConditionBuilder element(@NonNull CqlIdentifier columnId, @NonNull Term index) { - return new DefaultConditionBuilder(new ColumnComponentLeftOperand(columnId, index)); - } - - /** - * Shortcut for {@link #element(CqlIdentifier, Term) element(CqlIdentifier.fromCql(columnName), - * index)}. 
- */ - @NonNull - static ConditionBuilder element(@NonNull String columnName, @NonNull Term index) { - return element(CqlIdentifier.fromCql(columnName), index); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java deleted file mode 100644 index 83ab849ca9d..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.condition; - -import com.datastax.oss.driver.api.querybuilder.relation.ArithmeticRelationBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.InRelationBuilder; - -public interface ConditionBuilder - extends ArithmeticRelationBuilder, InRelationBuilder {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java deleted file mode 100644 index 2822c0a2f6f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.condition; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.condition.DefaultConditionBuilder; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnComponentLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.lhs.ColumnLeftOperand; -import com.datastax.oss.driver.internal.querybuilder.lhs.FieldLeftOperand; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -/** - * A statement that can be applied conditionally, such as UPDATE or DELETE. - * - *

    Not that this does not covert INSERT... IF NOT EXISTS, which is handled separately. - */ -public interface ConditionalStatement> { - - /** - * Adds an IF EXISTS condition. - * - *

    If any column conditions were added before, they will be cleared. - */ - @NonNull - @CheckReturnValue - SelfT ifExists(); - - /** - * Adds an IF condition. All conditions are logically joined with AND. If {@link #ifExists()} was - * invoked on this statement before, it will get cancelled. - * - *

    To create the argument, use one of the factory methods in {@link Condition}, for example - * {@link Condition#column(CqlIdentifier) column}. - * - *

    If you add multiple conditions as once, consider {@link #if_(Iterable)} as a more efficient - * alternative. - */ - @NonNull - @CheckReturnValue - SelfT if_(@NonNull Condition condition); - - /** - * Adds multiple IF conditions at once. All conditions are logically joined with AND. If {@link - * #ifExists()} was invoked on this statement before, it will get cancelled. - * - *

    This is slightly more efficient than adding the relations one by one (since the underlying - * implementation of this object is immutable). - * - *

    To create the arguments, use one of the factory methods in {@link Condition}, for example - * {@link Condition#column(CqlIdentifier) column}. - */ - @NonNull - @CheckReturnValue - SelfT if_(@NonNull Iterable conditions); - - /** Var-arg equivalent of {@link #if_(Iterable)}. */ - @NonNull - @CheckReturnValue - default SelfT if_(@NonNull Condition... conditions) { - return if_(Arrays.asList(conditions)); - } - - /** - * Adds an IF condition on a simple column, as in {@code DELETE... IF k=1}. - * - *

    This is the equivalent of creating a condition with {@link Condition#column(CqlIdentifier)} - * and passing it to {@link #if_(Condition)}. - */ - @NonNull - default ConditionBuilder ifColumn(@NonNull CqlIdentifier columnId) { - return new DefaultConditionBuilder.Fluent<>(this, new ColumnLeftOperand(columnId)); - } - - /** - * Shortcut for {@link #ifColumn(CqlIdentifier) column(CqlIdentifier.fromCql(columnName))}. - * - *

    This is the equivalent of creating a condition with {@link Condition#column(String)} and - * passing it to {@link #if_(Condition)}. - */ - @NonNull - default ConditionBuilder ifColumn(@NonNull String columnName) { - return ifColumn(CqlIdentifier.fromCql(columnName)); - } - - /** - * Adds an IF condition on a field in a UDT column for a conditional statement, as in {@code - * DELETE... IF address.street='test'}. - * - *

    This is the equivalent of creating a condition with {@link Condition#field(CqlIdentifier, - * CqlIdentifier)} and passing it to {@link #if_(Condition)}. - */ - @NonNull - default ConditionBuilder ifField( - @NonNull CqlIdentifier columnId, @NonNull CqlIdentifier fieldId) { - return new DefaultConditionBuilder.Fluent<>(this, new FieldLeftOperand(columnId, fieldId)); - } - - /** - * Shortcut for {@link #ifField(CqlIdentifier, CqlIdentifier) - * field(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName))}. - * - *

    This is the equivalent of creating a condition with {@link Condition#field(String, String)} - * and passing it to {@link #if_(Condition)}. - */ - @NonNull - default ConditionBuilder ifField(@NonNull String columnName, @NonNull String fieldName) { - return ifField(CqlIdentifier.fromCql(columnName), CqlIdentifier.fromCql(fieldName)); - } - - /** - * Adds an IF condition on an element in a collection column for a conditional statement, as in - * {@code DELETE... IF m[0]=1}. - * - *

    This is the equivalent of creating a condition with {@link Condition#element(CqlIdentifier, - * Term)} and passing it to {@link #if_(Condition)}. - */ - @NonNull - default ConditionBuilder ifElement(@NonNull CqlIdentifier columnId, @NonNull Term index) { - return new DefaultConditionBuilder.Fluent<>( - this, new ColumnComponentLeftOperand(columnId, index)); - } - - /** - * Shortcut for {@link #ifElement(CqlIdentifier, Term) element(CqlIdentifier.fromCql(columnName), - * index)}. - * - *

    This is the equivalent of creating a condition with {@link Condition#element(String, Term)} - * and passing it to {@link #if_(Condition)}. - */ - @NonNull - default ConditionBuilder ifElement(@NonNull String columnName, @NonNull Term index) { - return ifElement(CqlIdentifier.fromCql(columnName), index); - } - - /** - * Adds a raw CQL snippet as a condition. - * - *

    This is the equivalent of creating a condition with {@link QueryBuilder#raw(String)} and - * passing it to {@link #if_(Condition)}. - * - *

    The contents will be appended to the query as-is, without any syntax checking or escaping. - * This method should be used with caution, as it's possible to generate invalid CQL that will - * fail at execution time; on the other hand, it can be used as a workaround to handle new CQL - * features that are not yet covered by the query builder. - * - * @see QueryBuilder#raw(String) - */ - @NonNull - @CheckReturnValue - default SelfT ifRaw(@NonNull String raw) { - return if_(QueryBuilder.raw(raw)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java deleted file mode 100644 index 4688cac86e0..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.condition.ConditionalStatement; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; - -/** A complete DELETE statement, with at least one WHERE clause. */ -public interface Delete - extends OngoingWhereClause, ConditionalStatement, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java deleted file mode 100644 index e8cf7a26855..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.delete; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; -import com.datastax.oss.driver.api.querybuilder.select.Selector; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; - -/** - * An in-progress DELETE statement: it targets a table and optionally a list of columns to delete; - * it needs at least one WHERE relation to become buildable. - */ -public interface DeleteSelection extends OngoingWhereClause { - - /** - * Adds a selector. - * - *

    To create the argument, use one of the factory methods in {@link Selector}, for example - * {@link Selector#column(CqlIdentifier) column}. This type also provides shortcuts to create and - * add the selector in one call, for example {@link #column(CqlIdentifier)} for {@code - * selector(column(...))}. - * - *

    Note that the only valid arguments for DELETE are a column, a field in a UDT column (nested - * UDTs are not supported), and an element in a collection column (nested collections are not - * supported). - * - *

    If you add multiple selectors as once, consider {@link #selectors(Iterable)} as a more - * efficient alternative. - */ - @NonNull - DeleteSelection selector(@NonNull Selector selector); - - /** - * Adds multiple selectors at once. - * - *

    This is slightly more efficient than adding the selectors one by one (since the underlying - * implementation of this object is immutable). - * - *

    To create the arguments, use one of the factory methods in {@link Selector}, for example - * {@link Selector#column(CqlIdentifier) column}. - * - *

    Note that the only valid arguments for DELETE are a column, a field in a UDT column (nested - * UDTs are not supported), and an element in a collection column (nested collections are not - * supported). - * - * @see #selector(Selector) - */ - @NonNull - DeleteSelection selectors(@NonNull Iterable additionalSelectors); - - /** Var-arg equivalent of {@link #selectors(Iterable)}. */ - @NonNull - default DeleteSelection selectors(@NonNull Selector... additionalSelectors) { - return selectors(Arrays.asList(additionalSelectors)); - } - - /** - * Deletes a particular column by its CQL identifier. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.column(columnId))}. - * - * @see Selector#column(CqlIdentifier) - */ - @NonNull - default DeleteSelection column(@NonNull CqlIdentifier columnId) { - return selector(Selector.column(columnId)); - } - - /** Shortcut for {@link #column(CqlIdentifier) column(CqlIdentifier.fromCql(columnName))} */ - @NonNull - default DeleteSelection column(@NonNull String columnName) { - return column(CqlIdentifier.fromCql(columnName)); - } - - /** - * Deletes a field inside of a UDT column, as in {@code DELETE user.name}. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.field(udtColumnId, - * fieldId))}. - * - * @see Selector#field(CqlIdentifier, CqlIdentifier) - */ - @NonNull - default DeleteSelection field( - @NonNull CqlIdentifier udtColumnId, @NonNull CqlIdentifier fieldId) { - return selector(Selector.field(udtColumnId, fieldId)); - } - - /** - * Shortcut for {@link #field(CqlIdentifier, CqlIdentifier) - * field(CqlIdentifier.fromCql(udtColumnName), CqlIdentifier.fromCql(fieldName))}. - * - * @see Selector#field(String, String) - */ - @NonNull - default DeleteSelection field(@NonNull String udtColumnName, @NonNull String fieldName) { - return field(CqlIdentifier.fromCql(udtColumnName), CqlIdentifier.fromCql(fieldName)); - } - - /** - * Deletes an element in a collection column, as in {@code DELETE m['key']}. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.element(collectionId, - * index))}. - * - * @see Selector#element(CqlIdentifier, Term) - */ - @NonNull - default DeleteSelection element(@NonNull CqlIdentifier collectionId, @NonNull Term index) { - return selector(Selector.element(collectionId, index)); - } - - /** - * Shortcut for {@link #element(CqlIdentifier, Term) - * element(CqlIdentifier.fromCql(collectionName), index)}. - * - * @see Selector#element(String, Term) - */ - @NonNull - default DeleteSelection element(@NonNull String collectionName, @NonNull Term index) { - return element(CqlIdentifier.fromCql(collectionName), index); - } - - /** - * Specifies an element to delete as a raw CQL snippet. - * - *

    This is a shortcut for {@link #selector(Selector) selector(QueryBuilder.raw(raw))}. - * - *

    The contents will be appended to the query as-is, without any syntax checking or escaping. - * This method should be used with caution, as it's possible to generate invalid CQL that will - * fail at execution time; on the other hand, it can be used as a workaround to handle new CQL - * features that are not yet covered by the query builder. - * - * @see QueryBuilder#raw(String) - */ - @NonNull - default DeleteSelection raw(@NonNull String raw) { - return selector(QueryBuilder.raw(raw)); - } - - /** - * Adds a USING TIMESTAMP clause to this statement with a literal value. - * - *

    If this method or {@link #usingTimestamp(BindMarker)} is called multiple times, the last - * value is used. - */ - @NonNull - DeleteSelection usingTimestamp(long timestamp); - - /** - * Adds a USING TIMESTAMP clause to this statement with a bind marker. - * - *

    If this method or {@link #usingTimestamp(long)} is called multiple times, the last value is - * used. Passing {@code null} to this method removes any previous timestamp. - */ - @NonNull - DeleteSelection usingTimestamp(@Nullable BindMarker bindMarker); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java deleted file mode 100644 index 09a29161417..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -/** A complete INSERT statement that is ready to be built. */ -public interface Insert extends BuildableQuery { - - /** Adds an IF NOT EXISTS clause to this statement. 
*/ - @NonNull - Insert ifNotExists(); - - /** - * Adds a USING TIMESTAMP clause to this statement with a literal value. - * - *

    If this method or {@link #usingTimestamp(BindMarker)} is called multiple times, the last - * value is used. - */ - @NonNull - Insert usingTimestamp(long timestamp); - - /** - * Adds a USING TIMESTAMP clause to this statement with a bind marker. - * - *

    If this method or {@link #usingTimestamp(long)} is called multiple times, the last value is - * used. Passing {@code null} to this method removes any previous timestamp. - */ - @NonNull - Insert usingTimestamp(@Nullable BindMarker bindMarker); - - /** - * Adds a {@code USING TTL} clause to this statement with a literal value. Setting a value of - * {@code null} will remove the {@code USING TTL} clause on this statement. Setting a value of - * {@code 0} will insert the data with no TTL when the statement is executed, overriding any Table - * TTL that might exist. - * - *

    If this method or {@link #usingTtl(BindMarker) } is called multiple times, the value from - * the last invocation is used. - * - * @param ttlInSeconds Time, in seconds, the inserted data should live before expiring. - */ - @NonNull - Insert usingTtl(int ttlInSeconds); - - /** - * Adds a {@code USING TTL} clause to this statement with a bind marker. Setting a value of {@code - * null} will remove the {@code USING TTL} clause on this statement. Binding a value of {@code 0} - * will insert the data with no TTL when the statement is executed, overriding any Table TTL that - * might exist. - * - *

    If this method or {@link #usingTtl(int) } is called multiple times, the value from the last - * invocation is used. - * - * @param bindMarker A bind marker that is understood to be a value in seconds. - */ - @NonNull - Insert usingTtl(@Nullable BindMarker bindMarker); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java deleted file mode 100644 index 26bee52c377..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * The beginning of an INSERT statement; at this point only the table is known, it might become a - * JSON insert or a regular one, depending on which method is called next. - */ -public interface InsertInto extends OngoingValues { - - /** Makes this statement an INSERT JSON with the provided JSON string. */ - @NonNull - JsonInsert json(@NonNull String json); - - /** Makes this statement an INSERT JSON with a bind marker, as in {@code INSERT JSON ?}. */ - @NonNull - JsonInsert json(@NonNull BindMarker bindMarker); - - /** - * Makes this statement an INSERT JSON with a custom type mapping. The provided {@code Object - * value} will be mapped to a JSON string. - * - *

    This is an alternative to {@link #json(String)} for custom type mappings. The provided - * registry should contain a codec that can format the value. Typically, this will be your - * session's registry, which is accessible via {@code session.getContext().getCodecRegistry()}. - * - * @throws CodecNotFoundException if {@code codecRegistry} does not contain any codec that can - * handle {@code value}. - * @see DriverContext#getCodecRegistry() - */ - @NonNull - default JsonInsert json(@NonNull Object value, @NonNull CodecRegistry codecRegistry) { - try { - return json(value, codecRegistry.codecFor(value)); - } catch (CodecNotFoundException e) { - throw new IllegalArgumentException( - String.format( - "Could not inline JSON literal of type %s. " - + "This happens because the provided CodecRegistry does not contain " - + "a codec for this type. Try registering your TypeCodec in the registry first, " - + "or use json(Object, TypeCodec).", - value.getClass().getName()), - e); - } - } - - /** - * Makes this statement an INSERT JSON with a custom type mapping. The provided {@code Object - * value} will be mapped to a JSON string. The value will be turned into a string with {@link - * TypeCodec#format(Object)}, and inlined in the query. - */ - @NonNull - JsonInsert json(@NonNull T value, @NonNull TypeCodec codec); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java deleted file mode 100644 index f287cd31501..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import edu.umd.cs.findbugs.annotations.NonNull; - -/** An INSERT JSON statement. */ -public interface JsonInsert extends Insert { - - /** - * Adds a DEFAULT NULL clause to this statement. - * - *

    If this or {@link #defaultUnset()} is called multiple times, the last value is used. - */ - @NonNull - JsonInsert defaultNull(); - - /** - * Adds a DEFAULT UNSET clause to this statement. - * - *

    If this or {@link #defaultNull()} is called multiple times, the last value is used. - */ - @NonNull - JsonInsert defaultUnset(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java deleted file mode 100644 index a3e6572608a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.insert; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface OngoingValues { - - /** - * Sets a value for a column, as in {@code INSERT INTO ... (c) VALUES (?)}. - * - *

    If this is called twice for the same column, the previous entry is discarded and the new - * entry will be added at the end of the list. - */ - @NonNull - RegularInsert value(@NonNull CqlIdentifier columnId, @NonNull Term value); - - /** - * Shortcut for {@link #value(CqlIdentifier, Term) value(CqlIdentifier.fromCql(columnName), - * value)}. - */ - @NonNull - default RegularInsert value(@NonNull String columnName, @NonNull Term value) { - return value(CqlIdentifier.fromCql(columnName), value); - } - - /** - * Sets values for multiple columns in one call, as in {@code INSERT INTO ... (key1, key2) VALUES - * (value1, value2)}. - * - *

    If the map contains columns that had already been added to this statement, the previous - * entries are discarded and the new entries will be added at the end of the list. - * - *

    Implementation note: this is a default method only for backward compatibility. The default - * implementation calls {@link #value(CqlIdentifier, Term)} in a loop; it should be overridden if - * a more efficient alternative exists. - */ - @NonNull - default RegularInsert valuesByIds(@NonNull Map newAssignments) { - if (newAssignments.isEmpty()) { - throw new IllegalArgumentException("newAssignments can't be empty"); - } - RegularInsert result = null; - for (Map.Entry entry : newAssignments.entrySet()) { - result = (result == null ? this : result).value(entry.getKey(), entry.getValue()); - } - return result; - } - - /** Shortcut for {@link #valuesByIds(Map)} when the keys are plain strings. */ - @NonNull - default RegularInsert values(@NonNull Map newAssignments) { - return valuesByIds(CqlIdentifiers.wrapKeys(newAssignments)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java deleted file mode 100644 index f26a1c53e93..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.insert; - -/** A regular (not JSON) INSERT statement. */ -public interface RegularInsert extends OngoingValues, Insert {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java deleted file mode 100644 index 50e2a03f347..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public interface ArithmeticRelationBuilder { - - /** Builds an '=' relation with the given term. 
*/ - @NonNull - default ResultT isEqualTo(@NonNull Term rightOperand) { - return build("=", rightOperand); - } - - /** Builds a '<' relation with the given term. */ - @NonNull - default ResultT isLessThan(@NonNull Term rightOperand) { - return build("<", rightOperand); - } - - /** Builds a '<=' relation with the given term. */ - @NonNull - default ResultT isLessThanOrEqualTo(@NonNull Term rightOperand) { - return build("<=", rightOperand); - } - - /** Builds a '>' relation with the given term. */ - @NonNull - default ResultT isGreaterThan(@NonNull Term rightOperand) { - return build(">", rightOperand); - } - - /** Builds a '>=' relation with the given term. */ - @NonNull - default ResultT isGreaterThanOrEqualTo(@NonNull Term rightOperand) { - return build(">=", rightOperand); - } - - /** Builds a '!=' relation with the given term. */ - @NonNull - default ResultT isNotEqualTo(@NonNull Term rightOperand) { - return build("!=", rightOperand); - } - - @NonNull - ResultT build(@NonNull String operator, @Nullable Term rightOperand); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java deleted file mode 100644 index 5b79dfb74d0..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.relation; - -public interface ColumnComponentRelationBuilder - extends ArithmeticRelationBuilder {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java deleted file mode 100644 index 247d61eaed5..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface ColumnRelationBuilder - extends ArithmeticRelationBuilder, InRelationBuilder { - - /** Builds a LIKE relation for the column. */ - @NonNull - default ResultT like(@NonNull Term term) { - return build(" LIKE ", term); - } - - /** Builds an IS NOT NULL relation for the column. */ - @NonNull - default ResultT isNotNull() { - return build(" IS NOT NULL", null); - } - - /** Builds a CONTAINS relation for the column. */ - @NonNull - default ResultT contains(@NonNull Term term) { - return build(" CONTAINS ", term); - } - - /** Builds a CONTAINS KEY relation for the column. */ - @NonNull - default ResultT containsKey(@NonNull Term term) { - return build(" CONTAINS KEY ", term); - } - - /** - * Builds a NOT CONTAINS relation for the column. - * - *

    Note that NOT CONTAINS support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more - * information. - */ - @NonNull - default ResultT notContains(@NonNull Term term) { - return build(" NOT CONTAINS ", term); - } - - /** - * Builds a NOT CONTAINS KEY relation for the column. - * - *

    Note that NOT CONTAINS KEY support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more - * information. - */ - @NonNull - default ResultT notContainsKey(@NonNull Term term) { - return build(" NOT CONTAINS KEY ", term); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java deleted file mode 100644 index afaa19ff724..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; - -public interface InRelationBuilder { - - /** - * Builds an IN relation where the whole set of possible values is a bound variable, as in {@code - * IN ?}. - */ - @NonNull - default ResultT in(@NonNull BindMarker bindMarker) { - return build(" IN ", bindMarker); - } - - /** - * Builds an IN relation where the arguments are the possible values, as in {@code IN (term1, - * term2...)}. - */ - @NonNull - default ResultT in(@NonNull Iterable alternatives) { - return build(" IN ", QueryBuilder.tuple(alternatives)); - } - - /** Var-arg equivalent of {@link #in(Iterable)} . */ - @NonNull - default ResultT in(@NonNull Term... alternatives) { - return in(Arrays.asList(alternatives)); - } - - /** - * Builds a NOT IN relation where the whole set of possible values is a bound variable, as in - * {@code NOT IN ?}. - * - *

    Note that NOT IN support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more - * information. - */ - @NonNull - default ResultT notIn(@NonNull BindMarker bindMarker) { - return build(" NOT IN ", bindMarker); - } - - /** - * Builds an IN relation where the arguments are the possible values, as in {@code IN (term1, - * term2...)}. - * - *

    Note that NOT IN support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more - * information. - */ - @NonNull - default ResultT notIn(@NonNull Iterable alternatives) { - return build(" NOT IN ", QueryBuilder.tuple(alternatives)); - } - - /** Var-arg equivalent of {@link #notIn(Iterable)} . */ - @NonNull - default ResultT notIn(@NonNull Term... alternatives) { - return notIn(Arrays.asList(alternatives)); - } - - @NonNull - ResultT build(@NonNull String operator, @Nullable Term rightOperand); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java deleted file mode 100644 index 26bc927953b..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -public interface MultiColumnRelationBuilder - extends ArithmeticRelationBuilder, InRelationBuilder {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java deleted file mode 100644 index 16b8072fdff..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import com.datastax.oss.driver.internal.querybuilder.DefaultRaw; -import com.datastax.oss.driver.internal.querybuilder.relation.CustomIndexRelation; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultColumnComponentRelationBuilder; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultColumnRelationBuilder; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultMultiColumnRelationBuilder; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultTokenRelationBuilder; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -/** A statement that is ready to accept relations in its WHERE clause. */ -public interface OngoingWhereClause> { - - /** - * Adds a relation in the WHERE clause. All relations are logically joined with AND. - * - *

    To create the argument, use one of the factory methods in {@link Relation}, for example - * {@link Relation#column(CqlIdentifier) column}. - * - *

    If you add multiple selectors as once, consider {@link #where(Iterable)} as a more efficient - * alternative. - */ - @NonNull - @CheckReturnValue - SelfT where(@NonNull Relation relation); - - /** - * Adds multiple relations at once. All relations are logically joined with AND. - * - *

    This is slightly more efficient than adding the relations one by one (since the underlying - * implementation of this object is immutable). - * - *

    To create the arguments, use one of the factory methods in {@link Relation}, for example - * {@link Relation#column(CqlIdentifier) column}. - * - * @see #where(Relation) - */ - @NonNull - @CheckReturnValue - SelfT where(@NonNull Iterable additionalRelations); - - /** Var-arg equivalent of {@link #where(Iterable)}. */ - @NonNull - @CheckReturnValue - default SelfT where(@NonNull Relation... additionalRelations) { - return where(Arrays.asList(additionalRelations)); - } - - /** - * Adds a relation testing a column. - * - *

    This must be chained with an operator call, for example: - * - *

    {@code
    -   * selectFrom("foo").all().whereColumn("k").isEqualTo(bindMarker());
    -   * }
    - * - * This is the equivalent of creating a relation with {@link Relation#column(CqlIdentifier)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default ColumnRelationBuilder whereColumn(@NonNull CqlIdentifier id) { - return new DefaultColumnRelationBuilder.Fluent<>(this, id); - } - - /** - * Shortcut for {@link #whereColumn(CqlIdentifier) whereColumn(CqlIdentifier.fromCql(name))}. - * - *

    This is the equivalent of creating a relation with {@link Relation#column(String)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default ColumnRelationBuilder whereColumn(@NonNull String name) { - return whereColumn(CqlIdentifier.fromCql(name)); - } - - /** - * Adds a relation testing a value in a map (Cassandra 4 and above). - * - *

    This is the equivalent of creating a relation with {@link Relation#mapValue(CqlIdentifier, - * Term)} and passing it to {@link #where(Relation)}. - */ - @NonNull - default ColumnComponentRelationBuilder whereMapValue( - @NonNull CqlIdentifier columnId, @NonNull Term index) { - return new DefaultColumnComponentRelationBuilder.Fluent<>(this, columnId, index); - } - - /** - * Shortcut for {@link #whereMapValue(CqlIdentifier, Term) - * whereMapValue(CqlIdentifier.fromCql(columnName), index)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#mapValue(String, Term)} - * and passing it to {@link #where(Relation)}. - */ - @NonNull - default ColumnComponentRelationBuilder whereMapValue( - @NonNull String columnName, @NonNull Term index) { - return whereMapValue(CqlIdentifier.fromCql(columnName), index); - } - - /** - * Adds a relation testing a token generated from a set of columns. - * - *

    This is the equivalent of creating a relation with {@link Relation#tokenFromIds(Iterable)} - * and passing it to {@link #where(Relation)}. - */ - @NonNull - default TokenRelationBuilder whereTokenFromIds( - @NonNull Iterable identifiers) { - return new DefaultTokenRelationBuilder.Fluent<>(this, identifiers); - } - - /** - * Var-arg equivalent of {@link #whereTokenFromIds(Iterable)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#token(CqlIdentifier...)} - * and passing it to {@link #where(Relation)}. - */ - @NonNull - default TokenRelationBuilder whereToken(@NonNull CqlIdentifier... identifiers) { - return whereTokenFromIds(Arrays.asList(identifiers)); - } - - /** - * Equivalent of {@link #whereTokenFromIds(Iterable)} with raw strings; the names are converted - * with {@link CqlIdentifier#fromCql(String)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#token(Iterable)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default TokenRelationBuilder whereToken(@NonNull Iterable names) { - return whereTokenFromIds(CqlIdentifiers.wrap(names)); - } - - /** - * Var-arg equivalent of {@link #whereToken(Iterable)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#token(String...)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default TokenRelationBuilder whereToken(@NonNull String... names) { - return whereToken(Arrays.asList(names)); - } - - /** - * Adds a multi-column relation, as in {@code WHERE (c1, c2, c3) IN ...}. - * - *

    This is the equivalent of creating a relation with {@link Relation#columnIds(Iterable)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default MultiColumnRelationBuilder whereColumnIds( - @NonNull Iterable identifiers) { - return new DefaultMultiColumnRelationBuilder.Fluent<>(this, identifiers); - } - - /** - * Var-arg equivalent of {@link #whereColumnIds(Iterable)}. - * - *

    This is the equivalent of creating a relation with {@link - * Relation#columns(CqlIdentifier...)} and passing it to {@link #where(Relation)}. - */ - @NonNull - default MultiColumnRelationBuilder whereColumns(@NonNull CqlIdentifier... identifiers) { - return whereColumnIds(Arrays.asList(identifiers)); - } - - /** - * Equivalent of {@link #whereColumnIds(Iterable)} with raw strings; the names are converted with - * {@link CqlIdentifier#fromCql(String)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#columns(Iterable)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default MultiColumnRelationBuilder whereColumns(@NonNull Iterable names) { - return whereColumnIds(CqlIdentifiers.wrap(names)); - } - - /** - * Var-arg equivalent of {@link #whereColumns(Iterable)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#columns(String...)} and - * passing it to {@link #where(Relation)}. - */ - @NonNull - default MultiColumnRelationBuilder whereColumns(@NonNull String... names) { - return whereColumns(Arrays.asList(names)); - } - - /** - * Adds a relation on a custom index. - * - *

    This is the equivalent of creating a relation with {@link - * Relation#customIndex(CqlIdentifier, Term)} and passing it to {@link #where(Relation)}. - */ - @NonNull - @CheckReturnValue - default SelfT whereCustomIndex(@NonNull CqlIdentifier indexId, @NonNull Term expression) { - return where(new CustomIndexRelation(indexId, expression)); - } - - /** - * Shortcut for {@link #whereCustomIndex(CqlIdentifier, Term) - * whereCustomIndex(CqlIdentifier.fromCql(indexName), expression)}. - * - *

    This is the equivalent of creating a relation with {@link Relation#customIndex(String, - * Term)} and passing it to {@link #where(Relation)}. - */ - @NonNull - @CheckReturnValue - default SelfT whereCustomIndex(@NonNull String indexName, @NonNull Term expression) { - return whereCustomIndex(CqlIdentifier.fromCql(indexName), expression); - } - - /** - * Adds a raw CQL snippet as a relation. - * - *

    This is the equivalent of creating a relation with {@link QueryBuilder#raw(String)} and - * passing it to {@link #where(Relation)}. - * - *

    The contents will be appended to the query as-is, without any syntax checking or escaping. - * This method should be used with caution, as it's possible to generate invalid CQL that will - * fail at execution time; on the other hand, it can be used as a workaround to handle new CQL - * features that are not yet covered by the query builder. - */ - @NonNull - @CheckReturnValue - default SelfT whereRaw(@NonNull String raw) { - return where(new DefaultRaw(raw)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java deleted file mode 100644 index 41020332643..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import com.datastax.oss.driver.internal.querybuilder.relation.CustomIndexRelation; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultColumnComponentRelationBuilder; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultColumnRelationBuilder; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultMultiColumnRelationBuilder; -import com.datastax.oss.driver.internal.querybuilder.relation.DefaultTokenRelationBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -/** - * A relation in a WHERE clause. - * - *

    To build instances of this type, use the factory methods, such as {@link #column(String) - * column}, {@link #token(String...) token}, etc. - * - *

    They are used as arguments to the {@link OngoingWhereClause#where(Iterable) where} method, for - * example: - * - *

    {@code
    - * selectFrom("foo").all().where(Relation.column("k").isEqualTo(literal(1)))
    - * // SELECT * FROM foo WHERE k=1
    - * }
    - * - * There are also shortcuts in the fluent API when you build a statement, for example: - * - *
    {@code
    - * selectFrom("foo").all().whereColumn("k").isEqualTo(literal(1))
    - * // SELECT * FROM foo WHERE k=1
    - * }
    - */ -public interface Relation extends CqlSnippet { - - /** - * Builds a relation testing a column. - * - *

    This must be chained with an operator call, for example: - * - *

    {@code
    -   * Relation r = Relation.column("k").isEqualTo(bindMarker());
    -   * }
    - */ - @NonNull - static ColumnRelationBuilder column(@NonNull CqlIdentifier id) { - return new DefaultColumnRelationBuilder(id); - } - - /** Shortcut for {@link #column(CqlIdentifier) column(CqlIdentifier.fromCql(name))} */ - @NonNull - static ColumnRelationBuilder column(@NonNull String name) { - return column(CqlIdentifier.fromCql(name)); - } - - /** Builds a relation testing a value in a map (Cassandra 4 and above). */ - @NonNull - static ColumnComponentRelationBuilder mapValue( - @NonNull CqlIdentifier columnId, @NonNull Term index) { - // The concept could easily be extended to list elements and tuple components, so use a generic - // name internally, we'll add other shortcuts if necessary. - return new DefaultColumnComponentRelationBuilder(columnId, index); - } - - /** - * Shortcut for {@link #mapValue(CqlIdentifier, Term) mapValue(CqlIdentifier.fromCql(columnName), - * index)} - */ - @NonNull - static ColumnComponentRelationBuilder mapValue( - @NonNull String columnName, @NonNull Term index) { - return mapValue(CqlIdentifier.fromCql(columnName), index); - } - - /** Builds a relation testing a token generated from a set of columns. */ - @NonNull - static TokenRelationBuilder tokenFromIds(@NonNull Iterable identifiers) { - return new DefaultTokenRelationBuilder(identifiers); - } - - /** Var-arg equivalent of {@link #tokenFromIds(Iterable)}. */ - @NonNull - static TokenRelationBuilder token(@NonNull CqlIdentifier... identifiers) { - return tokenFromIds(Arrays.asList(identifiers)); - } - - /** - * Equivalent of {@link #tokenFromIds(Iterable)} with raw strings; the names are converted with - * {@link CqlIdentifier#fromCql(String)}. - */ - @NonNull - static TokenRelationBuilder token(@NonNull Iterable names) { - return tokenFromIds(CqlIdentifiers.wrap(names)); - } - - /** Var-arg equivalent of {@link #token(Iterable)}. */ - @NonNull - static TokenRelationBuilder token(@NonNull String... 
names) { - return token(Arrays.asList(names)); - } - - /** Builds a multi-column relation, as in {@code WHERE (c1, c2, c3) IN ...}. */ - @NonNull - static MultiColumnRelationBuilder columnIds( - @NonNull Iterable identifiers) { - return new DefaultMultiColumnRelationBuilder(identifiers); - } - - /** Var-arg equivalent of {@link #columnIds(Iterable)}. */ - @NonNull - static MultiColumnRelationBuilder columns(@NonNull CqlIdentifier... identifiers) { - return columnIds(Arrays.asList(identifiers)); - } - - /** - * Equivalent of {@link #columnIds(Iterable)} with raw strings; the names are converted with - * {@link CqlIdentifier#fromCql(String)}. - */ - @NonNull - static MultiColumnRelationBuilder columns(@NonNull Iterable names) { - return columnIds(CqlIdentifiers.wrap(names)); - } - - /** Var-arg equivalent of {@link #columns(Iterable)}. */ - @NonNull - static MultiColumnRelationBuilder columns(@NonNull String... names) { - return columns(Arrays.asList(names)); - } - - /** Builds a relation on a custom index. */ - @NonNull - static Relation customIndex(@NonNull CqlIdentifier indexId, @NonNull Term expression) { - return new CustomIndexRelation(indexId, expression); - } - - /** - * Shortcut for {@link #customIndex(CqlIdentifier, Term) - * customIndex(CqlIdentifier.fromCql(indexName), expression)} - */ - @NonNull - static Relation customIndex(@NonNull String indexName, @NonNull Term expression) { - return customIndex(CqlIdentifier.fromCql(indexName), expression); - } - - /** - * Whether this relation is idempotent. - * - *

    That is, whether it always selects the same rows when used multiple times. For example, - * {@code WHERE c=1} is idempotent, {@code WHERE c=now()} isn't. - * - *

    This is used internally by the query builder to compute the {@link Statement#isIdempotent()} - * flag on the UPDATE and DELETE statements generated by {@link BuildableQuery#build()} (this is - * not relevant for SELECT statement, which are always idempotent). If a term is ambiguous (for - * example a raw snippet or a call to a user function in the right operands), the builder is - * pessimistic and assumes the term is not idempotent. - */ - boolean isIdempotent(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java deleted file mode 100644 index 05fe10527ee..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.relation; - -public interface TokenRelationBuilder extends ArithmeticRelationBuilder {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java deleted file mode 100644 index 82eccdd94ca..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterKeyspace - extends BuildableQuery, - KeyspaceOptions, - KeyspaceReplicationOptions {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java deleted file mode 100644 index 3112aaf5950..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -public interface AlterKeyspaceStart - extends KeyspaceOptions, KeyspaceReplicationOptions {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java deleted file mode 100644 index 56faffc70a9..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterMaterializedView - extends RelationOptions, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java deleted file mode 100644 index 5739f9ff9bf..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -public interface AlterMaterializedViewStart extends RelationOptions {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java deleted file mode 100644 index 701cec1509b..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterTableAddColumn { - /** - * Adds a column definition in the ALTER TABLE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - AlterTableAddColumnEnd addColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #addColumn(CqlIdentifier, DataType) - * addColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default AlterTableAddColumnEnd addColumn(@NonNull String columnName, @NonNull DataType dataType) { - return addColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** - * Adds a static column definition in the ALTER TABLE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - AlterTableAddColumnEnd addStaticColumn( - @NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #addStaticColumn(CqlIdentifier, DataType) - * addStaticColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default AlterTableAddColumnEnd addStaticColumn( - @NonNull String columnName, @NonNull DataType dataType) { - return addStaticColumn(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java deleted file mode 100644 index d2728082ad3..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterTableAddColumnEnd extends AlterTableAddColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java deleted file mode 100644 index 39d6abd558f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterTableDropColumn { - /** - * Adds column(s) to drop to ALTER TABLE specification. This may be repeated with successive calls - * to drop columns. - */ - @NonNull - AlterTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... columnNames); - - /** Shortcut for {@link #dropColumns(CqlIdentifier...)}. 
*/ - @NonNull - default AlterTableDropColumnEnd dropColumns(@NonNull String... columnNames) { - CqlIdentifier ids[] = new CqlIdentifier[columnNames.length]; - for (int i = 0; i < columnNames.length; i++) { - ids[i] = CqlIdentifier.fromCql(columnNames[i]); - } - return dropColumns(ids); - } - - /** - * Adds a column to drop to ALTER TABLE specification. This may be repeated with successive calls - * to drop columns. Shortcut for {@link #dropColumns(CqlIdentifier...) #dropColumns(columnName)}. - */ - @NonNull - default AlterTableDropColumnEnd dropColumn(@NonNull CqlIdentifier columnName) { - return dropColumns(columnName); - } - - /** - * Shortcut for {@link #dropColumn(CqlIdentifier) dropColumn(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default AlterTableDropColumnEnd dropColumn(@NonNull String columnName) { - return dropColumns(CqlIdentifier.fromCql(columnName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java deleted file mode 100644 index 662f9eb0749..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterTableDropColumnEnd extends AlterTableDropColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java deleted file mode 100644 index 8938b11ca9a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterTableRenameColumn { - - /** - * Adds a column rename to ALTER TABLE specification. This may be repeated with successive calls - * to rename columns. - */ - @NonNull - AlterTableRenameColumnEnd renameColumn(@NonNull CqlIdentifier from, @NonNull CqlIdentifier to); - - /** - * Shortcut for {@link #renameColumn(CqlIdentifier,CqlIdentifier) - * renameField(CqlIdentifier.fromCql(from),CqlIdentifier.fromCql(to))}. - */ - @NonNull - default AlterTableRenameColumnEnd renameColumn(@NonNull String from, @NonNull String to) { - return renameColumn(CqlIdentifier.fromCql(from), CqlIdentifier.fromCql(to)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java deleted file mode 100644 index cb7b5e8699f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterTableRenameColumnEnd extends AlterTableRenameColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java deleted file mode 100644 index f1537073b19..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterTableStart - extends AlterTableWithOptions, - AlterTableAddColumn, - AlterTableDropColumn, - AlterTableRenameColumn { - - /** Completes ALTER TABLE specifying that compact storage should be removed from the table. */ - @NonNull - BuildableQuery dropCompactStorage(); - - /** - * Completes ALTER TABLE specifying the the type of a column should be changed. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #alterColumn(CqlIdentifier,DataType) - * alterColumn(CqlIdentifier.fromCql(columnName,dataType)}. - */ - @NonNull - default BuildableQuery alterColumn(@NonNull String columnName, @NonNull DataType dataType) { - return alterColumn(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java deleted file mode 100644 index c80281a4582..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -public interface AlterTableWithOptions extends RelationOptions {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java deleted file mode 100644 index df1cb4293d6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterTableWithOptionsEnd - extends RelationOptions, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java deleted file mode 100644 index 82d9667c9cc..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterTypeRenameField { - - /** - * Adds a field rename to ALTER TYPE specification. This may be repeated with successive calls to - * rename fields. 
- */ - @NonNull - AlterTypeRenameFieldEnd renameField(@NonNull CqlIdentifier from, @NonNull CqlIdentifier to); - - /** - * Shortcut for {@link #renameField(CqlIdentifier,CqlIdentifier) - * renameField(CqlIdentifier.fromCql(from),CqlIdentifier.fromCql(to))}. - */ - @NonNull - default AlterTypeRenameFieldEnd renameField(@NonNull String from, @NonNull String to) { - return renameField(CqlIdentifier.fromCql(from), CqlIdentifier.fromCql(to)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java deleted file mode 100644 index 18da1aa4c1e..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface AlterTypeRenameFieldEnd extends AlterTypeRenameField, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java deleted file mode 100644 index 6ea197a235f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface AlterTypeStart extends AlterTypeRenameField { - - /** - * Completes ALTER TYPE specifying the the type of a field should be changed. 
- * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - BuildableQuery alterField(@NonNull CqlIdentifier fieldName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #alterField(CqlIdentifier,DataType) - * alterField(CqlIdentifier.fromCql(columnName,dataType)}. - */ - @NonNull - default BuildableQuery alterField(@NonNull String fieldName, @NonNull DataType dataType) { - return alterField(CqlIdentifier.fromCql(fieldName), dataType); - } - - /** - * Completes ALTER TYPE by adding a field definition in the ALTER TYPE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - BuildableQuery addField(@NonNull CqlIdentifier fieldName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #addField(CqlIdentifier, DataType) addField(CqlIdentifier.asCql(fieldName), - * dataType)}. - */ - @NonNull - default BuildableQuery addField(@NonNull String fieldName, @NonNull DataType dataType) { - return addField(CqlIdentifier.fromCql(fieldName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java deleted file mode 100644 index 75d7bf1e681..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateAggregateEnd extends BuildableQuery { - - /** - * Adds INITCOND to the aggregate query. Defines the initial condition, values, of the first - * parameter in the SFUNC. - */ - @NonNull - CreateAggregateEnd withInitCond(@NonNull Term term); - - /** - * Adds FINALFUNC to the create aggregate query. This is used to specify what type is returned - * from the state function. - */ - @NonNull - CreateAggregateEnd withFinalFunc(@NonNull CqlIdentifier finalFunc); - - /** - * Shortcut for {@link #withFinalFunc(CqlIdentifier) - * withFinalFunc(CqlIdentifier.fromCql(finalFuncName))}. - */ - @NonNull - default CreateAggregateEnd withFinalFunc(@NonNull String finalFuncName) { - return withFinalFunc(CqlIdentifier.fromCql(finalFuncName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java deleted file mode 100644 index 4b46ed18f97..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateAggregateStart { - /** - * Adds IF NOT EXISTS to the create aggregate specification. This indicates that the aggregate - * should not be created if it already exists. - */ - @NonNull - CreateAggregateStart ifNotExists(); - - /** - * Adds OR REPLACE to the create aggregate specification. This indicates that the aggregate should - * replace an existing aggregate with the same name if it exists. - */ - @NonNull - CreateAggregateStart orReplace(); - - /** - * Adds a parameter definition in the CREATE AGGREGATE statement. - * - *

    Parameter keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateAggregateStart withParameter(@NonNull DataType paramType); - - /** Adds SFUNC to the create aggregate specification. This is the state function for each row. */ - @NonNull - CreateAggregateStateFunc withSFunc(@NonNull CqlIdentifier sfuncName); - - /** Shortcut for {@link #withSFunc(CqlIdentifier) withSFunc(CqlIdentifier.fromCql(sfuncName))}. */ - @NonNull - default CreateAggregateStateFunc withSFunc(@NonNull String sfuncName) { - return withSFunc(CqlIdentifier.fromCql(sfuncName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java deleted file mode 100644 index 42b774ec8fc..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateAggregateStateFunc { - - /** - * Adds STYPE to the create aggregate query. This is used to specify what type is returned from - * the state function. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateAggregateEnd withSType(@NonNull DataType dataType); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java deleted file mode 100644 index de600a384b2..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface CreateFunctionEnd extends BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java deleted file mode 100644 index 17ae78d4b24..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateFunctionStart { - - /** - * Adds IF NOT EXISTS to the create function specification. This indicates that the function - * should not be created if it already exists. 
- */ - @NonNull - CreateFunctionStart ifNotExists(); - - /** - * Adds OR REPLACE to the create function specification. This indicates that the function should - * replace an existing function with the same name if it exists. - */ - @NonNull - CreateFunctionStart orReplace(); - - /** - * Adds a parameter definition in the CREATE FUNCTION statement. - * - *

    Parameter keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateFunctionStart withParameter(@NonNull CqlIdentifier paramName, @NonNull DataType paramType); - - /** - * Shortcut for {@link #withParameter(CqlIdentifier, DataType) - * withParameter(CqlIdentifier.asCql(paramName), dataType)}. - */ - @NonNull - default CreateFunctionStart withParameter( - @NonNull String paramName, @NonNull DataType paramType) { - return withParameter(CqlIdentifier.fromCql(paramName), paramType); - } - - /** - * Adds RETURNS NULL ON NULL to the create function specification. This indicates that the body of - * the function should be skipped when null input is provided. - */ - @NonNull - CreateFunctionWithNullOption returnsNullOnNull(); - - /** - * Adds CALLED ON NULL to the create function specification. This indicates that the body of the - * function not be skipped when null input is provided. - */ - @NonNull - CreateFunctionWithNullOption calledOnNull(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java deleted file mode 100644 index bc55fd5124a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateFunctionWithLanguage { - - /** - * Adds AS to the create function specification. This is used to specify the body of the function. - * Note that it is expected that the provided body is properly quoted as this method does not make - * that decision for the user. For simple cases, one should wrap the input in single quotes, i.e. - * 'myBody'. If the body itself contains single quotes, one could use a - * postgres-style string literal, which is surrounded in two dollar signs, i.e. $$ myBody $$ - * . - */ - @NonNull - CreateFunctionEnd as(@NonNull String functionBody); - - /** - * Adds AS to the create function specification and quotes the function body. Assumes that if the - * input body contains at least one single quote, to quote the body with two dollar signs, i.e. - * $$ myBody $$, otherwise the body is quoted with single quotes, i.e. - * ' myBody '. If the function body is already quoted {@link #as(String)} should be used - * instead. 
- */ - @NonNull - default CreateFunctionEnd asQuoted(@NonNull String functionBody) { - if (functionBody.contains("'")) { - return as("$$ " + functionBody + " $$"); - } else { - return as('\'' + functionBody + '\''); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java deleted file mode 100644 index 037dd1cd522..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateFunctionWithNullOption { - /** - * Adds RETURNS to the create function specification. 
This is used to specify what type is - * returned from the function. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateFunctionWithType returnsType(@NonNull DataType dataType); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java deleted file mode 100644 index b78780bfa7c..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateFunctionWithType { - /** - * Adds LANGUAGE to the create function specification. This is used to specify what language is - * used in the function body. - */ - @NonNull - CreateFunctionWithLanguage withLanguage(@NonNull String language); - - /** - * Adds "LANGUAGE java" to create function specification. Shortcut for {@link - * #withLanguage(String) withLanguage("java")}. 
- */ - @NonNull - default CreateFunctionWithLanguage withJavaLanguage() { - return withLanguage("java"); - } - - /** - * Adds "LANGUAGE javascript" to create function specification. Shortcut for {@link - * #withLanguage(String) withLanguage("javascript")}. - */ - @NonNull - default CreateFunctionWithLanguage withJavaScriptLanguage() { - return withLanguage("javascript"); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java deleted file mode 100644 index ed54fd2312f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface CreateIndex extends OptionProvider, BuildableQuery { - - /** - * Convenience method for when {@link CreateIndexStart#usingSASI()} is used, provides SASI - * specific options that are provided under the index 'OPTIONS' property. Is equivalent to {@link - * #withOption(String, Object) withOption("OPTIONS", sasiOptions)}. - */ - @NonNull - default CreateIndex withSASIOptions(@NonNull Map sasiOptions) { - return withOption("OPTIONS", sasiOptions); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java deleted file mode 100644 index 62c930d4180..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public interface CreateIndexOnTable { - - /** Specifies the column to create the index on. */ - @NonNull - default CreateIndex andColumn(@NonNull CqlIdentifier columnName) { - return andColumn(columnName, null); - } - - /** Shortcut for {@link #andColumn(CqlIdentifier) andColumn(CqlIdentifier.fromCql(columnName)}. */ - @NonNull - default CreateIndex andColumn(@NonNull String columnName) { - return andColumn(CqlIdentifier.fromCql(columnName)); - } - - /** - * Specifies to create the index on the given columns' keys, this must be done against a map - * column. - */ - @NonNull - default CreateIndex andColumnKeys(@NonNull CqlIdentifier columnName) { - return andColumn(columnName, "KEYS"); - } - - /** - * Shortcut for {@link #andColumnKeys(CqlIdentifier) - * andColumnKeys(CqlIdentifier.fromCql(columnName)}. - */ - @NonNull - default CreateIndex andColumnKeys(@NonNull String columnName) { - return andColumnKeys(CqlIdentifier.fromCql(columnName)); - } - - /** - * Specifies to create the index on the given columns' values, this must be done against a - * map column. - */ - @NonNull - default CreateIndex andColumnValues(@NonNull CqlIdentifier columnName) { - return andColumn(columnName, "VALUES"); - } - - /** - * Shortcut for {@link #andColumnValues(CqlIdentifier) - * andColumnValues(CqlIdentifier.fromCql(columnName)}. - */ - @NonNull - default CreateIndex andColumnValues(@NonNull String columnName) { - return andColumnValues(CqlIdentifier.fromCql(columnName)); - } - - /** - * Specifies to create the index on the given columns' entries (key-value pairs), this must be - * done against a map column. 
- */ - @NonNull - default CreateIndex andColumnEntries(@NonNull CqlIdentifier columnName) { - return andColumn(columnName, "ENTRIES"); - } - - /** - * Shortcut for {@link #andColumnEntries(CqlIdentifier) - * andColumnEntries(CqlIdentifier.fromCql(columnName)}. - */ - @NonNull - default CreateIndex andColumnEntries(@NonNull String columnName) { - return andColumnEntries(CqlIdentifier.fromCql(columnName)); - } - - /** - * Specifies to create the index on the given columns' entire value, this must be done against a - * frozen collection column. - */ - @NonNull - default CreateIndex andColumnFull(@NonNull CqlIdentifier columnName) { - return andColumn(columnName, "FULL"); - } - - /** - * Shortcut for {@link #andColumnFull(CqlIdentifier) - * andColumnFull(CqlIdentifier.fromCql(columnName)}. - */ - @NonNull - default CreateIndex andColumnFull(@NonNull String columnName) { - return andColumnFull(CqlIdentifier.fromCql(columnName)); - } - - /** - * Specifies to create the index on a given column with the given index type. This method should - * not be used in the general case, unless there is an additional index type to use beyond KEYS, - * VALUES, ENTRIES, or FULL. - */ - @NonNull - CreateIndex andColumn(@NonNull CqlIdentifier columnName, @Nullable String indexType); - - /** - * Shortcut for {@link #andColumn(CqlIdentifier,String) - * andColumn(CqlIdentifier.fromCql(columnName),indexType}. 
- */ - @NonNull - default CreateIndex andColumn(@NonNull String columnName, @NonNull String indexType) { - return andColumn(CqlIdentifier.fromCql(columnName), indexType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java deleted file mode 100644 index 3786b8346b6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Map; - -public interface CreateIndexStart { - - /** - * Adds IF NOT EXISTS to the create index specification. This indicates that the index should not - * be created if it already exists. - */ - @NonNull - CreateIndexStart ifNotExists(); - - /** - * Adds CUSTOM specification to the index for the given class name. 
The class name will added to - * the end of the CREATE INDEX specification with USING 'classname'. - */ - @NonNull - CreateIndexStart custom(@NonNull String className); - - /** - * Declares that the index is a "SSTable Attached Secondary Index" (SASI) type index. This is a - * custom index with the class org.apache.cassandra.index.SASIIndex. - * - * @see CreateIndex#withSASIOptions(Map) - */ - @NonNull - default CreateIndexStart usingSASI() { - return custom("org.apache.cassandra.index.sasi.SASIIndex"); - } - - /** Indicates which table this index is on. */ - @NonNull - CreateIndexOnTable onTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table); - - /** - * Indicates which table this index is on. This assumes the keyspace name is already qualified for - * the Session or Statement. - */ - @NonNull - default CreateIndexOnTable onTable(@NonNull CqlIdentifier table) { - return onTable(null, table); - } - - /** - * Shortcut for {@link #onTable(CqlIdentifier,CqlIdentifier) - * onTable(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(table))}. - */ - @NonNull - default CreateIndexOnTable onTable(@Nullable String keyspace, @NonNull String table) { - return onTable( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } - - /** Shortcut for {@link #onTable(CqlIdentifier) onTable(CqlIdentifier.fromCql(table))}. 
*/ - @NonNull - default CreateIndexOnTable onTable(@NonNull String table) { - return onTable(CqlIdentifier.fromCql(table)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java deleted file mode 100644 index 098a1596db0..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface CreateKeyspace extends BuildableQuery, KeyspaceOptions {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java deleted file mode 100644 index 57abf5f35bc..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateKeyspaceStart extends KeyspaceReplicationOptions { - /** - * Adds IF NOT EXISTS to the create keyspace specification. This indicates that the keyspace - * should not be created it already exists. 
- */ - @NonNull - CreateKeyspaceStart ifNotExists(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java deleted file mode 100644 index e231356d4c2..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface CreateMaterializedView - extends BuildableQuery, RelationStructure {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java deleted file mode 100644 index 14b8c7583f6..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateMaterializedViewPrimaryKey - extends CreateMaterializedViewPrimaryKeyStart, - RelationStructure, - BuildableQuery { - /** - * Adds a clustering column to primary key definition. - * - *

    Clustering columns are added in the order of their declaration. - */ - @NonNull - CreateMaterializedViewPrimaryKey withClusteringColumn(@NonNull CqlIdentifier columnName); - - /** - * Shortcut for {@link #withClusteringColumn(CqlIdentifier) - * withClusteringColumn(CqlIdentifier.asCql(columnName)}. - */ - @NonNull - default CreateMaterializedViewPrimaryKey withClusteringColumn(@NonNull String columnName) { - return withClusteringColumn(CqlIdentifier.fromCql(columnName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java deleted file mode 100644 index c214c01f6d1..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateMaterializedViewPrimaryKeyStart { - - /** - * Adds a partition key to primary key definition. - * - *

    Partition keys are added in the order of their declaration. - */ - @NonNull - CreateMaterializedViewPrimaryKey withPartitionKey(@NonNull CqlIdentifier columnName); - - /** - * Shortcut for {@link #withPartitionKey(CqlIdentifier) - * withPartitionKey(CqlIdentifier.asCql(columnName)}. - */ - @NonNull - default CreateMaterializedViewPrimaryKey withPartitionKey(@NonNull String columnName) { - return withPartitionKey(CqlIdentifier.fromCql(columnName)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java deleted file mode 100644 index 28f141f5017..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Arrays; - -public interface CreateMaterializedViewSelection { - - /** Selects all columns from the base table. */ - @NonNull - CreateMaterializedViewWhereStart all(); - - /** Selects a particular column by its CQL identifier. */ - @NonNull - CreateMaterializedViewSelectionWithColumns column(@NonNull CqlIdentifier columnName); - - /** Shortcut for {@link #column(CqlIdentifier) column(CqlIdentifier.fromCql(columnName))} */ - @NonNull - default CreateMaterializedViewSelectionWithColumns column(@NonNull String columnName) { - return column(CqlIdentifier.fromCql(columnName)); - } - - /** - * Convenience method to select multiple simple columns at once, as in {@code SELECT a,b,c}. - * - *

    This is the same as calling {@link #column(CqlIdentifier)} for each element. - */ - @NonNull - CreateMaterializedViewSelectionWithColumns columnsIds(@NonNull Iterable columnIds); - - /** Var-arg equivalent of {@link #columnsIds(Iterable)}. */ - @NonNull - default CreateMaterializedViewSelectionWithColumns columns(@NonNull CqlIdentifier... columnIds) { - return columnsIds(Arrays.asList(columnIds)); - } - - /** - * Convenience method to select multiple simple columns at once, as in {@code SELECT a,b,c}. - * - *

    This is the same as calling {@link #column(String)} for each element. - */ - @NonNull - default CreateMaterializedViewSelectionWithColumns columns( - @NonNull Iterable columnNames) { - return columnsIds(CqlIdentifiers.wrap(columnNames)); - } - - /** Var-arg equivalent of {@link #columns(Iterable)}. */ - @NonNull - default CreateMaterializedViewSelectionWithColumns columns(@NonNull String... columnNames) { - return columns(Arrays.asList(columnNames)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java deleted file mode 100644 index ff47c9b0c1a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -public interface CreateMaterializedViewSelectionWithColumns - extends CreateMaterializedViewSelection, CreateMaterializedViewWhereStart {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java deleted file mode 100644 index 1926d6ce83c..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; - -public interface CreateMaterializedViewStart { - - /** - * Adds IF NOT EXISTS to the create table specification. This indicates that the table should not - * be created if it already exists. - */ - @NonNull - CreateMaterializedViewStart ifNotExists(); - - /** - * Specifies the base table for the materialized view. 
This assumes the keyspace name is already - * qualified for the Session or Statement. - */ - @NonNull - CreateMaterializedViewSelection asSelectFrom(@NonNull CqlIdentifier table); - - /** - * Shortcut for {@link #asSelectFrom(CqlIdentifier) asSelectFrom(CqlIdentifier.fromCql(table)}. - */ - @NonNull - default CreateMaterializedViewSelection asSelectFrom(@NonNull String table) { - return asSelectFrom(CqlIdentifier.fromCql(table)); - } - - /** Specifies the base table for the materialized view. */ - @NonNull - CreateMaterializedViewSelection asSelectFrom( - @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier table); - - /** - * Shortcut for {@link #asSelectFrom(CqlIdentifier,CqlIdentifier) - * asSelectFrom(CqlIdentifier.fromCql(keyspace),CqlIdentifier.fromCql(table)}. - */ - @NonNull - default CreateMaterializedViewSelection asSelectFrom( - @Nullable String keyspace, @NonNull String table) { - return asSelectFrom( - keyspace == null ? null : CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(table)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java deleted file mode 100644 index e2d1bf9b26c..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -public interface CreateMaterializedViewWhere - extends CreateMaterializedViewWhereStart, CreateMaterializedViewPrimaryKeyStart {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java deleted file mode 100644 index e7af2c07dc7..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; - -public interface CreateMaterializedViewWhereStart - extends OngoingWhereClause {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java deleted file mode 100644 index 08a6f85c424..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateTable extends BuildableQuery, OngoingPartitionKey, CreateTableWithOptions { - - /** - * Adds a clustering column definition in the CREATE TABLE statement. - * - *

    This includes the column declaration (you don't need an additional {@link - * #withColumn(CqlIdentifier, DataType) addColumn} call). - * - *

    Clustering key columns are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateTable withClusteringColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withClusteringColumn(CqlIdentifier, DataType) - * withClusteringColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateTable withClusteringColumn(@NonNull String columnName, @NonNull DataType dataType) { - return withClusteringColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** - * Adds a column definition in the CREATE TABLE statement. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateTable withColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withColumn(CqlIdentifier, DataType) - * withColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateTable withColumn(@NonNull String columnName, @NonNull DataType dataType) { - return withColumn(CqlIdentifier.fromCql(columnName), dataType); - } - - /** - * Adds a static column definition in the CREATE TABLE statement. - * - *

    This includes the column declaration (you don't need an additional {@link - * #withColumn(CqlIdentifier, DataType) addColumn} call). - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateTable withStaticColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withStaticColumn(CqlIdentifier, DataType) - * withStaticColumn(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateTable withStaticColumn(@NonNull String columnName, @NonNull DataType dataType) { - return withStaticColumn(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java deleted file mode 100644 index 82949bceb56..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateTableStart extends OngoingPartitionKey { - - /** - * Adds IF NOT EXISTS to the create table specification. This indicates that the table should not - * be created if it already exists. - */ - @NonNull - CreateTableStart ifNotExists(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java deleted file mode 100644 index c7bddf575fb..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.internal.querybuilder.schema.RawOptionsWrapper; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface CreateTableWithOptions - extends BuildableQuery, RelationStructure { - - /** Enables COMPACT STORAGE in the CREATE TABLE statement. */ - @NonNull - CreateTableWithOptions withCompactStorage(); - - /** Attaches custom metadata to CQL table definition. */ - @NonNull - @CheckReturnValue - default CreateTableWithOptions withExtensions(@NonNull Map extensions) { - return withOption("extensions", Maps.transformValues(extensions, RawOptionsWrapper::of)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java deleted file mode 100644 index 9c9fc6e62fe..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; - -public interface CreateType extends BuildableQuery, OngoingCreateType {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java deleted file mode 100644 index ab19bd7ad84..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CreateTypeStart extends OngoingCreateType { - - /** - * Adds IF NOT EXISTS to the create type specification. This indicates that the type should not be - * created if it already exists. 
- */ - @NonNull - CreateTypeStart ifNotExists(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java deleted file mode 100644 index 418f806395f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface Drop extends BuildableQuery { - - /** Adds 'IF EXISTS" to the drop specification. 
*/ - @NonNull - Drop ifExists(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java deleted file mode 100644 index 16da0f13dd2..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface KeyspaceOptions> - extends OptionProvider { - - /** - * Adjusts durable writes configuration for this keyspace. If set to false, data written to the - * keyspace will bypass the commit log. 
- */ - @NonNull - @CheckReturnValue - default SelfT withDurableWrites(boolean durableWrites) { - return withOption("durable_writes", durableWrites); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java deleted file mode 100644 index 95113cf987f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface KeyspaceReplicationOptions { - /** - * Adds SimpleStrategy replication options with the given replication factor. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - default TargetT withSimpleStrategy(int replicationFactor) { - ImmutableMap replication = - ImmutableMap.builder() - .put("class", "SimpleStrategy") - .put("replication_factor", replicationFactor) - .build(); - - return withReplicationOptions(replication); - } - - /** - * Adds NetworkTopologyStrategy replication options with the given data center replication - * factors. - * - *

    Note that using this will overwrite any previous use of this method or {@link - * #withSimpleStrategy(int)}. - * - * @param replications Mapping of data center name to replication factor to use for that data - * center. - */ - @NonNull - default TargetT withNetworkTopologyStrategy(@NonNull Map replications) { - ImmutableMap.Builder replicationBuilder = - ImmutableMap.builder().put("class", "NetworkTopologyStrategy"); - - for (Map.Entry replication : replications.entrySet()) { - replicationBuilder.put(replication.getKey(), replication.getValue()); - } - - return withReplicationOptions(replicationBuilder.build()); - } - - /** - * Adds 'replication' options. One should only use this when they have a custom replication - * strategy, otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link - * #withNetworkTopologyStrategy(Map)}. - */ - @NonNull - TargetT withReplicationOptions(@NonNull Map replicationOptions); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java deleted file mode 100644 index 18409b349b9..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface OngoingCreateType { - - /** - * Adds a field definition in the CREATE TYPE statement. - * - *

    Fields keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateType withField(@NonNull CqlIdentifier identifier, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withField(CqlIdentifier, DataType)} (CqlIdentifier, DataType) - * withField(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateType withField(@NonNull String columnName, @NonNull DataType dataType) { - return withField(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java deleted file mode 100644 index bf3f70f982a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface OngoingPartitionKey { - - /** - * Adds a partition key column definition. - * - *

    This includes the column declaration (you don't need an additional {@link - * CreateTable#withColumn(CqlIdentifier, DataType) addColumn} call). - * - *

    Partition keys are added in the order of their declaration. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. - */ - @NonNull - CreateTable withPartitionKey(@NonNull CqlIdentifier columnName, @NonNull DataType dataType); - - /** - * Shortcut for {@link #withPartitionKey(CqlIdentifier, DataType) - * withPartitionKey(CqlIdentifier.asCql(columnName), dataType)}. - */ - @NonNull - default CreateTable withPartitionKey(@NonNull String columnName, @NonNull DataType dataType) { - return withPartitionKey(CqlIdentifier.fromCql(columnName), dataType); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java deleted file mode 100644 index 5a503ffa93a..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface OptionProvider> { - /** - * Adds a free-form option. This is useful for custom options or new options that have not yet - * been added to this API. - */ - @NonNull - @CheckReturnValue - SelfT withOption(@NonNull String name, @NonNull Object value); - - @NonNull - Map getOptions(); -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java deleted file mode 100644 index 49b342acb7f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.RowsPerPartition; - -import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; -import com.datastax.oss.driver.api.querybuilder.schema.compaction.CompactionStrategy; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface RelationOptions> - extends OptionProvider { - - /** - * Defines the false-positive probability for SSTable bloom filters. - * - *

    If no call was made to this method, the default value set is: - * - *

      - *
    • 0.01 for the size-tiered compaction strategy; - *
    • 0.1 for the leveled compaction strategy. - *
    - */ - @NonNull - @CheckReturnValue - default SelfT withBloomFilterFpChance(double bloomFilterFpChance) { - return withOption("bloom_filter_fp_chance", bloomFilterFpChance); - } - - /** - * Defines whether or not change data capture is enabled. - * - *

    Note that using this option with a version of Apache Cassandra less than 3.8 or DataStax - * Enterprise 5.0 will raise a syntax error. - * - *

    If no call is made to this method, the default value set is {@code false}. - */ - @NonNull - @CheckReturnValue - default SelfT withCDC(boolean enabled) { - return withOption("cdc", enabled); - } - - /** - * Defines the crc check chance. - * - *

    Note that using this option with a version of Apache Cassandra less than 3.0 will raise a - * syntax error. - */ - @NonNull - @CheckReturnValue - default SelfT withCRCCheckChance(double crcCheckChance) { - return withOption("crc_check_chance", crcCheckChance); - } - - /** - * Defines the caching criteria. - * - *

    If no call is made to this method, the default value is determined by the global caching - * properties in cassandra.yaml. - * - * @param keys If true, caches all keys, otherwise none. - * @param rowsPerPartition Whether to cache ALL, NONE or the first N rows per partition. - */ - @NonNull - @CheckReturnValue - default SelfT withCaching(boolean keys, @NonNull RowsPerPartition rowsPerPartition) { - return withOption( - "caching", - ImmutableMap.of( - "keys", keys ? "ALL" : "NONE", "rows_per_partition", rowsPerPartition.getValue())); - } - - /** Defines documentation for this relation. */ - @NonNull - @CheckReturnValue - default SelfT withComment(@NonNull String comment) { - return withOption("comment", comment); - } - - /** - * Defines the compaction strategy to use. - * - * @see SchemaBuilder#sizeTieredCompactionStrategy() - * @see SchemaBuilder#leveledCompactionStrategy() - * @see SchemaBuilder#timeWindowCompactionStrategy() - */ - @NonNull - @CheckReturnValue - default SelfT withCompaction(@NonNull CompactionStrategy compactionStrategy) { - return withOption("compaction", compactionStrategy.getOptions()); - } - - /** - * @deprecated This method only exists for backward compatibility. Will not work with Apache - * Cassandra 5.0 or later. Use {@link #withLZ4Compression(int)} instead. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT withLZ4Compression(int chunkLengthKB, double crcCheckChance) { - return withCompression("LZ4Compressor", chunkLengthKB, crcCheckChance); - } - - /** - * Configures compression using the LZ4 algorithm with the given chunk length. - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withLZ4Compression(int chunkLengthKB) { - return withCompression("LZ4Compressor", chunkLengthKB); - } - - /** - * Configures compression using the LZ4 algorithm using the default configuration (64kb - * chunk_length). 
- * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withLZ4Compression() { - return withCompression("LZ4Compressor"); - } - - /** - * Configures compression using the Zstd algorithm with the given chunk length. - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withZstdCompression(int chunkLengthKB) { - return withCompression("ZstdCompressor", chunkLengthKB); - } - - /** - * Configures compression using the Zstd algorithm using the default configuration (64kb - * chunk_length). - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withZstdCompression() { - return withCompression("ZstdCompressor"); - } - - /** - * @deprecated This method only exists for backward compatibility. Will not work with Apache - * Cassandra 5.0 or later due to removal of deprecated table properties (CASSANDRA-18742). Use - * {@link #withSnappyCompression(int)} instead. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT withSnappyCompression(int chunkLengthKB, double crcCheckChance) { - return withCompression("SnappyCompressor", chunkLengthKB, crcCheckChance); - } - - /** - * Configures compression using the Snappy algorithm with the given chunk length. - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withSnappyCompression(int chunkLengthKB) { - return withCompression("SnappyCompressor", chunkLengthKB); - } - - /** - * Configures compression using the Snappy algorithm using the default configuration (64kb - * chunk_length). - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withSnappyCompression() { - return withCompression("SnappyCompressor"); - } - - /** - * @deprecated This method only exists for backward compatibility. Will not work with Apache - * Cassandra 5.0 or later due to removal of deprecated table properties (CASSANDRA-18742). 
Use - * {@link #withDeflateCompression(int)} instead. - */ - @Deprecated - @NonNull - @CheckReturnValue - default SelfT withDeflateCompression(int chunkLengthKB, double crcCheckChance) { - return withCompression("DeflateCompressor", chunkLengthKB, crcCheckChance); - } - - /** - * Configures compression using the Deflate algorithm with the given chunk length. - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withDeflateCompression(int chunkLengthKB) { - return withCompression("DeflateCompressor", chunkLengthKB); - } - - /** - * Configures compression using the Deflate algorithm using the default configuration (64kb - * chunk_length). - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withDeflateCompression() { - return withCompression("DeflateCompressor"); - } - - /** - * Configures compression using the given algorithm using the default configuration (64kb - * chunk_length). - * - *

    Unless specifying a custom compression algorithm implementation, it is recommended to use - * {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link - * #withDeflateCompression()}. - * - * @see #withCompression(String, int) - */ - @NonNull - @CheckReturnValue - default SelfT withCompression(@NonNull String compressionAlgorithmName) { - return withOption("compression", ImmutableMap.of("class", compressionAlgorithmName)); - } - - /** - * Configures compression using the given algorithm, chunk length. - * - *

    Unless specifying a custom compression algorithm implementation, it is recommended to use - * {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link - * #withDeflateCompression()}. - * - * @param compressionAlgorithmName The class name of the compression algorithm. - * @param chunkLengthKB The chunk length in KB of compression blocks. Defaults to 64. - */ - @NonNull - @CheckReturnValue - default SelfT withCompression(@NonNull String compressionAlgorithmName, int chunkLengthKB) { - return withOption( - "compression", - ImmutableMap.of("class", compressionAlgorithmName, "chunk_length_in_kb", chunkLengthKB)); - } - - /** - * @deprecated This method only exists for backward compatibility. Will not work with Apache - * Cassandra 5.0 or later due to removal of deprecated table properties (CASSANDRA-18742). Use - * {@link #withCompression(String, int)} instead. - */ - @NonNull - @CheckReturnValue - @Deprecated - default SelfT withCompression( - @NonNull String compressionAlgorithmName, int chunkLengthKB, double crcCheckChance) { - return withOption( - "compression", - ImmutableMap.of( - "class", - compressionAlgorithmName, - "chunk_length_kb", - chunkLengthKB, - "crc_check_chance", - crcCheckChance)); - } - - /** Defines that compression should be disabled. */ - @NonNull - @CheckReturnValue - default SelfT withNoCompression() { - return withOption("compression", ImmutableMap.of("sstable_compression", "")); - } - - /** - * Defines the probability of read repairs being invoked over all replicas in the current data - * center. - * - *

    If no call is made to this method, the default value set is 0.0. - * - * @param dcLocalReadRepairChance the probability. - * @return this {@code TableOptions} object. - */ - @NonNull - @CheckReturnValue - default SelfT withDcLocalReadRepairChance(double dcLocalReadRepairChance) { - return withOption("dclocal_read_repair_chance", dcLocalReadRepairChance); - } - - /** - * Defines the default 'time to live' (expiration time) of writes in seconds. - * - *

    If no call is made to this method, the default value is 0 (no TTL). - */ - @NonNull - @CheckReturnValue - default SelfT withDefaultTimeToLiveSeconds(int ttl) { - return withOption("default_time_to_live", ttl); - } - - /** - * Defines the time to wait before garbage collecting tombstones (deletion markers). - * - *

    The default value allows a great deal of time for consistency to be achieved prior to - * deletion. In many deployments this interval can be reduced, and in a single-node cluster it can - * be safely set to zero. - * - *

    If no call is made to this method, the default value set is 864000 secs (10 days). - */ - @NonNull - @CheckReturnValue - default SelfT withGcGraceSeconds(int gcGraceSeconds) { - return withOption("gc_grace_seconds", gcGraceSeconds); - } - - /** - * Defines the memtable flush period in milliseconds. - * - *

    If set, this forces flushing of memtables after the specified time elapses. - * - *

    If no call is made to this method, the default value is 0 (unset). - */ - @NonNull - @CheckReturnValue - default SelfT withMemtableFlushPeriodInMs(int memtableFlushPeriodInMs) { - return withOption("memtable_flush_period_in_ms", memtableFlushPeriodInMs); - } - - /** - * Defines the minimum index interval. This is the gap between index entries in the index summary. - * A lower value will increase the size of the index (more RAM usage) but potentially improve disk - * I/O. - * - *

    If no call is made to this method, the default value set is 128. - */ - @NonNull - @CheckReturnValue - default SelfT withMinIndexInterval(int min) { - return withOption("min_index_interval", min); - } - - /** - * Defines the maximum index interval. - * - *

    If no call is made to this method, the default value set is 2048. - * - * @see #withMinIndexInterval(int) - */ - @NonNull - @CheckReturnValue - default SelfT withMaxIndexInterval(int max) { - return withOption("max_index_interval", max); - } - - /** - * Defines the probability with which read repairs should be invoked on non-quorum reads. The - * value must be between 0 and 1. - * - *

    If no call is made to this method, the default value set is 0.1. - */ - @NonNull - @CheckReturnValue - default SelfT withReadRepairChance(double readRepairChance) { - return withOption("read_repair_chance", readRepairChance); - } - - /** - * Defines the configuration for coordinator to replica speculative retries. - * - *

    This overrides the normal read timeout when read_repair_chance is not 1.0, sending a request - * to other replica(s) to service reads. - * - *

    Valid values include: - * - *

      - *
    • ALWAYS: Retry reads of all replicas. - *
    • Xpercentile: Retry reads based on the effect on throughput and latency. - *
    • Yms: Retry reads after specified milliseconds. - *
    • NONE: Do not retry reads. - *
    - * - *

    Using the speculative retry property, you can configure rapid read protection in Cassandra - * 2.0.2 and later. Use this property to retry a request after some milliseconds have passed or - * after a percentile of the typical read latency has been reached, which is tracked per table. - * - *

    If no call is made to this method, the default value set is {@code 99percentile}. - */ - @NonNull - @CheckReturnValue - default SelfT withSpeculativeRetry(@NonNull String speculativeRetry) { - return withOption("speculative_retry", speculativeRetry); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java deleted file mode 100644 index 3716cd03256..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -public interface RelationStructure> - extends RelationOptions { - - /** - * Adds the provided CLUSTERING ORDER. - * - *

    They will be appended in the iteration order of the provided map. If an ordering was already - * defined for a given identifier, it will be removed and the new ordering will appear in its - * position in the provided map. - */ - @NonNull - @CheckReturnValue - SelfT withClusteringOrderByIds(@NonNull Map orderings); - - /** - * Shortcut for {@link #withClusteringOrderByIds(Map)} with the columns specified as - * case-insensitive names. They will be wrapped with {@link CqlIdentifier#fromCql(String)}. - * - *

    Note that it's possible for two different case-sensitive names to resolve to the same - * identifier, for example "foo" and "Foo"; if this happens, a runtime exception will be thrown. - */ - @NonNull - @CheckReturnValue - default SelfT withClusteringOrder(@NonNull Map orderings) { - return withClusteringOrderByIds(CqlIdentifiers.wrapKeys(orderings)); - } - - /** - * Adds the provided clustering order. - * - *

    If clustering order was already defined for this identifier, it will be removed and the new - * clause will be appended at the end of the current clustering order. - */ - @NonNull - @CheckReturnValue - SelfT withClusteringOrder(@NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order); - - /** - * Shortcut for {@link #withClusteringOrder(CqlIdentifier, ClusteringOrder) - * withClusteringOrder(CqlIdentifier.fromCql(columnName), order)}. - */ - @NonNull - @CheckReturnValue - default SelfT withClusteringOrder(@NonNull String columnName, @NonNull ClusteringOrder order) { - return withClusteringOrder(CqlIdentifier.fromCql(columnName), order); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java deleted file mode 100644 index 922f596b603..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.schema.compaction; - -import com.datastax.oss.driver.api.querybuilder.schema.OptionProvider; -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface CompactionStrategy> - extends OptionProvider { - - @NonNull - @CheckReturnValue - default SelfT withEnabled(boolean enabled) { - return withOption("enabled", enabled); - } - - @NonNull - @CheckReturnValue - default SelfT withTombstoneCompactionIntervalInSeconds(int seconds) { - return withOption("tombstone_compaction_interval", seconds); - } - - @NonNull - @CheckReturnValue - default SelfT withTombstoneThreshold(double threshold) { - return withOption("tombstone_threshold", threshold); - } - - @NonNull - @CheckReturnValue - default SelfT withUncheckedTombstoneCompaction(boolean enabled) { - return withOption("unchecked_tombstone_compaction", enabled); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java deleted file mode 100644 index 5839a2155a9..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema.compaction; - -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface LeveledCompactionStrategy> - extends CompactionStrategy { - - @NonNull - @CheckReturnValue - default SelfT withSSTableSizeInMB(int ssTableSizeInMB) { - return withOption("sstable_size_in_mb", ssTableSizeInMB); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java deleted file mode 100644 index b33f6d73744..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema.compaction; - -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface SizeTieredCompactionStrategy> - extends CompactionStrategy { - - @NonNull - @CheckReturnValue - default SelfT withMaxThreshold(int maxThreshold) { - return withOption("max_threshold", maxThreshold); - } - - @NonNull - @CheckReturnValue - default SelfT withMinThreshold(int minThreshold) { - return withOption("min_threshold", minThreshold); - } - - @NonNull - @CheckReturnValue - default SelfT withMinSSTableSizeInBytes(long bytes) { - return withOption("min_sstable_size", bytes); - } - - @NonNull - @CheckReturnValue - default SelfT withOnlyPurgeRepairedTombstones(boolean enabled) { - return withOption("only_purge_repaired_tombstones", enabled); - } - - @NonNull - @CheckReturnValue - default SelfT withBucketHigh(double bucketHigh) { - return withOption("bucket_high", bucketHigh); - } - - @NonNull - @CheckReturnValue - default SelfT withBucketLow(double bucketHigh) { - return withOption("bucket_low", bucketHigh); - } - - // 2.1 only - @NonNull - @CheckReturnValue - default SelfT withColdReadsToOmit(double ratio) { - return withOption("cold_reads_to_omit", ratio); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java deleted file mode 100644 index 
a6a1a129da5..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.schema.compaction; - -import edu.umd.cs.findbugs.annotations.CheckReturnValue; -import edu.umd.cs.findbugs.annotations.NonNull; - -public interface TimeWindowCompactionStrategy> - extends CompactionStrategy, SizeTieredCompactionStrategy { - - enum CompactionWindowUnit { - MINUTES, - HOURS, - DAYS - } - - enum TimestampResolution { - MICROSECONDS, - MILLISECONDS - } - - @NonNull - @CheckReturnValue - default SelfT withCompactionWindow(long size, @NonNull CompactionWindowUnit unit) { - return withOption("compaction_window_size", size) - .withOption("compaction_window_unit", unit.toString()); - } - - @NonNull - @CheckReturnValue - default SelfT withUnsafeAggressiveSSTableExpiration(boolean enabled) { - return withOption("unsafe_aggressive_sstable_expiration", enabled); - } - - @NonNull - @CheckReturnValue - default SelfT withTimestampResolution(@NonNull TimestampResolution timestampResolution) { - return withOption("timestamp_resolution", 
timestampResolution.toString()); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/AnnOrderingClause.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/AnnOrderingClause.java deleted file mode 100644 index f7163a30160..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/AnnOrderingClause.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import edu.umd.cs.findbugs.annotations.NonNull; - -/** - * Concrete implementation of {@link OrderingClause} which supports ordering by the adjacent - * nearest-neighbor (ANN) calculation. This usage is primarily used for vector calculations. 
- */ -public class AnnOrderingClause extends OrderingClause { - - private final CqlIdentifier identifier; - private final CqlVector vector; - - AnnOrderingClause(CqlIdentifier identifier, CqlVector vector) { - - this.identifier = identifier; - this.vector = vector; - } - - public static AnnOrderingClause create(CqlIdentifier identifier, CqlVector vector) { - return new AnnOrderingClause(identifier, vector); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - builder.append(" ORDER BY ").append(this.identifier.asCql(true)).append(" ANN OF "); - QueryBuilder.literal(this.vector).appendTo(builder); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/ColumnsOrderingClause.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/ColumnsOrderingClause.java deleted file mode 100644 index 2f03a979138..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/ColumnsOrderingClause.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Map; - -/** - * Concrete implementation of {@link OrderingClause} which supports ordering by specified columns. - * This usages is the default ORDER BY syntax for Apache Cassandra. - */ -public class ColumnsOrderingClause extends OrderingClause { - - private final ImmutableMap orderings; - - ColumnsOrderingClause(ImmutableMap orderings) { - - this.orderings = orderings; - } - - public static ColumnsOrderingClause create() { - return new ColumnsOrderingClause(ImmutableMap.of()); - } - - public ColumnsOrderingClause add( - @NonNull CqlIdentifier identifier, @NonNull ClusteringOrder order) { - return new ColumnsOrderingClause( - ImmutableCollections.append(this.orderings, identifier, order)); - } - - public ColumnsOrderingClause add(@NonNull Map orderMap) { - return new ColumnsOrderingClause(ImmutableCollections.concat(this.orderings, orderMap)); - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - - boolean first = true; - for (Map.Entry entry : orderings.entrySet()) { - if (first) { - builder.append(" ORDER BY "); - first = false; - } else { - builder.append(","); - } - builder.append(entry.getKey().asCql(true)).append(" ").append(entry.getValue().name()); - } - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java deleted file mode 100644 index dcf59daf06f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java +++ 
/dev/null @@ -1,828 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.Map; - -/** - * A SELECT query that accepts additional selectors (that is, elements in the SELECT clause to - * return as columns in the result set, as in: {@code SELECT count(*), sku, price...}). - */ -public interface OngoingSelection { - - /** - * Adds a selector. - * - *

    To create the argument, use one of the factory methods in {@link Selector}, for example - * {@link Selector#column(CqlIdentifier) column}. This type also provides shortcuts to create and - * add the selector in one call, for example {@link #column(CqlIdentifier)} for {@code - * selector(Selector.column(...))}. - * - *

    If you add multiple selectors as once, consider {@link #selectors(Iterable)} as a more - * efficient alternative. - */ - @NonNull - Select selector(@NonNull Selector selector); - - /** - * Adds multiple selectors at once. - * - *

    This is slightly more efficient than adding the selectors one by one (since the underlying - * implementation of this object is immutable). - * - *

    To create the arguments, use one of the factory methods in {@link Selector}, for example - * {@link Selector#column(CqlIdentifier) column}. - * - * @throws IllegalArgumentException if one of the selectors is {@link Selector#all()} ({@code *} - * can only be used on its own). - * @see #selector(Selector) - */ - @NonNull - Select selectors(@NonNull Iterable additionalSelectors); - - /** Var-arg equivalent of {@link #selectors(Iterable)}. */ - @NonNull - default Select selectors(@NonNull Selector... additionalSelectors) { - return selectors(Arrays.asList(additionalSelectors)); - } - - /** - * Selects all columns, as in {@code SELECT *}. - * - *

    This will clear any previously configured selector. Similarly, if any other selector is - * added later, it will cancel this one. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.all())}. - * - * @see Selector#all() - */ - @NonNull - default Select all() { - return selector(Selector.all()); - } - - /** - * Selects the count of all returned rows, as in {@code SELECT count(*)}. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.countAll())}. - * - * @see Selector#countAll() - */ - @NonNull - default Select countAll() { - return selector(Selector.countAll()); - } - - /** - * Selects a particular column by its CQL identifier. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.column(columnId))}. - * - * @see Selector#column(CqlIdentifier) - */ - @NonNull - default Select column(@NonNull CqlIdentifier columnId) { - return selector(Selector.column(columnId)); - } - - /** Shortcut for {@link #column(CqlIdentifier) column(CqlIdentifier.fromCql(columnName))} */ - @NonNull - default Select column(@NonNull String columnName) { - return column(CqlIdentifier.fromCql(columnName)); - } - - /** - * Convenience method to select multiple simple columns at once, as in {@code SELECT a,b,c}. - * - *

    This is the same as calling {@link #column(CqlIdentifier)} for each element. - */ - @NonNull - default Select columnsIds(@NonNull Iterable columnIds) { - return selectors(Iterables.transform(columnIds, Selector::column)); - } - - /** Var-arg equivalent of {@link #columnsIds(Iterable)}. */ - @NonNull - default Select columns(@NonNull CqlIdentifier... columnIds) { - return columnsIds(Arrays.asList(columnIds)); - } - - /** - * Convenience method to select multiple simple columns at once, as in {@code SELECT a,b,c}. - * - *

    This is the same as calling {@link #column(String)} for each element. - */ - @NonNull - default Select columns(@NonNull Iterable columnNames) { - return selectors(Iterables.transform(columnNames, Selector::column)); - } - - /** Var-arg equivalent of {@link #columns(Iterable)}. */ - @NonNull - default Select columns(@NonNull String... columnNames) { - return columns(Arrays.asList(columnNames)); - } - - /** - * Selects the sum of two arguments, as in {@code SELECT col1 + col2}. - * - *

    This is available in Cassandra 4 and above. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.add(left, right))}. - * - * @see Selector#add(Selector, Selector) - */ - @NonNull - default Select add(@NonNull Selector left, @NonNull Selector right) { - return selector(Selector.add(left, right)); - } - - /** - * Selects the difference of two terms, as in {@code SELECT col1 - col2}. - * - *

    This is available in Cassandra 4 and above. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.subtract(left, right))}. - * - * @see Selector#subtract(Selector, Selector) - */ - @NonNull - default Select subtract(@NonNull Selector left, @NonNull Selector right) { - return selector(Selector.subtract(left, right)); - } - - /** - * Selects the product of two arguments, as in {@code SELECT col1 * col2}. - * - *

    This is available in Cassandra 4 and above. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.multiply(left, right))}. - * - *

    The arguments will be parenthesized if they are instances of {@link Selector#add} or {@link - * Selector#subtract}. If they are raw selectors, you might have to parenthesize them yourself. - * - * @see Selector#multiply(Selector, Selector) - */ - @NonNull - default Select multiply(@NonNull Selector left, @NonNull Selector right) { - return selector(Selector.multiply(left, right)); - } - - /** - * Selects the quotient of two arguments, as in {@code SELECT col1 / col2}. - * - *

    This is available in Cassandra 4 and above. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.divide(left, right))}. - * - *

    The arguments will be parenthesized if they are instances of {@link Selector#add} or {@link - * Selector#subtract}. If they are raw selectors, you might have to parenthesize them yourself. - * - * @see Selector#divide(Selector, Selector) - */ - @NonNull - default Select divide(@NonNull Selector left, @NonNull Selector right) { - return selector(Selector.divide(left, right)); - } - - /** - * Selects the remainder of two arguments, as in {@code SELECT col1 % col2}. - * - *

    This is available in Cassandra 4 and above. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.remainder(left, - * right))}. - * - *

    The arguments will be parenthesized if they are instances of {@link Selector#add} or {@link - * Selector#subtract}. If they are raw selectors, you might have to parenthesize them yourself. - * - * @see Selector#remainder(Selector, Selector) - */ - @NonNull - default Select remainder(@NonNull Selector left, @NonNull Selector right) { - return selector(Selector.remainder(left, right)); - } - - /** - * Selects the opposite of an argument, as in {@code SELECT -col1}. - * - *

    This is available in Cassandra 4 and above. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.negate(argument))}. - * - *

    The argument will be parenthesized if it is an instance of {@link Selector#add} or {@link - * Selector#subtract}. If it is a raw selector, you might have to parenthesize it yourself. - * - * @see Selector#negate(Selector) - */ - @NonNull - default Select negate(@NonNull Selector argument) { - return selector(Selector.negate(argument)); - } - - /** - * Selects a field inside of a UDT column, as in {@code SELECT user.name}. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.field(udt, fieldId))}. - * - * @see Selector#field(Selector, CqlIdentifier) - */ - @NonNull - default Select field(@NonNull Selector udt, @NonNull CqlIdentifier fieldId) { - return selector(Selector.field(udt, fieldId)); - } - - /** - * Shortcut for {@link #field(Selector, CqlIdentifier) field(udt, - * CqlIdentifier.fromCql(fieldName))}. - */ - @NonNull - default Select field(@NonNull Selector udt, @NonNull String fieldName) { - return field(udt, CqlIdentifier.fromCql(fieldName)); - } - - /** - * Shortcut to select a UDT field when the UDT is a simple column (as opposed to a more complex - * selection, like a nested UDT). - * - *

    In other words, this is a shortcut for {{@link #field(Selector, CqlIdentifier) - * field(QueryBuilder.column(udtColumnId), fieldId)}. - * - * @see Selector#field(CqlIdentifier, CqlIdentifier) - */ - @NonNull - default Select field(@NonNull CqlIdentifier udtColumnId, @NonNull CqlIdentifier fieldId) { - return field(Selector.column(udtColumnId), fieldId); - } - - /** - * Shortcut for {@link #field(CqlIdentifier, CqlIdentifier) - * field(CqlIdentifier.fromCql(udtColumnName), CqlIdentifier.fromCql(fieldName))}. - * - * @see Selector#field(String, String) - */ - @NonNull - default Select field(@NonNull String udtColumnName, @NonNull String fieldName) { - return field(CqlIdentifier.fromCql(udtColumnName), CqlIdentifier.fromCql(fieldName)); - } - - /** - * Selects an element in a collection column, as in {@code SELECT m['key']}. - * - *

    As of Cassandra 4, this is only allowed for map and set columns. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.element(collection, - * index))}. - * - * @see Selector#element(Selector, Term) - */ - @NonNull - default Select element(@NonNull Selector collection, @NonNull Term index) { - return selector(Selector.element(collection, index)); - } - - /** - * Shortcut for element selection when the target collection is a simple column. - * - *

    In other words, this is the equivalent of {@link #element(Selector, Term) - * element(QueryBuilder.column(collection), index)}. - * - * @see Selector#element(CqlIdentifier, Term) - */ - @NonNull - default Select element(@NonNull CqlIdentifier collectionId, @NonNull Term index) { - return element(Selector.column(collectionId), index); - } - - /** - * Shortcut for {@link #element(CqlIdentifier, Term) - * element(CqlIdentifier.fromCql(collectionName), index)}. - * - * @see Selector#element(String, Term) - */ - @NonNull - default Select element(@NonNull String collectionName, @NonNull Term index) { - return element(CqlIdentifier.fromCql(collectionName), index); - } - - /** - * Selects a slice in a collection column, as in {@code SELECT s[4..8]}. - * - *

    As of Cassandra 4, this is only allowed for set and map columns. Those collections are - * ordered, the elements (or keys in the case of a map), will be compared to the bounds for - * inclusions. Either bound can be unspecified, but not both. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.range(collection, left, - * right))}. - * - * @param left the left bound (inclusive). Can be {@code null} to indicate that the slice is only - * right-bound. - * @param right the right bound (inclusive). Can be {@code null} to indicate that the slice is - * only left-bound. - * @see Selector#range(Selector, Term, Term) - */ - @NonNull - default Select range(@NonNull Selector collection, @Nullable Term left, @Nullable Term right) { - return selector(Selector.range(collection, left, right)); - } - - /** - * Shortcut for slice selection when the target collection is a simple column. - * - *

    In other words, this is the equivalent of {@link #range(Selector, Term, Term)} - * range(Selector.column(collectionId), left, right)}. - * - * @see Selector#range(CqlIdentifier, Term, Term) - */ - @NonNull - default Select range( - @NonNull CqlIdentifier collectionId, @Nullable Term left, @Nullable Term right) { - return range(Selector.column(collectionId), left, right); - } - - /** - * Shortcut for {@link #range(CqlIdentifier, Term, Term) - * range(CqlIdentifier.fromCql(collectionName), left, right)}. - * - * @see Selector#range(String, Term, Term) - */ - @NonNull - default Select range(@NonNull String collectionName, @Nullable Term left, @Nullable Term right) { - return range(CqlIdentifier.fromCql(collectionName), left, right); - } - - /** - * Selects a group of elements as a list, as in {@code SELECT [a,b,c]}. - * - *

    None of the selectors should be aliased (the query builder checks this at runtime), and they - * should all produce the same data type (the query builder can't check this, so the query will - * fail at execution time). - * - *

    This is a shortcut for {@link #selector(Selector) - * selector(Selector.listOf(elementSelectors))}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - * @see Selector#listOf(Iterable) - */ - @NonNull - default Select listOf(@NonNull Iterable elementSelectors) { - return selector(Selector.listOf(elementSelectors)); - } - - /** Var-arg equivalent of {@link #listOf(Iterable)}. */ - @NonNull - default Select listOf(@NonNull Selector... elementSelectors) { - return listOf(Arrays.asList(elementSelectors)); - } - - /** - * Selects a group of elements as a set, as in {@code SELECT {a,b,c}}. - * - *

    None of the selectors should be aliased (the query builder checks this at runtime), and they - * should all produce the same data type (the query builder can't check this, so the query will - * fail at execution time). - * - *

    This is a shortcut for {@link #selector(Selector) - * selector(Selector.setOf(elementSelectors))}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - * @see Selector#setOf(Iterable) - */ - @NonNull - default Select setOf(@NonNull Iterable elementSelectors) { - return selector(Selector.setOf(elementSelectors)); - } - - /** Var-arg equivalent of {@link #setOf(Iterable)}. */ - @NonNull - default Select setOf(@NonNull Selector... elementSelectors) { - return setOf(Arrays.asList(elementSelectors)); - } - - /** - * Selects a group of elements as a tuple, as in {@code SELECT (a,b,c)}. - * - *

    None of the selectors should be aliased (the query builder checks this at runtime). - * - *

    This is a shortcut for {@link #selector(Selector) - * selector(Selector.tupleOf(elementSelectors))}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - * @see Selector#tupleOf(Iterable) - */ - @NonNull - default Select tupleOf(@NonNull Iterable elementSelectors) { - return selector(Selector.tupleOf(elementSelectors)); - } - - /** Var-arg equivalent of {@link #tupleOf(Iterable)}. */ - @NonNull - default Select tupleOf(@NonNull Selector... elementSelectors) { - return tupleOf(Arrays.asList(elementSelectors)); - } - - /** - * Selects a group of elements as a map, as in {@code SELECT {a:b,c:d}}. - * - *

    None of the selectors should be aliased (the query builder checks this at runtime). In - * addition, all key selectors should produce the same type, and all value selectors as well (the - * key and value types can be different); the query builder can't check this, so the query will - * fail at execution time if the types are not uniform. - * - *

    This is a shortcut for {@link #selector(Selector) - * selector(Selector.mapOf(elementSelectors))}. - * - *

    Note that Cassandra often has trouble inferring the exact map type. This will manifest as - * the error message: - * - *

    -   *   Cannot infer type for term xxx in selection clause (try using a cast to force a type)
    -   * 
    - * - * If you run into this, consider providing the types explicitly with {@link #mapOf(Map, DataType, - * DataType)}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - * @see Selector#mapOf(Map) - */ - @NonNull - default Select mapOf(@NonNull Map elementSelectors) { - return selector(Selector.mapOf(elementSelectors)); - } - - /** - * Selects a group of elements as a map and force the resulting map type, as in {@code SELECT - * (map){a:b,c:d}}. - * - *

    To create the data types, use the constants and static methods in {@link DataTypes}, or - * {@link QueryBuilder#udt(CqlIdentifier)}. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.mapOf(elementSelectors, - * keyType, valueType))}. - * - * @see #mapOf(Map) - * @see Selector#mapOf(Map, DataType, DataType) - */ - @NonNull - default Select mapOf( - @NonNull Map elementSelectors, - @NonNull DataType keyType, - @NonNull DataType valueType) { - return selector(Selector.mapOf(elementSelectors, keyType, valueType)); - } - - /** - * Provides a type hint for a selector, as in {@code SELECT (double)1/3}. - * - *

    Use the constants and static methods in {@link DataTypes} to create the data type. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.typeHint(selector, - * targetType))}. - * - * @see Selector#typeHint(Selector, DataType) - */ - @NonNull - default Select typeHint(@NonNull Selector selector, @NonNull DataType targetType) { - return selector(Selector.typeHint(selector, targetType)); - } - - /** - * Selects the result of a function call, as is {@code SELECT f(a,b)} - * - *

    None of the arguments should be aliased (the query builder checks this at runtime). - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.function(functionId, - * arguments))}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - * @see Selector#function(CqlIdentifier, Iterable) - */ - @NonNull - default Select function( - @NonNull CqlIdentifier functionId, @NonNull Iterable arguments) { - return selector(Selector.function(functionId, arguments)); - } - - /** - * Var-arg equivalent of {@link #function(CqlIdentifier, Iterable)}. - * - * @see Selector#function(CqlIdentifier, Selector...) - */ - @NonNull - default Select function(@NonNull CqlIdentifier functionId, @NonNull Selector... arguments) { - return function(functionId, Arrays.asList(arguments)); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, Iterable) - * function(CqlIdentifier.fromCql(functionName), arguments)}. - * - * @see Selector#function(String, Iterable) - */ - @NonNull - default Select function(@NonNull String functionName, @NonNull Iterable arguments) { - return function(CqlIdentifier.fromCql(functionName), arguments); - } - - /** - * Var-arg equivalent of {@link #function(String, Iterable)}. - * - * @see Selector#function(String, Selector...) - */ - @NonNull - default Select function(@NonNull String functionName, @NonNull Selector... arguments) { - return function(functionName, Arrays.asList(arguments)); - } - - /** - * Selects the result of a function call, as is {@code SELECT f(a,b)} - * - *

    None of the arguments should be aliased (the query builder checks this at runtime). - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.function(keyspaceId, - * functionId, arguments))}. - * - * @throws IllegalArgumentException if any of the selectors is aliased. - * @see Selector#function(CqlIdentifier, CqlIdentifier, Iterable) - */ - @NonNull - default Select function( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Iterable arguments) { - return selector(Selector.function(keyspaceId, functionId, arguments)); - } - - /** - * Var-arg equivalent of {@link #function(CqlIdentifier,CqlIdentifier, Iterable)}. - * - * @see Selector#function(CqlIdentifier, CqlIdentifier, Selector...) - */ - @NonNull - default Select function( - @Nullable CqlIdentifier keyspaceId, - @NonNull CqlIdentifier functionId, - @NonNull Selector... arguments) { - return function(keyspaceId, functionId, Arrays.asList(arguments)); - } - - /** - * Shortcut for {@link #function(CqlIdentifier, CqlIdentifier, Iterable) - * function(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(functionName), arguments)}. - * - * @see Selector#function(String, String, Iterable) - */ - @NonNull - default Select function( - @Nullable String keyspaceName, - @NonNull String functionName, - @NonNull Iterable arguments) { - return function( - keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), - CqlIdentifier.fromCql(functionName), - arguments); - } - - /** - * Var-arg equivalent of {@link #function(String, String, Iterable)}. - * - * @see Selector#function(String, String, Selector...) - */ - @NonNull - default Select function( - @Nullable String keyspaceName, @NonNull String functionName, @NonNull Selector... arguments) { - return function(keyspaceName, functionName, Arrays.asList(arguments)); - } - - /** - * Shortcut to select the result of the built-in {@code writetime} function, as in {@code SELECT - * writetime(c)}. 
- * - * @see Selector#writeTime(CqlIdentifier) - */ - @NonNull - default Select writeTime(@NonNull CqlIdentifier columnId) { - return selector(Selector.writeTime(columnId)); - } - - /** - * Shortcut for {@link #writeTime(CqlIdentifier) writeTime(CqlIdentifier.fromCql(columnName))}. - * - * @see Selector#writeTime(String) - */ - @NonNull - default Select writeTime(@NonNull String columnName) { - return writeTime(CqlIdentifier.fromCql(columnName)); - } - - /** - * Shortcut to select the result of the built-in {@code ttl} function, as in {@code SELECT - * ttl(c)}. - * - * @see Selector#ttl(CqlIdentifier) - */ - @NonNull - default Select ttl(@NonNull CqlIdentifier columnId) { - return selector(Selector.ttl(columnId)); - } - - /** - * Shortcut for {@link #ttl(CqlIdentifier) ttl(CqlIdentifier.fromCql(columnName))}. - * - * @see Selector#ttl(String) - */ - @NonNull - default Select ttl(@NonNull String columnName) { - return ttl(CqlIdentifier.fromCql(columnName)); - } - - /** - * Casts a selector to a type, as in {@code SELECT CAST(a AS double)}. - * - *

    To create the data type, use the constants and static methods in {@link DataTypes}, or - * {@link QueryBuilder#udt(CqlIdentifier)}. - * - *

    This is a shortcut for {@link #selector(Selector) selector(Selector.function(keyspaceId, - * functionId, arguments))}. - * - * @throws IllegalArgumentException if the selector is aliased. - * @see Selector#cast(Selector, DataType) - */ - @NonNull - default Select cast(@NonNull Selector selector, @NonNull DataType targetType) { - return selector(Selector.cast(selector, targetType)); - } - - /** - * Shortcut to select the result of the built-in {@code toDate} function. - * - * @see Selector#toDate(CqlIdentifier) - */ - @NonNull - default Select toDate(@NonNull CqlIdentifier columnId) { - return selector(Selector.toDate(columnId)); - } - - /** Shortcut for {@link #toDate(CqlIdentifier) toDate(CqlIdentifier.fromCql(columnName))}. */ - @NonNull - default Select toDate(@NonNull String columnName) { - return toDate(CqlIdentifier.fromCql(columnName)); - } - - /** - * Shortcut to select the result of the built-in {@code toTimestamp} function. - * - * @see Selector#toTimestamp(CqlIdentifier) - */ - @NonNull - default Select toTimestamp(@NonNull CqlIdentifier columnId) { - return selector(Selector.toTimestamp(columnId)); - } - - /** - * Shortcut for {@link #toTimestamp(CqlIdentifier) - * toTimestamp(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default Select toTimestamp(@NonNull String columnName) { - return toTimestamp(CqlIdentifier.fromCql(columnName)); - } - - /** - * Shortcut to select the result of the built-in {@code toUnixTimestamp} function. - * - * @see Selector#toUnixTimestamp(CqlIdentifier) - */ - @NonNull - default Select toUnixTimestamp(@NonNull CqlIdentifier columnId) { - return selector(Selector.toUnixTimestamp(columnId)); - } - - /** - * Shortcut for {@link #toUnixTimestamp(CqlIdentifier) - * toUnixTimestamp(CqlIdentifier.fromCql(columnName))}. - */ - @NonNull - default Select toUnixTimestamp(@NonNull String columnName) { - return toUnixTimestamp(CqlIdentifier.fromCql(columnName)); - } - - /** - * Selects literal value, as in {@code WHERE k = 1}. 
- * - *

    This method can process any type for which there is a default Java to CQL mapping, namely: - * primitive types ({@code Integer=>int, Long=>bigint, String=>text, etc.}), and collections, - * tuples, and user defined types thereof. - * - *

    A null argument will be rendered as {@code NULL}. - * - *

    For custom mappings, use {@link #literal(Object, CodecRegistry)} or {@link #literal(Object, - * TypeCodec)}. - * - * @throws CodecNotFoundException if there is no default CQL mapping for the Java type of {@code - * value}. - * @see QueryBuilder#literal(Object) - */ - @NonNull - default Select literal(@Nullable Object value) { - return literal(value, CodecRegistry.DEFAULT); - } - - /** - * Selects a literal value, as in {@code WHERE k = 1}. - * - *

    This is an alternative to {@link #literal(Object)} for custom type mappings. The provided - * registry should contain a codec that can format the value. Typically, this will be your - * session's registry, which is accessible via {@code session.getContext().getCodecRegistry()}. - * - * @see DriverContext#getCodecRegistry() - * @throws CodecNotFoundException if {@code codecRegistry} does not contain any codec that can - * handle {@code value}. - * @see QueryBuilder#literal(Object, CodecRegistry) - */ - @NonNull - default Select literal(@Nullable Object value, @NonNull CodecRegistry codecRegistry) { - return literal(value, (value == null) ? null : codecRegistry.codecFor(value)); - } - - /** - * Selects a literal value, as in {@code WHERE k = 1}. - * - *

    This is an alternative to {@link #literal(Object)} for custom type mappings. The value will - * be turned into a string with {@link TypeCodec#format(Object)}, and inlined in the query. - * - * @see QueryBuilder#literal(Object, TypeCodec) - */ - @NonNull - default Select literal(@Nullable T value, @Nullable TypeCodec codec) { - return selector(QueryBuilder.literal(value, codec)); - } - - /** - * Selects an arbitrary expression expressed as a raw string. - * - *

    The contents will be appended to the query as-is, without any syntax checking or escaping. - * This method should be used with caution, as it's possible to generate invalid CQL that will - * fail at execution time; on the other hand, it can be used as a workaround to handle new CQL - * features that are not yet covered by the query builder. - * - *

    This is a shortcut for {@link #selector(Selector) - * selector(QueryBuilder.raw(rawExpression))}. - */ - @NonNull - default Select raw(@NonNull String rawExpression) { - return selector(QueryBuilder.raw(rawExpression)); - } - - /** - * Aliases the last added selector, as in {@code SELECT count(*) AS total}. - * - *

    It is the caller's responsibility to ensure that this method is called at most once after - * each selector, and that this selector can legally be aliased: - * - *

      - *
    • if it is called multiple times ({@code countAll().as("total1").as("total2")}), the last - * alias will override the previous ones. - *
    • if it is called before any selector was set, or after {@link #all()}, an {@link - * IllegalStateException} is thrown. - *
    • if it is called after a {@link #raw(String)} selector that already defines an alias, the - * query will fail at runtime. - *
    - */ - @NonNull - Select as(@NonNull CqlIdentifier alias); - - /** Shortcut for {@link #as(CqlIdentifier) as(CqlIdentifier.fromCql(alias))} */ - @NonNull - default Select as(@NonNull String alias) { - return as(CqlIdentifier.fromCql(alias)); - } -} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OrderingClause.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OrderingClause.java deleted file mode 100644 index d653d5f4b8f..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OrderingClause.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder.select; - -import com.datastax.oss.driver.api.querybuilder.CqlSnippet; - -/** - * Abstract representation of an ordering clause (i.e. ORDER BY) in a CQL statement. Alternate - * implementations may be provided if servers wind up implementing customized orderings. 
- */ -public abstract class OrderingClause implements CqlSnippet {} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java deleted file mode 100644 index 159657989da..00000000000 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.querybuilder.select; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.data.CqlVector; -import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; -import com.datastax.oss.driver.api.querybuilder.BindMarker; -import com.datastax.oss.driver.api.querybuilder.BuildableQuery; -import com.datastax.oss.driver.api.querybuilder.QueryBuilder; -import com.datastax.oss.driver.api.querybuilder.relation.OngoingWhereClause; -import com.datastax.oss.driver.internal.core.CqlIdentifiers; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Arrays; -import java.util.Map; - -/** - * A complete SELECT query. - * - *

    It knows about the table and at least one selector, and is therefore buildable. Additional - * selectors and clauses can still be added before building. - */ -public interface Select extends OngoingSelection, OngoingWhereClause